Pytorch笔记
pytorch与numpy的相互转换
- 在CPU上,通过 torch.from_numpy() / Tensor.numpy() 互转时,numpy 数组和 tensor 共用同一块存储空间;而 torch.tensor() 会复制数据,得到独立的存储。
import time
import numpy as np
import torch
def numpy_to_tensor_to_gpu_to_cpu_tensor_to_numpy():
    """Show when numpy arrays and torch tensors share storage.

    On CPU, ``torch.from_numpy()`` wraps the numpy buffer (shared storage),
    while ``torch.tensor()`` always makes an independent copy.  Moving a
    tensor to CUDA and back also copies, so a round-tripped tensor no longer
    shares memory with the original numpy array.  Every stage is printed so
    the sharing/copying behaviour can be observed.  Returns ``None``.
    """
    data_np = np.array([[1, 2, 0], [3, 4, 5]])
    data_tr_s = torch.from_numpy(data_np)                 # shares data_np's buffer
    data_tr_d = torch.tensor(data_np, dtype=torch.int64)  # independent copy
    print('0.cpu origin data_np', list(data_np))
    print('0.cpu origin data_tr_s', list(data_tr_s))
    print('0.cpu origin data_tr_d', list(data_tr_d))

    # Mutating the numpy array is visible through data_tr_s (shared) but
    # not through data_tr_d (copied).
    data_np[0][1] = data_np[1][0]
    print('1.cpu modify data_np', list(data_np))
    print('1.cpu modify data_tr_s', list(data_tr_s))
    print('1.cpu modify data_tr_d', list(data_tr_d))

    # .cuda() copies to device memory; guard so the demo also runs on
    # CPU-only machines instead of raising.
    if torch.cuda.is_available():
        data_tr_s = data_tr_s.cuda()
        data_tr_d = data_tr_d.cuda()
        print('2.gpu origin data_tr_s', list(data_tr_s))
        print('2.gpu origin data_tr_d', list(data_tr_d))
        data_tr_s = data_tr_s.add_(1)
        data_tr_d = data_tr_d.add_(1)
        print('2.gpu modify data_tr_s', list(data_tr_s))
        print('2.gpu modify data_tr_d', list(data_tr_d))
        # .cpu() copies back into a fresh host buffer (not data_np's).
        data_tr_s = data_tr_s.cpu()
        data_tr_d = data_tr_d.cpu()
    print('3.cpu origin data_tr_s', list(data_tr_s))
    print('3.cpu origin data_tr_d', list(data_tr_d))

    data_np_s = data_tr_s.numpy()  # shares the tensor's current buffer
    data_np_d = data_tr_d.numpy()
    # In-place add is visible through the .numpy() views created above.
    data_tr_s = data_tr_s.add_(1)
    data_tr_d = data_tr_d.add_(1)
    print('3.cpu modify data_tr_s', list(data_tr_s))
    print('3.cpu modify data_tr_d', list(data_tr_d))
    print('3.cpu modify data_np_s', list(data_np_s))
    print('3.cpu modify data_np_d', list(data_np_d))
if __name__ == '__main__':
    # Run the numpy/tensor memory-sharing demo when executed as a script.
    numpy_to_tensor_to_gpu_to_cpu_tensor_to_numpy()
import time
import numpy as np
import cupy as cp
import torch
def time_of_numpy_to_cupy_to_numpy(data_size):
    """Benchmark numpy<->cupy transfers and matmul for a square matrix.

    Times four stages and prints each in milliseconds: host->device copy,
    GPU matmul, CPU (numpy) matmul, device->host copy.

    Args:
        data_size: edge length of the (data_size, data_size) float matrix.
    """
    data_np = np.ones((data_size, data_size), dtype=float)

    time_start = time.perf_counter()
    data_cp = cp.asarray(data_np)
    # CuPy operations are asynchronous: without a synchronize the timer
    # only measures the launch, not the actual transfer/kernel.
    cp.cuda.Stream.null.synchronize()
    time_end = time.perf_counter()
    print('cp np_to_cp duration {}ms'.format((time_end - time_start) * 1000))

    time_start = time.perf_counter()
    data_cp = cp.matmul(data_cp, data_cp)
    cp.cuda.Stream.null.synchronize()
    time_end = time.perf_counter()
    print('cp operate duration {}ms'.format((time_end - time_start) * 1000))

    time_start = time.perf_counter()
    data_np = np.matmul(data_np, data_np)
    time_end = time.perf_counter()
    print('np operate duration {}ms'.format((time_end - time_start) * 1000))

    time_start = time.perf_counter()
    # cp.asnumpy blocks until the device->host copy completes.
    data_np = cp.asnumpy(data_cp)
    time_end = time.perf_counter()
    # Fixed copy-paste label: this stage is cp_to_np, not np_to_cp.
    print('cp cp_to_np duration {}ms'.format((time_end - time_start) * 1000))
def time_of_numpy_to_pytorch_to_numpy(data_size):
    """Benchmark numpy<->torch conversion, CPU<->GPU moves and matmul.

    Times each stage and prints it in milliseconds.  On CPU-only machines
    the CUDA transfer stages are skipped and the matmul runs on CPU,
    instead of raising as the unguarded .cuda() calls would.

    Args:
        data_size: edge length of the (data_size, data_size) float matrix.
    """
    data_np = np.ones((data_size, data_size), dtype=float)

    time_start = time.perf_counter()
    data_tr = torch.from_numpy(data_np)  # zero-copy wrap of the numpy buffer
    time_end = time.perf_counter()
    print('torch np_to_torch duration {}ms'.format((time_end - time_start) * 1000))

    use_cuda = torch.cuda.is_available()
    if use_cuda:
        time_start = time.perf_counter()
        data_tr = data_tr.cuda()
        # CUDA ops are asynchronous; synchronize so the timer measures the
        # transfer itself, not just the launch.
        torch.cuda.synchronize()
        time_end = time.perf_counter()
        print('torch cpu_to_cuda duration {}ms'.format((time_end - time_start) * 1000))

    data_tr_1 = torch.add(data_tr, 1)
    time_start = time.perf_counter()
    data_tr = torch.mm(data_tr_1, data_tr)
    if use_cuda:
        torch.cuda.synchronize()  # wait for the kernel before stopping the timer
    time_end = time.perf_counter()
    print('torch operate duration {}ms'.format((time_end - time_start) * 1000))

    if use_cuda:
        time_start = time.perf_counter()
        data_tr = data_tr.cpu()
        time_end = time.perf_counter()
        print('torch cuda_to_cpu duration {}ms'.format((time_end - time_start) * 1000))

    time_start = time.perf_counter()
    data_np = data_tr.numpy()
    time_end = time.perf_counter()
    # Label word order made consistent with the other stages.
    print('torch torch_to_np duration {}ms'.format((time_end - time_start) * 1000))
if __name__ == '__main__':
    # Benchmark both transfer paths with a 10000x10000 float64 matrix.
    data_size = 10000
    time_of_numpy_to_cupy_to_numpy(data_size)
    time_of_numpy_to_pytorch_to_numpy(data_size)