文章目录
1、torch 与 numpy 之间的转化
import torch
import numpy as np

# Round-trip demo: NumPy array -> Torch tensor -> back to a NumPy array.
np_data = np.arange(6).reshape(2, 3)    # source NumPy array
torch_data = torch.from_numpy(np_data)  # tensor view of the same data
torch_data2np = torch_data.numpy()      # tensor -> array again
# np_data:       [[0 1 2] [3 4 5]]
# torch_data:    tensor([[0, 1, 2], [3, 4, 5]])
# torch_data2np: [[0 1 2] [3 4 5]]
2、数学运算
PyTorch 中的常用数学计算可见官网:https://pytorch.org/docs/stable/torch.html#math-operations
# --- abs (absolute value) ---
data = [-1, -2, 3, 4]
tensor = torch.FloatTensor(data)  # convert to a 32-bit float tensor
# BUG FIX: the expected outputs below were shown, but the abs calls
# themselves were missing from the original snippet.
np.abs(data)        # -> [1 2 3 4]
torch.abs(tensor)   # -> tensor([1., 2., 3., 4.])

# --- sin (trigonometric) ---
np.sin(data)        # -> [-0.84147098 -0.90929743  0.14112001 -0.7568025 ]
torch.sin(tensor)   # -> tensor([-0.8415, -0.9093,  0.1411, -0.7568])

# --- mean ---
np.mean(data)       # -> 1.0
torch.mean(tensor)  # -> tensor(1.)

# --- matrix multiplication ---
data = np.arange(4).reshape((2, 2))
tensor = torch.FloatTensor(data)  # convert to a 32-bit float tensor
data.dot(data)            # -> [[ 2  3] [ 6 11]]
np.matmul(data, data)     # -> same result as dot for 2-D arrays
torch.mm(tensor, tensor)  # -> tensor([[ 2.,  3.], [ 6., 11.]])
3、torch中的一些激活函数
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# A linearly spaced input so each activation can be drawn over [-5, 5].
x = torch.linspace(-5, 5, 200)
x_np = x.data.numpy()  # matplotlib plots NumPy arrays

# Evaluate the common activation functions on x.
y_relu = F.relu(x).data.numpy()
y_sigmoid = torch.sigmoid(x).data.numpy()
y_tanh = torch.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()

# One subplot per activation; same figure layout as a 2x2 grid.
plt.figure(1, figsize=(8, 6))
for position, y_np, name, limits in [
    (221, y_relu, 'relu', (-1, 5)),
    (222, y_sigmoid, 'sigmoid', (-0.2, 1.2)),
    (223, y_tanh, 'tanh', (-1.2, 1.2)),
    (224, y_softplus, 'softplus', (-0.2, 6)),
]:
    plt.subplot(position)
    plt.plot(x_np, y_np, c='red', label=name)
    plt.ylim(limits)
    plt.legend(loc='best')
plt.show()
4、torch建立网络的两种方式
import torch
# method 1:建立一个类,继承父类torch.nn.Module
class Net(torch.nn.Module):
    """A minimal two-layer fully connected network (hidden + output)."""

    def __init__(self, n_feature, n_hidden, n_output):
        """Create the hidden and output linear layers."""
        super().__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # input -> hidden
        self.out = torch.nn.Linear(n_hidden, n_output)      # hidden -> output

    def forward(self, x):
        """Apply the hidden layer with ReLU, then the linear output layer."""
        hidden_activation = torch.relu(self.hidden(x))
        return self.out(hidden_activation)
# method 2:直接使用torch.nn.Sequential()
# Layers listed in execution order: 2 inputs -> 10 hidden (ReLU) -> 2 outputs.
_layers = [
    torch.nn.Linear(2, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 2),
]
net2 = torch.nn.Sequential(*_layers)
5、模型的保存和使用
import torch

# A small network used to demonstrate saving and restoring models.
net = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1),
)

# Two ways to save:
torch.save(net, 'net.pkl')                      # whole module (architecture + parameters, pickled)
torch.save(net.state_dict(), 'net_params.pkl')  # parameters only (the recommended form)

# Two ways to restore:
# Method 1: load the complete pickled module in one call.
# BUG FIX: weights_only=False is required on PyTorch >= 2.6, where
# torch.load defaults to weights_only=True and refuses arbitrary
# pickled modules. Only do this with files you trust.
net1 = torch.load('net.pkl', weights_only=False)

# Method 2: rebuild an architecture identical to the saved one,
# then load the parameters into it.
net2 = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1),
)
net2.load_state_dict(torch.load('net_params.pkl'))
6、torch中的batch
import torch
import torch.utils.data as Data

torch.manual_seed(1)  # reproducible results across runs

BATCH_SIZE = 8

# Ten paired scalar samples: x counts up 1..10 while y counts down 10..1.
x = torch.linspace(1, 10, 10)
y = torch.linspace(10, 1, 10)

torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,   # wraps the (x, y) pairs
    batch_size=BATCH_SIZE,   # mini-batch size (last batch holds the remaining 2)
    shuffle=False,           # keep sample order fixed from epoch to epoch
    num_workers=2,           # load batches in two worker subprocesses
)


def show_batch():
    """Iterate the loader for three epochs, printing every mini-batch."""
    for epoch in range(3):
        for step, (batch_x, batch_y) in enumerate(loader):
            # train your data...
            print('Epoch: ', epoch, '| Step: ', step, '| batch x: ',
                  batch_x.numpy(), '| batch y: ', batch_y.numpy())


if __name__ == '__main__':
    show_batch()
7、几种常见的optimizer
import torch

# BUG FIX: every optimizer constructor requires an iterable of parameters
# as its first argument; the original called them with no arguments at
# all, which raises TypeError on every line.
net = torch.nn.Sequential(torch.nn.Linear(2, 1))  # tiny model to optimize
LR = 0.01  # learning rate shared by the examples below

opt_SGD = torch.optim.SGD(net.parameters(), lr=LR)
opt_Momentum = torch.optim.SGD(net.parameters(), lr=LR, momentum=0.6)
opt_RMSprop = torch.optim.RMSprop(net.parameters(), lr=LR, alpha=0.9)
opt_Adam = torch.optim.Adam(net.parameters(), lr=LR, betas=(0.9, 0.99))