pytorch下载
https://download.pytorch.org/whl/torch
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
入门模型-线性
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from torch import nn  # nn is the core neural-network module of torch
# Load the education/income dataset (expects `Education` and `Income` columns).
data = pd.read_csv('dataset/Income1.csv')
data.info()
# Scatter plot of the raw data.
# Bug fix: original wrote `plt.scatter(data.Education,data,Income)` -- the
# comma typo passed `data` and an undefined name `Income` instead of the column.
plt.scatter(data.Education, data.Income)
# Axis labels.
plt.xlabel('Education')
plt.ylabel('Income')
# Inputs: take the numpy values and reshape to (N, 1); -1 lets numpy infer N,
# 1 is the feature length -- 30 samples of one feature each.
X = torch.from_numpy(data.Education.values.reshape(-1, 1)).type(torch.FloatTensor)  # shape (30, 1)
Y = torch.from_numpy(data.Income.values.reshape(-1, 1)).type(torch.FloatTensor)
X.shape
#必须继承自nn.Module
class EIModel(nn.Module):
    """Minimal linear-regression model f(x) = w * x + b built on nn.Linear.

    Must subclass nn.Module so parameters are registered for the optimiser.
    """

    def __init__(self):
        # Initialise the parent nn.Module machinery first.
        super(EIModel, self).__init__()
        # nn.Linear creates and initialises a weight and bias for one input
        # feature mapped to one output feature.
        self.linear = nn.Linear(in_features=1, out_features=1)

    def forward(self, inputs):
        # Forward pass: apply the affine transform to the input batch.
        return self.linear(inputs)
#实例化类
model=EIModel() #model输出为:EIModel((linear):Linear(in_features=1,out_features=1,bias=True)
#loss_fn返回一个可调用的对象
loss_fn=nn.MSELoss() #损失均方误差计算函数
#定义优化函数,使用优化器,优化模型的参数,model.parameters()返回模型的可训练参数,lr:学习率
#SGD随机梯度下降
opt=torch.optim.SGD(model.parameters(),lr=0.0001)
for epoch in range(5000):
#同时对X和Y进行同时迭代
for x,y in zip(X,Y):
y_pred=model(x) #x被forward()方法接收,就是inputs
loss=loss_fn(y_pred,y) #预测输出和和真实输出
#pytorch会累积每次计算的梯度
opt.zero_grad() #将以前的梯度清零
loss.backward() #通过反向传播确定模型的方向应该向哪个方向移动(model.parameters())
opt.step() #沿着下降最快的方向移动
list(model.parameters()) #返回优化后的值,分别代表权重和偏置
list(model.named_parameters()) #返回名称和优化有的值
model.linear.weight
model.linear.bias
#绘制直线图,detach()截断梯度,不获取梯度
plt.plot(X,model(X).detach().numpy(),c='r') #将tensor转换为numpy
分解写法
# Hand-rolled version of the same linear fit, without nn.Linear / an optimiser.
# w and b start with no gradient (grad is None until the first backward()).
w = torch.randn(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
learning_rate = 0.0001
for epoch in range(5000):
    for x, y in zip(X, Y):  # after one iteration w and b carry gradients
        y_pred = torch.matmul(x, w) + b
        loss = (y - y_pred).pow(2).mean()  # mean squared error
        # Gradients accumulate across backward() calls, so BOTH must be zeroed
        # before each step.  Bug fix: the original only zeroed w.grad, letting
        # b.grad accumulate over every iteration and corrupting the b updates.
        if w.grad is not None:
            w.grad.data.zero_()
        if b.grad is not None:
            b.grad.data.zero_()
        loss.backward()  # compute dloss/dw and dloss/db
        with torch.no_grad():
            # Plain SGD update using the freshly computed gradients.
            w.data -= w.grad.data * learning_rate
            b.data -= b.grad.data * learning_rate
# Printing w gives the fitted weight; printing b gives the fitted bias.
# Plot the data and the fitted line.
plt.scatter(data.Education, data.Income)
plt.plot(X.numpy(), (X * w + b).data.numpy(), c='r')
逻辑回归损失函数:Sigmoid函数
MNIST模型训练与测试
训练/验证:
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
# NOTE(review): globally disables SSL certificate verification so the MNIST
# download succeeds behind proxies/self-signed certs -- this weakens security
# for EVERY https request in the process; prefer fixing certificates instead.
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Dataset pipeline: convert images to tensors, then normalise with the
# standard MNIST mean/std (0.1307 / 0.3081).
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)  # reshuffled each epoch
test_loader = DataLoader(test_dataset, batch_size=1000, shuffle=False)
# 定义模型
class SimpleMLP(nn.Module):
    """Three-layer fully-connected classifier for 28x28 MNIST digits (10 classes)."""

    def __init__(self):
        super(SimpleMLP, self).__init__()
        self.fc1 = nn.Linear(784, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        # Flatten each image to a 784-vector, keeping the batch dimension.
        flat = x.view(-1, 784)
        hidden = torch.relu(self.fc1(flat))
        hidden = torch.relu(self.fc2(hidden))
        # Raw logits; CrossEntropyLoss applies the softmax itself.
        return self.fc3(hidden)
model = SimpleMLP()
# Loss function and optimiser.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# Train the model.
num_epochs = 5
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()             # clear accumulated gradients
        output = model(data)              # forward pass
        # Bug fix: removed stray debug `print(output)` here -- it dumped every
        # batch's raw logits and drowned out the training log.
        loss = criterion(output, target)  # compute loss
        loss.backward()                   # backprop: compute current gradients
        optimizer.step()                  # update parameters from gradients
        running_loss += loss.item()
        if batch_idx % 100 == 99:         # report average loss every 100 batches
            print(f'Epoch {epoch+1}, Batch {batch_idx+1}, Loss: {running_loss / 100:.4f}')
            running_loss = 0.0
torch.save(model.state_dict(), 'save.pt')
# Evaluate the trained model on the held-out test set.
model.eval()
correct = 0
total = 0
with torch.no_grad():
    for data, target in test_loader:
        output = model(data)
        # Predicted class = index of the largest logit along the class axis.
        _, predicted = torch.max(output.data, 1)
        total += target.size(0)
        correct += (predicted == target).sum().item()
print(f'Accuracy of the network on the 10000 test images: {100 * correct / total}%')
导出onnx
# NOTE(review): star-import pulls `model` in and re-runs the whole training
# script as an import side effect; module name `minist_train` looks like a
# typo for `mnist_train` -- verify against the actual filename.
from minist_train import *
# Dummy input fixing the (batch, channel, H, W) layout; batch is dynamic below.
x = torch.randn(1, 1, 28, 28)
torch.onnx.export(model, x, "simple_mnist.onnx", export_params=True, opset_version=16, input_names=['input'], output_names=['output'], dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
onnx在线可视化模型结构
simple_mnist.onnx (netron.app)
onnxruntime推理
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import onnxruntime as ort
import numpy as np
# NOTE(review): globally disables SSL certificate verification for the MNIST
# download -- same caveat as in training: affects every https request.
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Test-data pipeline: tensor conversion + standard MNIST normalisation,
# matching the preprocessing used at training time.
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, batch_size=1000, shuffle=False)
# Load the exported ONNX model and score the MNIST test set with onnxruntime.
session = ort.InferenceSession('./simple_mnist.onnx')
correct = 0
total = 0
# The input name is fixed by the export call above; look it up once
# (hoisted out of the loop -- it is loop-invariant).
input_name = session.get_inputs()[0].name
for i, (input_data, target) in enumerate(test_loader):
    input_data_numpy = input_data.numpy()
    # Empty output-name list asks the session for all model outputs.
    outputs = session.run([], {input_name: input_data_numpy})
    predicted = np.argmax(outputs[0], axis=1)
    total += target.size(0)
    target = target.numpy()
    correct += (predicted == target).sum().item()
# Sanity check on the last batch, then the overall accuracy.
# NOTE(review): loop extent reconstructed from flattened source -- these
# final prints are assumed to sit AFTER the loop; confirm intent.
print(predicted)
print(target)
print(f'Accuracy of the network on the 10000 test images: {100 * correct / total}%')
训练曲线
import torch
import torchvision
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
# Hyperparameters for the training-curve experiment.
n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 10  # record/print a training-loss point every 10 batches
random_seed = 1
torch.manual_seed(random_seed)  # fix the RNG seed for reproducibility
# Train loader: downloads MNIST on first use; images normalised with the
# dataset mean/std (0.1307, 0.3081); reshuffled every epoch.
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('./data/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
# Test loader: same preprocessing; shuffle=True so the preview below shows a
# random batch each run.
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('./data/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
# Preview one test batch: show the first six digits with their labels.
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
# example_data is a batch of (1, 28, 28) images; example_targets holds labels.
fig = plt.figure()
for idx in range(6):
    plt.subplot(2, 3, idx + 1)
    plt.tight_layout()
    plt.imshow(example_data[idx][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[idx]))
    plt.xticks([])
    plt.yticks([])
plt.show()
class Net(nn.Module):
    """Small CNN for MNIST: two conv+pool stages with dropout, then two dense layers."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        t = F.relu(F.max_pool2d(self.conv1(x), 2))                    # 28 -> conv(5) -> 24 -> pool -> 12
        t = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(t)), 2))   # 12 -> conv(5) -> 8 -> pool -> 4
        t = t.view(-1, 320)                                           # 20 channels * 4 * 4 = 320
        t = F.relu(self.fc1(t))
        t = F.dropout(t, training=self.training)
        # Log-probabilities, paired with F.nll_loss during training.
        return F.log_softmax(self.fc2(t), dim=1)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum)
# History buffers for the loss curves plotted later.
train_losses = []   # loss value at each logged training batch
train_counter = []  # number of training examples seen at each logged batch
test_losses = []    # average test loss after each evaluation
# One evaluation before training plus one after each epoch.
test_counter = [i * len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
    """Train `network` for one epoch; log, record and checkpoint every `log_interval` batches."""
    network.train()
    for step, (batch, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        preds = network(batch)
        loss = F.nll_loss(preds, labels)
        loss.backward()
        optimizer.step()
        if step % log_interval != 0:
            continue
        print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, step * len(batch),
                                                                       len(train_loader.dataset),
                                                                       100. * step / len(train_loader),
                                                                       loss.item()))
        train_losses.append(loss.item())
        # 64 == batch_size_train: running count of examples seen so far.
        train_counter.append((step * 64) + ((epoch - 1) * len(train_loader.dataset)))
        # Checkpoint model and optimiser so training can be resumed later.
        torch.save(network.state_dict(), './model.pth')
        torch.save(optimizer.state_dict(), './optimizer.pth')
def test():
    """Evaluate `network` on the test set; record the average loss and print accuracy."""
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for batch, labels in test_loader:
            preds = network(batch)
            # Sum (not average) per batch so dividing by the dataset size
            # below yields the true mean loss.
            test_loss += F.nll_loss(preds, labels, reduction='sum').item()
            top = preds.data.max(1, keepdim=True)[1]
            correct += top.eq(labels.data.view_as(top)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Warm-up round; without this extra test() the number of recorded test points
# would not match test_counter and plotting would fail with
# "x and y must be the same size".
train(1)
test()
for epoch in range(1, n_epochs + 1):
    train(epoch)
    test()
# Loss curves: continuous training loss plus one test-loss point per evaluation.
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
# Predict on a fresh test batch and show the first six digits together with
# the network's predictions.
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
with torch.no_grad():
    output = network(example_data)
fig = plt.figure()
for idx in range(6):
    plt.subplot(2, 3, idx + 1)
    plt.tight_layout()
    plt.imshow(example_data[idx][0], cmap='gray', interpolation='none')
    plt.title("Prediction: {}".format(output.data.max(1, keepdim=True)[1][idx].item()))
    plt.xticks([])
    plt.yticks([])
plt.show()
# ----------------------------------------------------------- #
# Resume training from the checkpoints written by train().
continued_network = Net()
# Bug fix: the optimiser must manage the parameters of the network it is meant
# to update -- the original passed `network.parameters()` here, so the loaded
# optimiser state was attached to the wrong model's parameters.
continued_optimizer = optim.SGD(continued_network.parameters(), lr=learning_rate, momentum=momentum)
network_state_dict = torch.load('model.pth')
continued_network.load_state_dict(network_state_dict)
optimizer_state_dict = torch.load('optimizer.pth')
continued_optimizer.load_state_dict(optimizer_state_dict)
# Keep the earlier "for epoch in range(1, n_epochs + 1)" loop above, otherwise
# the curve arrays get out of sync ("x and y must be the same size").
# Epochs start at 4 because n_epochs = 3 and [1, n_epochs + 1) was used above.
for i in range(4, 9):
    test_counter.append(i * len(train_loader.dataset))
    train(i)
    test()
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
plt.show()
模型切分
数据并行
张量并行
流水并行