Steps:
1. Prepare dataset
2. Design model using a class
3. Construct loss and optimizer
4. Training cycle + test
1. Prepare dataset
(New step compared with the previous examples:) the input images must go through a transform: the original image is converted from H*W*C to C*H*W, and the pixel values are scaled into [0, 1].
PyTorch processes images in channels-first order, C*H*W:
C: number of channels
H: image height
W: image width
First define the batch size, then the transform operations.
Use datasets to build the training set and the test set.
Use DataLoader to load each dataset and apply the desired batching and shuffling.
The concrete transform (plus the imports the rest of the code relies on):
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms, datasets
from torch.utils.data import DataLoader

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
ToTensor converts the input into a tensor and scales it into [0, 1] (pixel values divided by 255).
Normalize standardizes each channel with the given mean and standard deviation: x' = (x - mean) / std.
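A minimal sketch of what this pipeline produces, using a randomly generated 28*28 grayscale image as a stand-in for a real MNIST digit (the numpy/PIL setup is only for illustration):
import numpy as np
from PIL import Image

img = Image.fromarray(np.random.randint(0, 256, (28, 28), dtype=np.uint8))  # fake H*W uint8 image
x = transform(img)  # the Compose pipeline defined above
print(x.shape)      # torch.Size([1, 28, 28]), i.e. (C, H, W), values standardized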
Common pitfall: for sum, dim=1 sums across the columns within each row (one result per row), while dim=0 sums across the rows within each column (one result per column).
For max, dim=1 takes the maximum across the columns within each row, while dim=0 takes the maximum across the rows within each column.
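A quick check of these dim semantics on a 2*3 tensor:
t = torch.tensor([[1, 2, 3],
                  [4, 5, 6]])
print(t.sum(dim=0))         # tensor([5, 7, 9])  -> one sum per column
print(t.sum(dim=1))         # tensor([ 6, 15])   -> one sum per row
print(torch.max(t, dim=1))  # values=tensor([3, 6]), indices=tensor([2, 2]) -> max of each row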
batch_size = 64
train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
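As a sanity check (not part of the original code), one batch can be pulled from the loader to confirm the shapes described above:
inputs, target = next(iter(train_loader))
print(inputs.shape)  # torch.Size([64, 1, 28, 28]) -> (batch, C, H, W)
print(target.shape)  # torch.Size([64])            -> one class label per image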
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)

    def forward(self, x):
        x = x.view(-1, 784)  # -1 lets PyTorch infer the mini-batch size automatically
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return self.l5(x)  # no activation on the last layer, no non-linear transform
model = Net()
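A quick shape check (hypothetical, not in the original): feeding a dummy batch through the network confirms that view(-1, 784) flattens the images and that the output has one raw score per class:
dummy = torch.randn(64, 1, 28, 28)  # same shape as a MNIST mini-batch
print(model(dummy).shape)           # torch.Size([64, 10]) -> raw scores (logits), no softmax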
# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
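CrossEntropyLoss applies LogSoftmax and NLLLoss internally, which is exactly why forward() returns raw scores without an activation. A minimal illustration with made-up values:
logits = torch.randn(4, 10)          # 4 samples, 10 raw class scores each
labels = torch.tensor([3, 7, 0, 1])  # class indices, not one-hot vectors
print(criterion(logits, labels))     # a scalar loss tensor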
# training cycle: forward, backward, update
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        # get one batch of inputs and labels
        inputs, target = data
        optimizer.zero_grad()
        # model predictions, shape (64, 10)
        outputs = model(inputs)
        # print(outputs)  # raw scores with arbitrary range (no softmax yet)
        # cross-entropy loss: outputs is (64, 10), target is (64)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            """A 64x10 tensor with arbitrary range (no softmax applied):
            64 samples with 10 output values each; the larger a value,
            the higher the predicted probability of that class."""
            # print(outputs.data)
            _, predicted = torch.max(outputs.data, dim=1)  # dim=1: max across the columns of each row
            print('raw outputs:', outputs.data)
            print('predicted:', predicted)
            print('labels:', labels)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()  # element-wise comparison between tensors
            """Extract each image's maximum score (64 images -> 64 values),
            compare each prediction with its target label (equal entries count as 1),
            then take the number of correct predictions over the total."""
    print('accuracy on test set: %d %% ' % (100 * correct / total))

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()