# Catching up on lessons I never finished — making time to fill the gaps!
# Writing this down so I don't forget, and sharing it with anyone who needs it.
import torch
import torch.nn.functional as F
import torch.optim as optimz # 引入优化器模块
from torchvision import transforms # 对图像处理
from torchvision import datasets # 数据的读取
from torch.utils.data import DataLoader # 加载和处理数据
batch_size = 64
# Preprocessing contract: PIL image -> float tensor in [0, 1], then normalize
# with the standard MNIST mean/std (0.1307, 0.3081).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))])
# Dataset preparation. `root` is the directory that already holds the MNIST
# files (download=False assumes they were fetched beforehand).
train_dataset = datasets.MNIST(root='../hello/MNISTDATA', train=True, download=False, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../hello/MNISTDATA', train=False, download=False, transform=transform)
# BUG FIX: the test loader previously wrapped train_dataset, so "test"
# accuracy was silently measured on the training set. Wrap test_dataset.
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
# 下面定义的模型是针对 老师在第10课中最后留的作业,3个卷积层三个下采样层
# 三个全连接层 而且中间的一些层的基本属性做了变换,主要就是做好矩阵的相乘
# 不出问题前提下,随意构建,可以结合吴恩达老师深度学习卷积神经网络的课理
#解,会非常清晰
# module construction
class Net(torch.nn.Module):
    """CNN for 28x28 single-channel MNIST digits: three conv + three max-pool
    stages followed by three bias-free fully-connected layers (10 logits out).

    Spatial trace for a 28x28 input:
    conv1(k5) -> 24x24, pool1(k2,s2) -> 12x12, conv2(k5) -> 8x8,
    pool2(k3,s1) -> 6x6, conv3(k2,s2,p1) -> 4x4, pool3(k2,s2) -> 2x2,
    giving 24 channels * 2 * 2 = 96 features for fc1.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Convolution stack: 1 -> 10 -> 20 -> 24 channels.
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5, stride=1, padding=0)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5, stride=1, padding=0)
        self.conv3 = torch.nn.Conv2d(20, 24, kernel_size=2, stride=2, padding=1)
        # Pooling stack (no learned parameters).
        self.pooling1 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.pooling2 = torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=0)
        self.pooling3 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # Classifier head: 96 -> 48 -> 24 -> 10, all without bias terms.
        self.fc1 = torch.nn.Linear(96, 48, bias=False)
        self.fc2 = torch.nn.Linear(48, 24, bias=False)
        self.fc3 = torch.nn.Linear(24, 10, bias=False)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) raw class logits."""
        n = x.size(0)
        # Three conv -> ReLU -> pool stages.
        for conv, pool in ((self.conv1, self.pooling1),
                           (self.conv2, self.pooling2),
                           (self.conv3, self.pooling3)):
            x = pool(F.relu(conv(x)))
        # Flatten to (N, 96) before the fully-connected head; the final layer
        # returns raw logits (CrossEntropyLoss applies softmax itself).
        flat = x.view(n, -1)
        return self.fc3(self.fc2(self.fc1(flat)))
# Instantiate the model.
model = Net()
# Prefer GPU 0 when CUDA is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
model.to(device)  # moves parameters and buffers onto the chosen device
# Loss and optimizer: cross-entropy (expects raw logits) paired with
# SGD + momentum over all model parameters.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optimz.SGD(model.parameters(), lr=0.01, momentum=0.5)
# dataset train and test
def train(epoch):
    """Run one training epoch over train_loader, logging the mean loss
    of every 300-batch window.

    Args:
        epoch: zero-based epoch index, used only in the log line.
    """
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, labels = data
        # Move the batch to the same device as the model.
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()  # clear gradients accumulated by the last step
        y_pred = model(inputs)  # forward pass -> raw logits
        loss = criterion(y_pred, labels)
        loss.backward()  # backpropagate
        optimizer.step()  # update parameters
        running_loss += loss.item()  # .item() turns the 0-dim tensor into a float
        if batch_idx % 300 == 299:  # report once per 300 mini-batches
            # BUG FIX: previously printed `loss / 300` — i.e. only the LAST
            # batch's loss divided by 300 — while the accumulated running_loss
            # went unused. Print the window mean and reset the accumulator so
            # each report covers only its own 300 batches.
            print("[%d %5d] loss:%.3f" % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0
def test():
    """Report classification accuracy (integer percent) over test_loader."""
    correct, total = 0, 0
    # Inference only: disable autograd bookkeeping for speed and memory.
    with torch.no_grad():
        for images, targets in test_loader:
            images, targets = images.to(device), targets.to(device)
            logits = model(images)
            # Predicted class = index of the largest logit in each row.
            predicted = logits.argmax(dim=1)
            total += targets.size(0)  # .size(0) yields a plain int
            correct += (predicted == targets).sum().item()
    print('Test Accuracy is: %d %%' % (correct * 100 / total))
if __name__ == '__main__':
    # Train for 5 epochs, evaluating on the test set after each one.
    num_epochs = 5
    for epoch in range(num_epochs):
        train(epoch)
        test()
# % 97%-98% 提升1个百分点 换个方向说确实错误率从3%-2% 降低了1/3 有30%的改进。 所以看论文 看清从什么角度