import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
sys.path.append("..")
import painting as pt
#1. Fetch and load the data
batch_size=256
# Project helper from painting (imported as pt); presumably returns
# train/test DataLoaders over Fashion-MNIST batched at batch_size —
# confirm against painting.load_data_fashion_mnist.
train_iter,test_iter=pt.load_data_fashion_mnist(batch_size)
"""
softmax回归的输出层是一个全连接层,因此用一个线性模块就行,由于前面我们数据
返回的每个batch样本x的形状为(batch_size,1,28,28),因此要先用view()将x的
形状转换成(batch_size,784)才送入全连接层
"""
#2. Define and initialize the model
num_inputs=784   # 28*28 pixels per flattened Fashion-MNIST image
num_outputs=10   # 10 clothing classes
class LinearNet(nn.Module):
    """Softmax regression as a single fully connected layer.

    Images arrive as (batch, 1, 28, 28) and are flattened to
    (batch, num_inputs) before the linear projection to num_outputs
    class scores.
    """

    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):
        # Collapse all non-batch dimensions, then project to class scores.
        flat = x.view(x.shape[0], -1)
        return self.linear(flat)
# Instantiate the one-layer model. Note: this binding of `net` is
# replaced by the nn.Sequential version built further down the script.
net=LinearNet(num_inputs,num_outputs)
# We wrap the reshape step in a reusable FlattenLayer (also recorded in
# the painting module for later use).
class FlattenLayer(nn.Module):
    """Reshape a (batch, *dims) tensor into (batch, prod(dims))."""

    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):
        batch = x.shape[0]
        return x.view(batch, -1)
#3. With FlattenLayer we can define the model more conveniently.
from collections import OrderedDict

# An OrderedDict gives each sub-module a name, so the layers can later be
# reached by attribute (net.flatten, net.linear).
net=nn.Sequential(
    OrderedDict([
        ('flatten',FlattenLayer()),
        ('linear',nn.Linear(num_inputs,num_outputs))
    ])
)
# Initialize the weights from N(0, 0.01^2) and the biases to zero.
init.normal_(net.linear.weight,mean=0,std=0.01)
init.constant_(net.linear.bias,val=0)
#4. Softmax and cross-entropy loss: CrossEntropyLoss fuses both into one
#   numerically stable operation, so the model emits raw scores (logits).
loss=nn.CrossEntropyLoss()
#5. Optimizer: plain SGD over all model parameters.
optimizer=torch.optim.SGD(net.parameters(),lr=0.1)
#6. Train the model.
num_epochs=5
# NOTE(review): pt.train appears to take (net, train_iter, test_iter, loss,
# num_epochs, batch_size, params, lr, optimizer); params/lr are None here
# because the optimizer already holds them — confirm against painting.train.
pt.train(net,train_iter,test_iter,loss,num_epochs,batch_size,None,None,optimizer)