1. Construct the data and build the network
import torch

torch.manual_seed(1)    # reproducible

# fake data
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2*torch.rand(x.size())                 # noisy y data (tensor), shape=(100, 1)
def save():
    # build the network
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()

    # train
    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
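    # note: the torch.save calls shown in sections 2 and 3 below belong here,
    # at the end of save(), since net1 is local to this function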
2. Method 1: save and restore the entire network
(This saves and reloads the whole network object; it can be slow when the network is large.)
torch.save(net1, 'net.pkl')  # save the entire network (run at the end of save())
def restore_net():
    # restore the entire saved net1 into net2
    net2 = torch.load('net.pkl')
    prediction = net2(x)
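Note: starting with PyTorch 2.6, torch.load defaults to weights_only=True, which refuses to unpickle full module objects like the one saved above. A minimal sketch of the workaround, assuming a newer PyTorch and a checkpoint file you trust:

# pass weights_only=False only for files from a trusted source,
# since unpickling an arbitrary file can execute code
net2 = torch.load('net.pkl', weights_only=False)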
3. Method 2: save and restore only the parameters
(This saves only the network's parameters; it is faster and the file is smaller.)
torch.save(net1.state_dict(), 'net_params.pkl')  # save only the parameters (also run at the end of save())

# restore only the parameters: this loads all the saved parameters
# and copies them into a newly built network with the same architecture
def restore_params():
    # build net3 with the same architecture as net1
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    # copy the saved parameters into net3
    net3.load_state_dict(torch.load('net_params.pkl'))
    prediction = net3(x)
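Putting it together, a minimal usage sketch (assuming the two torch.save calls have been moved to the end of save() as noted above):

save()            # build, train, and save net1
restore_net()     # method 1: load the whole network object
restore_params()  # method 2: rebuild the network, then load the parameters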