import torch
import torch.nn as nn
import torch.nn.functional as F
1. Convolutional network (CNN)
class Net(nn.Module):
    def __init__(self):  # constructor arguments go here as needed
        super(Net, self).__init__()
        # Convolution: input channels, output channels (number of kernels), kernel size
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)
        # Pooling: kernel size
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=36, kernel_size=3)
        # Pooling: kernel size
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        # 36 channels * 6 * 6 spatial positions = 1296 features after flattening
        self.fc1 = nn.Linear(1296, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        # Input shape: (batch, 3, 32, 32) -- PyTorch convs expect channels first
        x = self.pool1(F.relu(self.conv1(x)))   # 32x32 -> 28x28 -> 14x14
        x = self.pool2(F.relu(self.conv2(x)))   # 14x14 -> 12x12 -> 6x6
        x = x.view(-1, 1296)                    # flatten to (batch, 1296)
        x = self.fc2(F.relu(self.fc1(x)))       # no activation on the output logits
        return x
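A quick sanity check of the 1296 figure (a minimal sketch; the batch size and random tensor are illustrative only): 32x32 shrinks to 28x28 after conv1, 14x14 after pool1, 12x12 after conv2, and 6x6 after pool2, giving 36 * 6 * 6 = 1296 flattened features.
# Illustrative shape check, not part of the original notes.
net = Net()
dummy = torch.randn(4, 3, 32, 32)   # batch of 4 RGB 32x32 images
print(net(dummy).shape)             # expected: torch.Size([4, 10])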
2. RNN / LSTM / GRU networks
class Net(nn.Module):
    def __init__(self, input_size, hidden_size, out_num):
        super(Net, self).__init__()
        self.hidden_size = hidden_size
        # RNN: input feature size, hidden state size, two stacked layers
        self.rnn = nn.RNN(input_size=input_size, hidden_size=self.hidden_size, num_layers=2)
        # LSTM: same constructor arguments
        self.lstm1 = nn.LSTM(input_size=input_size, hidden_size=self.hidden_size, num_layers=2)
        # GRU: same constructor arguments
        self.gru = torch.nn.GRU(input_size=input_size, hidden_size=self.hidden_size, num_layers=2)
        self.fc1 = nn.Linear(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, out_num)

    def forward(self, X):
        # Input shape: (seq, batch, feature)
        # Pick one recurrent layer; the commented calls are interchangeable alternatives:
        # r_out, h_n = self.rnn(X)             # RNN returns output and final hidden state
        # r_out, h_n = self.gru(X)             # GRU has the same return signature
        r_out, (h_n, h_c) = self.lstm1(X)      # LSTM also returns the cell state h_c
        r_out = F.leaky_relu(self.fc1(r_out))  # Linear applies over the last dimension
        out = self.out(r_out)
        return out
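A minimal usage sketch for the (seq, batch, feature) layout (all sizes here are illustrative assumptions, not from the original notes):
# Illustrative sizes: sequence length 5, batch 8, 10 input features.
net = Net(input_size=10, hidden_size=20, out_num=1)
X = torch.randn(5, 8, 10)
print(net(X).shape)                 # expected: torch.Size([5, 8, 1])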
# Model (state_num is the input feature size; state_num, hidden_size, out_num are placeholders)
model = Net(state_num, hidden_size, out_num)
# model = model.to('cuda')
# Loss
LOSS = nn.MSELoss()
# Optimizer (note: Adam takes no momentum argument; it manages moments via betas)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
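# If an explicit momentum term is wanted, SGD is the optimizer that accepts one
# (a drop-in alternative, not from the original notes):
# optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)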
# Training (trainDATA and labelDATA are placeholder tensors)
model.train()
for i in range(1000):
    # Forward pass
    y_pred = model(trainDATA)
    # Compute the loss
    loss = LOSS(y_pred, labelDATA)
    # Zero the gradients (or use optimizer.zero_grad())
    model.zero_grad()
    # Backward pass: compute the gradients of the weights and biases
    loss.backward()
    # Update the parameters
    optimizer.step()
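    # (Assumed addition, not in the original notes: log progress so the loss
    # can be seen falling during training.)
    if i % 100 == 0:
        print(f'iter {i}: loss = {loss.item():.6f}')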
# Save the model (this pickles the whole module object)
torch.save(model, 'lstm_T.pkl')
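# A more portable alternative (a sketch, not from the original notes) saves only
# the parameters; the class definition is then needed at load time:
# torch.save(model.state_dict(), 'lstm_T_state.pkl')
# model.load_state_dict(torch.load('lstm_T_state.pkl'))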
# Testing
# Load the model (the file name must match the one passed to torch.save above)
model = torch.load('lstm_T.pkl')
model.eval()
# Disable gradient tracking for inference
with torch.no_grad():
    y_pred = model(DATA)