PyTorch Coding

Network definition:

The __init__() function defines the network's layers.

The forward() function defines how the network computes its output from those layers.

Example (you can print the model to inspect its structure directly):

import torch
import torch.nn as nn

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()

        self.rnn = nn.RNN(
            input_size=1,
            hidden_size=32,      # rnn hidden units
            num_layers=1,        # number of rnn layers
            batch_first=True,    # input & output tensors are (batch_size, time_step, input_size)
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        # x:       (batch_size, time_step, input_size)
        # h_state: (n_layers, batch_size, hidden_size)
        # y:       (batch_size, time_step, output_size)
        r_out, h_state = self.rnn(x, h_state)

        # apply the output layer at every time step
        outs = []
        for time_step in range(r_out.size(1)):
            outs.append(self.out(r_out[:, time_step, :]))
        return torch.stack(outs, dim=1), h_state

    '''
    # equivalent forward(): flatten to 2D, apply the linear layer once, reshape back to 3D
    def forward(self, x, h_state):
        r_out, h_state = self.rnn(x, h_state)
        r_out_reshaped = r_out.view(-1, 32)    # 2D: (batch_size * time_step, hidden_size)
        outs = self.out(r_out_reshaped)
        outs = outs.view(-1, TIME_STEP, 1)     # 3D: (batch_size, time_step, output_size)
        return outs, h_state
    '''

rnn = RNN()
print(rnn)
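Printing the model lists its layers; with a recent PyTorch version the output looks roughly like this (the exact format varies slightly across versions):

"""
RNN(
  (rnn): RNN(1, 32, batch_first=True)
  (out): Linear(in_features=32, out_features=1, bias=True)
)
"""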

Network training:

Choose an optimizer:  optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)

Define a loss function:  loss_func = nn.MSELoss()

Then compute the loss, run the backward pass, and update the parameters:

loss = loss_func(prediction, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()

Example:

import numpy as np
import matplotlib.pyplot as plt

TIME_STEP = 10   # length of each training sequence
LR = 0.02        # learning rate

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.MSELoss()

h_state = None   # initial hidden state; None makes PyTorch use all zeros

plt.figure(1, figsize=(12, 5))
plt.ion()   # interactive plotting, updated every step

for step in range(60):
    # use sin(t) on a sliding window of the time axis to predict cos(t)
    start, end = step * np.pi, (step + 1) * np.pi
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
    x_np = np.sin(steps)
    y_np = np.cos(steps)

    # add the batch and feature dimensions: (1, time_step, 1)
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])

    prediction, h_state = rnn(x, h_state)
    h_state = h_state.data   # detach the hidden state so gradients do not flow into previous windows

    loss = loss_func(prediction, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    plt.plot(steps, y_np.flatten(), 'r-')
    plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
    plt.draw()
    plt.pause(0.05)

plt.ioff()
plt.show()

The example above is the RNN regression code from Morvan Python (莫烦python), so the network can be built directly with nn.RNN.

To use an LSTM instead, switch to nn.LSTM, as follows:

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()

        self.rnn = nn.LSTM(     # an LSTM works much better than nn.RNN() here
            input_size=28,      # number of pixels in each image row
            hidden_size=64,     # rnn hidden units
            num_layers=1,       # number of rnn layers
            batch_first=True,   # input & output tensors put batch size first, e.g. (batch, time_step, input_size)
        )

        self.out = nn.Linear(64, 10)    # output layer

    def forward(self, x):
        # x shape     (batch, time_step, input_size)
        # r_out shape (batch, time_step, hidden_size)
        # h_n shape   (n_layers, batch, hidden_size)   an LSTM carries two states: h_n is the hidden state, h_c is the cell state
        # h_c shape   (n_layers, batch, hidden_size)
        r_out, (h_n, h_c) = self.rnn(x, None)   # None means the initial hidden state is all zeros

        # take the r_out output at the last time step
        # r_out[:, -1, :] here has the same value as h_n
        out = self.out(r_out[:, -1, :])
        return out

rnn = RNN()
print(rnn)
"""
RNN (
  (rnn): LSTM(28, 64, batch_first=True)
  (out): Linear (64 -> 10)
)
"""

If you don't want a predefined recurrent layer like the RNN and LSTM above and would rather define your own network, you can describe it layer by layer with nn.Sequential:

class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()

        # encoder: compress the input
        self.encoder = nn.Sequential(
            nn.Linear(28*28, 128),
            nn.Tanh(),
            nn.Linear(128, 64),
            nn.Tanh(),
            nn.Linear(64, 12),
            nn.Tanh(),
            nn.Linear(12, 3),   # compress to 3 features for 3D visualization
        )
        # decoder: reconstruct the input
        self.decoder = nn.Sequential(
            nn.Linear(3, 12),
            nn.Tanh(),
            nn.Linear(12, 64),
            nn.Tanh(),
            nn.Linear(64, 128),
            nn.Tanh(),
            nn.Linear(128, 28*28),
            nn.Sigmoid(),       # activation keeps output values in (0, 1)
        )

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded

autoencoder = AutoEncoder()
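A minimal training sketch for this autoencoder, assuming flattened 28x28 images with values in [0, 1] (a random dummy batch is used here in place of real data); the reconstruction is scored against the input itself with MSE loss:

import torch
import torch.nn as nn

optimizer = torch.optim.Adam(autoencoder.parameters(), lr=0.005)
loss_func = nn.MSELoss()

# dummy batch standing in for flattened images with values in (0, 1)
b_x = torch.rand(64, 28*28)

encoded, decoded = autoencoder(b_x)
loss = loss_func(decoded, b_x)   # reconstruction error against the original input
optimizer.zero_grad()
loss.backward()
optimizer.step()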

 
