- 人工神经网络 (ANN):
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    """A minimal two-layer fully connected network (ANN).

    Maps a 100-dimensional input vector to 10 output scores
    (raw logits, no final activation).
    """

    def __init__(self):
        super().__init__()
        # 100-dim input -> 64 hidden units -> 10 outputs
        self.fc1 = nn.Linear(100, 64)
        self.fc2 = nn.Linear(64, 10)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)


net = Net()
- 卷积神经网络 (CNN):
class CNN(nn.Module):
    """A small convolutional network for 32x32x3 images (e.g. CIFAR-10 size).

    conv(3->32, same padding) -> ReLU -> 2x2 max-pool -> flatten
    -> fc(8192->128) -> ReLU -> fc(128->10). Returns raw logits.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # padding=1 with a 3x3 kernel preserves the spatial size
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
        # halves spatial resolution: 32x32 -> 16x16
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.fc1 = nn.Linear(32 * 16 * 16, 128)  # assumes 32x32x3 input images
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        # Flatten all per-sample dims. Using flatten(x, 1) instead of
        # view(-1, 32*16*16) keeps the batch dimension intact and raises a
        # clear shape error on mismatched input sizes, rather than silently
        # producing a wrong batch size.
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x


cnn = CNN()
- 循环神经网络 (RNN):
class RNN(nn.Module):
    """Two-layer vanilla RNN over 10-dim input steps, scalar output.

    Takes a batch-first sequence (batch, seq, 10) and returns one value
    per sequence, read off the last time step.
    """

    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.RNN(input_size=10, hidden_size=50, num_layers=2, batch_first=True)
        self.fc = nn.Linear(50, 1)

    def forward(self, x, h0=None):
        # h0=None makes nn.RNN use an all-zero initial hidden state, so the
        # caller no longer has to pass one (matches the original comment's
        # "defaults to all zeros"); an explicit h0 still works as before.
        x, hn = self.rnn(x, h0)
        x = self.fc(x[:, -1, :])  # use only the last time step's output
        return x


rnn = RNN()
- 长短期记忆网络 (LSTM):
class LSTMNet(nn.Module):
    """Two-layer LSTM over 10-dim input steps, scalar output.

    Takes a batch-first sequence (batch, seq, 10) and returns one value
    per sequence, read off the last time step.
    """

    def __init__(self):
        super(LSTMNet, self).__init__()
        self.lstm = nn.LSTM(input_size=10, hidden_size=50, num_layers=2, batch_first=True)
        self.fc = nn.Linear(50, 1)

    def forward(self, x, h0=None):
        # h0 is an optional (h_0, c_0) tuple; None makes nn.LSTM use all-zero
        # initial states, so the caller no longer has to pass one (matches the
        # original comment's "h0/c0 default to all zeros"); an explicit state
        # tuple still works as before.
        x, (hn, cn) = self.lstm(x, h0)
        x = self.fc(x[:, -1, :])  # use only the last time step's output
        return x


lstm = LSTMNet()
- 自编码器 (AE):
class Autoencoder(nn.Module):
    """Single-hidden-layer autoencoder for flattened 28x28 (784-dim) inputs.

    Encodes to a 128-dim code with ReLU, decodes back to 784 dims with a
    sigmoid so outputs lie in [0, 1].
    """

    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(784, 128)
        self.decoder = nn.Linear(128, 784)

    def forward(self, x):
        code = F.relu(self.encoder(x))
        return torch.sigmoid(self.decoder(code))


autoencoder = Autoencoder()
- 生成对抗网络 (GAN):
# Discriminator: maps a flattened 28x28 image (784 dims) to a single
# real/fake probability in (0, 1).
class Discriminator(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 1)

    def forward(self, x):
        h = F.dropout(F.leaky_relu(self.fc1(x), 0.2), 0.3)
        h = F.dropout(F.leaky_relu(self.fc2(h), 0.2), 0.3)
        return torch.sigmoid(self.fc3(h))


# Generator: maps a z_dim-dimensional noise vector to a flattened 28x28
# image with values in [-1, 1] (tanh output).
class Generator(nn.Module):
    def __init__(self, z_dim):
        super().__init__()
        self.fc1 = nn.Linear(z_dim, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 784)

    def forward(self, x):
        h = F.leaky_relu(self.fc1(x), 0.2)
        h = F.leaky_relu(self.fc2(h), 0.2)
        return torch.tanh(self.fc3(h))


z_dim = 100
D = Discriminator()
G = Generator(z_dim)
- 变分自编码器 (VAE):
# VAE definition
class VariationalAutoencoder(nn.Module):
    """Variational autoencoder for flattened 28x28 inputs.

    Encoder: 784 -> 400 -> (mu, logvar), each z_dim-dimensional.
    Decoder: z_dim -> 400 -> 784, sigmoid output in [0, 1].
    forward() returns (reconstruction, mu, logvar).
    """

    def __init__(self, z_dim):
        super().__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc2_mu = nn.Linear(400, z_dim)
        self.fc2_logvar = nn.Linear(400, z_dim)
        self.fc3 = nn.Linear(z_dim, 400)
        self.fc4 = nn.Linear(400, 784)

    def encode(self, x):
        h1 = F.relu(self.fc1(x))
        return self.fc2_mu(h1), self.fc2_logvar(h1)

    def reparameterize(self, mu, logvar):
        # z = mu + sigma * eps, with eps ~ N(0, I); keeps sampling
        # differentiable w.r.t. mu and logvar.
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        h3 = F.relu(self.fc3(z))
        return torch.sigmoid(self.fc4(h3))

    def forward(self, x):
        mu, logvar = self.encode(x.view(-1, 784))
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar


z_dim = 20
vae = VariationalAutoencoder(z_dim)
- 强化学习 (RL) 的一个简单 DQN (Deep Q-Network):
# DQN definition
class DQN(nn.Module):
    """Q-network: maps a state vector to one Q-value per action (raw logits)."""

    def __init__(self, input_dim, hidden_dim, action_space):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, action_space)

    def forward(self, x):
        h = F.relu(self.fc1(x))
        h = F.relu(self.fc2(h))
        return self.fc3(h)


input_dim = 4  # e.g. state-space dimension of the CartPole-v1 environment
hidden_dim = 128
action_space = 2  # CartPole-v1 action space (left or right)
dqn = DQN(input_dim, hidden_dim, action_space)
使用 PyTorch 框架来实现简单的神经网络的基本代码示意。这里的代码只提供了每种类型网络的核心结构,还需要进一步编写代码来加载数据、训练模型、测试模型等。
于 2024-02-27 16:31:03 首次发布