Using .max() is against the law at our school, so we had no choice but to try implementing it with a neural network instead (just kidding).
The real point is just a quick walkthrough of the basic steps for building a neural network in torch; personally I find it a lot clearer and more concise than the official PyTorch tutorial...
Inspired by a Zhihu post: given a = [2, 3, 9, 1, 0], find the largest number in it.
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as Data
# Note that the given array has length 5 and every entry is below 10, so we can build the dataset with length 5 and an upper limit of 10:
array_length = 5
array_upper_limit = 10
num_examples = 1000
# Generate a training set of 1000 examples. Each label is a one-hot style vector: 1 wherever the array attains its maximum, 0 everywhere else.
features = torch.randint(0, array_upper_limit, (num_examples, array_length))
labels = torch.tensor([[0 if j != torch.max(i) else 1 for j in i] for i in features])
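# (Optional sanity check, not part of the recipe) Inspect one generated example.
# An array like [2, 3, 9, 1, 0] should get the label [0, 0, 1, 0, 0]:
print(features[0], labels[0])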
# Load the data
batch_size = 1
dataset = Data.TensorDataset(features, labels)
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
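# (Optional sanity check) Peek at one batch to confirm the shapes;
# X should be (batch_size, array_length) and y should match it:
X0, y0 = next(iter(data_iter))
print(X0.shape, y0.shape)  # torch.Size([1, 5]) torch.Size([1, 5])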
# Define the neural network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # First 1-D convolution layer
        self.conv1 = nn.Conv1d(1, 6, 2)
        # Second 1-D convolution layer
        self.conv2 = nn.Conv1d(6, 10, 2)
        # Fully connected layer
        self.linear2 = nn.Linear(10 * (array_length - 2), array_length)

    def forward(self, x):
        # Add a channel dimension: (batch, length) -> (batch, 1, length)
        x = x.unsqueeze(1)
        x = x.float()
        x = self.conv1(x)
        x = nn.functional.relu(x)
        x = self.conv2(x)
        x = nn.functional.relu(x)
        # Flatten and map to one score per array position
        x = self.linear2(x.view(-1, 10 * (array_length - 2)))
        return x
net = Net()
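# (Optional sanity check) Push a dummy batch through the untrained network:
# each kernel of size 2 shortens the sequence by 1 (5 -> 4 -> 3), and the
# final linear layer maps the 10 * 3 flattened features back to 5 scores.
with torch.no_grad():
    print(net(torch.randint(0, array_upper_limit, (2, array_length))).shape)  # torch.Size([2, 5])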
# Use mean squared error between the network output and the one-hot label as the loss
loss = nn.MSELoss()
# Optimize with stochastic gradient descent, learning rate 0.001
optimizer = torch.optim.SGD(net.parameters(), lr=0.001)
# Train the network
num_epochs = 10
for epoch in range(num_epochs):
    train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
    for X, y in data_iter:
        optimizer.zero_grad()
        y_hat = net(X)
        l = loss(y_hat, y.float())
        l.backward()
        optimizer.step()
        train_l_sum += l.item()
        # Count a prediction as correct when the highest score lands on the true maximum
        train_acc_sum += (y_hat.argmax(dim=1) == y.argmax(dim=1)).sum().item()
        n += y.shape[0]
    print(f"epoch {epoch + 1}, loss {train_l_sum:f}, train acc {train_acc_sum / n:.3f}")
# Training finishes in about 30 seconds. Let's wrap the trained model into a function:
def torch_maximum(array):
    # Score every position and return the element the network ranks highest
    return array[net(torch.tensor(array).view(-1, array_length)).argmax()]
# Test it!
print(torch_maximum([2, 3, 9, 1, 0]))
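# A few more spot checks against Python's built-in max (the network can occasionally
# miss; it is, after all, a neural network doing max()'s job):
for _ in range(3):
    a = torch.randint(0, array_upper_limit, (array_length,)).tolist()
    print(a, torch_maximum(a), max(a))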