import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
# Build a synthetic three-class dataset: 100 samples per class, 2 features each.
base = torch.ones(100, 2)
x0 = torch.normal(2 * base, 1)    # class 0: Gaussian cloud centered at (2, 2)
y0 = torch.zeros(100)             # class 0 labels (0)
x1 = torch.normal(-2 * base, 1)   # class 1: Gaussian cloud centered at (-2, -2)
y1 = torch.ones(100)              # class 1 labels (1)
x2 = torch.normal(0, 2 * base)    # class 2: centered at origin with std 2
y2 = torch.ones(100) * 2          # class 2 labels (2)
# Stack the three clouds into one dataset: features (300, 2), labels (300,).
x = torch.cat((x0, x1, x2), dim=0).type(torch.FloatTensor)
y = torch.cat((y0, y1, y2), dim=0).type(torch.LongTensor)
# Define the classifier model.
class Net(torch.nn.Module):
    """Two-layer MLP classifier: Linear -> ReLU -> Linear, returning raw logits."""

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # One hidden layer plus an output (logit) layer.
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.out = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        hidden_act = F.relu(self.hidden(x))  # non-linear hidden representation
        # No softmax here: CrossEntropyLoss expects unnormalized logits.
        return self.out(hidden_act)
# Instantiate the classifier: 2 input features, 40 hidden units, 3 classes.
net = Net(n_feature=2, n_hidden=40, n_output=3)
# Optimizer and loss: plain SGD over all parameters; CrossEntropyLoss takes
# raw logits plus int64 class indices (matches y's LongTensor dtype).
optimizer = torch.optim.SGD(net.parameters(), lr=0.05)
loss_func = torch.nn.CrossEntropyLoss()
# Turn on interactive plotting so the scatter updates while training runs.
plt.ion()
plt.show()

# Training loop: full-batch gradient descent for 100 steps.
for t in range(100):
    out = net(x)              # raw logits, shape (300, 3)
    loss = loss_func(out, y)  # CrossEntropyLoss applies log-softmax internally
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Visualize predictions every other step.
    if t % 2 == 0:
        plt.cla()
        # Predicted class = argmax over the class dimension. Softmax is
        # monotonic, so applying it first (as the original did, with the
        # deprecated implicit-dim F.softmax(out)) is unnecessary for argmax.
        pred = torch.max(out, 1)[1]
        pred_y = pred.data.numpy().squeeze()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1],
                    c=pred_y, s=100, lw=0, cmap='RdYlBu')
        # Accuracy over the actual sample count (was hard-coded to 300).
        accuracy = float((pred_y == target_y).sum()) / len(target_y)
        plt.text(1, -5, 'Accuracy=%.2f' % accuracy,
                 fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()