# Artificial Intelligence — Assignment 3
"""AI Assignment 3: three-class classification with a small feed-forward net.

Generates three Gaussian point clusters (300 points total), trains a
one-hidden-layer network with cross-entropy loss, and animates the predicted
class assignments with matplotlib while training.
"""
import os

import torch
import torch.nn.functional as F

# Allow duplicate OpenMP runtimes (common torch + matplotlib conflict on Windows).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'


def make_data():
    """Return (x, y): 300 random 2-D points in three Gaussian clusters.

    Returns:
        x: FloatTensor of shape (300, 2) — point coordinates.
        y: LongTensor of shape (300,) — class labels 0, 1, 2 (100 each).
    """
    n_data = torch.ones(100, 2)
    x0 = torch.normal(2 * n_data, 1)    # cluster around (2, 2)   -> label 0
    y0 = torch.zeros(100)
    x1 = torch.normal(-2 * n_data, 1)   # cluster around (-2, -2) -> label 1
    y1 = torch.ones(100)
    x2 = torch.normal(torch.Tensor([[0, -3]] * 100), 1)  # around (0, -3) -> label 2
    y2 = torch.ones(100) * 2
    x = torch.cat((x0, x1, x2), 0).type(torch.FloatTensor)
    y = torch.cat((y0, y1, y2)).type(torch.LongTensor)
    # NOTE: torch.autograd.Variable wrapping removed — it has been a no-op
    # (deprecated) since PyTorch 0.4; plain tensors work with autograd directly.
    return x, y


class Net(torch.nn.Module):
    """One-hidden-layer MLP: Linear -> ReLU -> Linear, returning raw class scores."""

    def __init__(self, n_features, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        # Return logits; CrossEntropyLoss applies log-softmax internally.
        return self.predict(x)


def main():
    """Train the network for 100 steps, re-plotting predictions every 2 steps."""
    # Local import keeps the module importable where matplotlib is unavailable.
    import matplotlib.pyplot as plt

    x, y = make_data()
    net = Net(2, 10, 3)
    print(net)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
    loss_func = torch.nn.CrossEntropyLoss()

    plt.ion()  # interactive mode: plt.pause() below drives the animation
    for t in range(100):
        out = net(x)                # logits, shape (300, 3)
        loss = loss_func(out, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if t % 2 == 0:
            plt.cla()
            prediction = torch.max(F.softmax(out, dim=1), 1)[1]
            pred_y = prediction.data.numpy().squeeze()
            target_y = y.data.numpy()
            plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1],
                        c=pred_y, s=100, lw=0, cmap='RdYlGn')
            # Divide by the actual sample count instead of a hard-coded 300.
            accuracy = float((pred_y == target_y).sum()) / len(target_y)
            plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy,
                     fontdict={'size': 20, 'color': 'red'})
            plt.pause(0.1)
    plt.ioff()
    plt.show()


if __name__ == '__main__':
    main()