"""Simple 0/1 binary classification with a small feed-forward net (PyTorch).

Generates two Gaussian point clouds (centered at (2, 2) and (-2, -2)),
labels them class 0 and class 1, trains a 2-10-2 network with
CrossEntropyLoss, and animates the predicted labels every other step.
"""
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt

# Toy dataset: 100 points around (2, 2) -> class 0,
#              100 points around (-2, -2) -> class 1.
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)   # class-0 features, shape (100, 2)
y0 = torch.zeros(100)              # class-0 labels
x1 = torch.normal(-2 * n_data, 1)  # class-1 features, shape (100, 2)
y1 = torch.ones(100)               # class-1 labels

x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # (200, 2) inputs
y = torch.cat((y0, y1), ).type(torch.LongTensor)    # (200,) integer class labels
# Variable is a no-op wrapper in torch >= 0.4; kept for backward compatibility.
x, y = Variable(x), Variable(y)

plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(),
            s=100, lw=0, cmap='RdYlGn')
plt.show()


class Net(torch.nn.Module):
    """Two-layer fully connected classifier returning raw logits.

    Args:
        n_features: number of input features per sample.
        n_hiddens: width of the hidden layer.
        n_output: number of output classes (logits).
    """

    def __init__(self, n_features, n_hiddens, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hiddens)
        self.predict = torch.nn.Linear(n_hiddens, n_output)

    def forward(self, x):
        # FIX: the original had no nonlinearity, so the two Linear layers
        # composed into a single affine map and the hidden layer added
        # nothing. ReLU makes the hidden layer effective.
        x = torch.relu(self.hidden(x))
        x = self.predict(x)  # raw logits -- no softmax here
        return x


net = Net(2, 10, 2)

plt.ion()  # interactive mode so the scatter plot can be redrawn live
plt.show()

optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
# CrossEntropyLoss expects raw logits plus integer class labels
# (it applies log-softmax internally).
loss_func = torch.nn.CrossEntropyLoss()

for t in range(100):
    out = net(x)  # raw logits, not class indices and not probabilities
    loss = loss_func(out, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if t % 2 == 0:
        plt.cla()
        # BUG FIX: torch.softmax requires an explicit `dim`; the original
        # call torch.softmax(out) raised TypeError. dim=1 normalizes over
        # the class logits. torch.max returns (values, indices); [1] picks
        # the predicted class index per sample.
        prediction = torch.max(torch.softmax(out, dim=1), 1)[1]
        pred_y = prediction.data.numpy().squeeze()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y,
                    s=100, lw=0, cmap='RdYlGn')
        accuracy = sum(pred_y == target_y) / 200.
        plt.text(0.5, 1, 'Accuracy=%.2f' % accuracy,
                 fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.01)

plt.ioff()
plt.show()
PyTorch (2): simple 0/1 binary classification
Latest recommended article published at 2024-07-06 20:34:55