# Classification (分类): toy two-class PyTorch demo
#coding=utf-8
import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
# ---- Build a toy 2-class dataset: two Gaussian clusters ----
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)   # cluster centered near (2, 2)
y0 = torch.zeros(100)              # label 0
x1 = torch.normal(-2 * n_data, 1)  # cluster centered near (-2, -2)
y1 = torch.ones(100)               # label 1
# Stack the two clusters: x is (200, 2) float features, y is (200,) labels.
# CrossEntropyLoss (used below) requires LongTensor class targets.
x = torch.cat((x0, x1), dim=0).type(torch.FloatTensor)
y = torch.cat((y0, y1),).type(torch.LongTensor)
# print() call form works on both Python 2 and 3
# (the original Python-2-only `print x.size()` statements fail on Python 3).
print(x.size())
print(y.size())
# NOTE(review): Variable is deprecated in modern PyTorch (plain tensors track
# gradients); kept to match the rest of the file.
x, y = Variable(x), Variable(y)
# plt.scatter(x.data.numpy()[:,0],x.data.numpy()[:,1],c=y.data.numpy(),s=100,lw=0,cmap='RdYlGn')
# plt.show()
class Net(nn.Module):
    """Two-layer MLP classifier: Linear -> ReLU -> Linear, returning raw logits."""

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # Submodule registration order (hidden, then out) matches the original,
        # so seeded parameter initialization is identical.
        self.hidden = nn.Linear(n_feature, n_hidden)
        self.out = nn.Linear(n_hidden, n_output)

    def forward(self, x):
        hidden_activation = F.relu(self.hidden(x))
        return self.out(hidden_activation)
# Instantiate the classifier: 2 input features -> 10 hidden units -> 2 class logits.
net = Net(2, 10, 2)
print(net)  # print() call form works on both Python 2 and 3
# Train the network.
# CrossEntropyLoss applies log-softmax internally, so the net outputs raw logits.
optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()
# Visualize the training process: interactive mode lets the scatter plot
# redraw inside the training loop below.
plt.ion()
plt.show()
# Train for 100 steps, redrawing the decision scatter every other step.
for i in range(100):
    out = net(x)                  # raw logits, shape (200, 2)
    loss = loss_func(out, y)
    optimizer.zero_grad()         # clear gradients accumulated last step
    loss.backward()
    optimizer.step()
    if i % 2 == 0:
        plt.cla()
        # argmax over the class dimension; dim=1 passed explicitly —
        # F.softmax without dim is deprecated and ambiguous.
        prediction = torch.max(F.softmax(out, dim=1), 1)[1]
        pred_y = prediction.data.numpy().squeeze()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
        # BUG FIX: original `sum(pred_y==target_y)/200` is integer division on
        # Python 2 and always yields 0; .mean() on the boolean comparison array
        # gives the float accuracy directly.
        accuracy = (pred_y == target_y).mean()
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)
plt.ioff()
plt.show()