Figure: the effect of different numbers of hidden units on the network
Figure: the effect of the dropout rate
Figure: the effect of the learning rate (SGD)
Figure: the Adam optimizer with different learning-rate settings
# Building a neural network -- classification task #
import torch
import matplotlib.pyplot as plt
import torch.nn.functional as F  # activation functions live here
import os
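# Workaround (added note): avoids the "duplicate OpenMP runtime" crash that some
# Anaconda/Windows installs hit when torch and matplotlib are imported together.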
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# x0, x1 are the data; y0, y1 are the labels
n_data = torch.ones(200, 2)         # base shape of the data
x0 = torch.normal(1.65*n_data, 1)   # class 0 samples (mean 1.65)
y0 = torch.zeros(200)               # class 0 labels
x1 = torch.normal(-1.65*n_data, 1)  # class 1 samples (mean -1.65)
y1 = torch.ones(200)                # class 1 labels
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # FloatTensor = 32-bit floating point
y = torch.cat((y0, y1), 0).type(torch.LongTensor)   # LongTensor = 64-bit integer
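# Optional sanity check (added, not part of the experiments): plot the raw
# two-class data once to confirm the two Gaussian blobs overlap only slightly.
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.title('raw data')
plt.show()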
# Build the neural network
class Net(torch.nn.Module):  # inherit from torch's Module
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()  # call the parent's __init__
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # hidden layer (linear)
        self.out = torch.nn.Linear(n_hidden, n_output)      # output layer (linear)
    def forward(self, x):
        # forward pass: map the input to raw class scores
        x = F.relu(self.hidden(x))  # activation applied to the hidden layer's linear output
        x = self.out(x)             # raw scores (logits), not yet predictions
        return x
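# Quick shape check (added for illustration): a batch of 4 two-feature points
# should come out as 4 rows of 2 class scores.
print(Net(n_feature=2, n_hidden=10, n_output=2)(torch.randn(4, 2)).shape)  # torch.Size([4, 2])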
# Experiment 1: vary the number of hidden units (the loop uses the first 6 values)
ee = [10, 30, 50, 70, 150, 160]
for ii in range(6):
    n_hidden = ee[ii]
    net = Net(n_feature=2, n_hidden=n_hidden, n_output=2)  # as many outputs as classes
    # Train the network
    optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
    # When computing the loss, note that the targets are NOT one-hot;
    # they are a 1D tensor of class indices with shape (batch,),
    # while the predictions are a 2D tensor with shape (batch, n_classes).
loss_func = torch.nn.CrossEntropyLoss()
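    # Worked example (added): for scores [[2.0, 0.5]] and target [0],
    # loss = -log(softmax([2.0, 0.5])[0]) = -log(0.8176) ≈ 0.201.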
plt.subplot(2,3,ii+1)
    for t in range(300):
        out = net(x)              # raw class scores
        loss = loss_func(out, y)  # loss between scores and targets
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # Visualization
        if t % 12 == 0:
            # the class with the highest softmax probability is the prediction
            # (softmax is monotonic, so argmax of the raw scores gives the same class)
            prediction = torch.max(F.softmax(out, dim=1), 1)[1]
            pred_y = prediction.data.numpy().squeeze()
            target_y = y.data.numpy()
            plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
            accuracy = sum(pred_y == target_y) / 400.  # fraction of the 400 points classified correctly
            plt.text(3, 4, 'Acc=%.2f' % accuracy, fontdict={'size': 10, 'color': 'green'})
            plt.text(3, 2, 'hidden number=%.0f' % n_hidden, fontdict={'size': 10, 'color': 'red'})
plt.show()
# Experiment 2: vary the SGD learning rate
ee = [0.10, 0.30, 0.50, 0.70, 0.01, 1.60]  # learning rates
for ii in range(6):
    net = Net(n_feature=2, n_hidden=30, n_output=2)  # as many outputs as classes
    lr_val = ee[ii]
    # Train the network
    optimizer = torch.optim.SGD(net.parameters(), lr=lr_val)
    # As above: targets are 1D class indices of shape (batch,),
    # predictions are a 2D tensor of shape (batch, n_classes).
loss_func = torch.nn.CrossEntropyLoss()
plt.subplot(2,3,ii+1)
    for t in range(300):
        out = net(x)              # raw class scores
        loss = loss_func(out, y)  # loss between scores and targets
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # Visualization
        if t % 12 == 0:
            # the class with the highest softmax probability is the prediction
            prediction = torch.max(F.softmax(out, dim=1), 1)[1]
            pred_y = prediction.data.numpy().squeeze()
            target_y = y.data.numpy()
            plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
            accuracy = sum(pred_y == target_y) / 400.  # compute accuracy
            plt.text(3, 4, 'Acc=%.2f' % accuracy, fontdict={'size': 10, 'color': 'green'})
            plt.title('SGD lr=%.2f' % lr_val, fontdict={'size': 10, 'color': 'red'})
plt.show()
# Experiment 3: vary the Adam learning rate
ee = [0.10, 0.30, 0.50, 0.70, 0.01, 1.60]  # learning rates
for ii in range(6):
    net = Net(n_feature=2, n_hidden=30, n_output=2)  # as many outputs as classes
    lr_val = ee[ii]
    # Train the network with Adam instead of SGD
    loss_func = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=lr_val)
plt.subplot(2,3,ii+1)
    for t in range(300):
        out = net(x)              # raw class scores
        loss = loss_func(out, y)  # loss between scores and targets
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # Visualization
        if t % 12 == 0:
            # the class with the highest softmax probability is the prediction
            prediction = torch.max(F.softmax(out, dim=1), 1)[1]
            pred_y = prediction.data.numpy().squeeze()
            target_y = y.data.numpy()
            plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
            accuracy = sum(pred_y == target_y) / 400.  # compute accuracy
            plt.text(3, 4, 'Acc=%.2f' % accuracy, fontdict={'size': 10, 'color': 'green'})
            plt.title('Adam lr=%.2f' % lr_val, fontdict={'size': 10, 'color': 'red'})
plt.show()
# Experiment 4: vary the dropout rate
ee = [0.10, 0.30, 0.50, 0.70, 0.40, 0.60]  # dropout rates
# Build a variant of the network with a dropout layer between hidden and output;
# defining it once here (with the rate as a parameter) avoids redefining the
# class inside the loop
class DropoutNet(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output, p_drop):
        super(DropoutNet, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # hidden layer (linear)
        self.dropout = torch.nn.Dropout(p=p_drop)           # dropout for training
        self.out = torch.nn.Linear(n_hidden, n_output)      # output layer (linear)
    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.dropout(x)  # apply dropout (the layer has no effect unless called here)
        x = self.out(x)      # raw scores (logits), not yet predictions
        return x
for ii in range(6):
    p_drop = ee[ii]
    net = DropoutNet(n_feature=2, n_hidden=30, n_output=2, p_drop=p_drop)
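    # Note (added): Dropout is only active in training mode. For a clean
    # accuracy readout you would normally call net.eval() before measuring
    # and net.train() afterwards; here accuracy is measured with dropout on.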
    # Train the network with a fixed learning rate (0.01, an assumed typical
    # Adam setting) so that only the dropout rate varies between panels
    loss_func = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
plt.subplot(2,3,ii+1)
    for t in range(300):
        out = net(x)              # raw class scores
        loss = loss_func(out, y)  # loss between scores and targets
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # Visualization
        if t % 12 == 0:
            # the class with the highest softmax probability is the prediction
            prediction = torch.max(F.softmax(out, dim=1), 1)[1]
            pred_y = prediction.data.numpy().squeeze()
            target_y = y.data.numpy()
            plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
            accuracy = sum(pred_y == target_y) / 400.  # compute accuracy
            plt.text(3, 4, 'Acc=%.2f' % accuracy, fontdict={'size': 10, 'color': 'green'})
            plt.title('dropout p=%.2f' % p_drop, fontdict={'size': 10, 'color': 'red'})
plt.show()