import torch
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as Data
import matplotlib.pyplot as plt
from torch.utils.data import Dataset,DataLoader
import pandas as pd
import numpy as np
# data set
train = pd.read_csv('Thirdtest.csv')
# column 0 is the label
train_label = train.iloc[:, [0]]  # read a single column (keeps the 2-D shape)
# columns 1~16 are the features
train_data = train.iloc[:, 1:]
# convert to NumPy arrays
train_label_np = train_label.values
train_data_np = train_data.values
# convert to tensors: float features, LongTensor class-index labels
train_label_ts = torch.from_numpy(train_label_np).type(torch.LongTensor)
train_data_ts = torch.from_numpy(train_data_np).type(torch.FloatTensor)
print(train_label_ts.shape)
print(type(train_label_ts))
train_dataset = Data.TensorDataset(train_data_ts, train_label_ts)
train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
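# Optional sanity check (a sketch; `sample_x`/`sample_y` are throwaway names):
# peek at one batch to confirm the shapes the network below expects,
# i.e. features (64, 16) and labels (64, 1) when the dataset has >= 64 rows.
sample_x, sample_y = next(iter(train_loader))
print(sample_x.shape, sample_y.shape)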
# make a network
import torch.nn.functional as F  # activation functions live here

class Net(torch.nn.Module):  # inherit from torch's Module
    def __init__(self):
        super(Net, self).__init__()  # run the parent __init__
        self.hidden1 = torch.nn.Linear(16, 30)  # hidden layer: 16 features in, 30 units out
        self.out = torch.nn.Linear(30, 3)  # output layer: 30 in, 3 class scores out

    def forward(self, x):
        # forward pass: map the input to raw class scores (logits)
        x = F.relu(self.hidden1(x))  # activation over the hidden layer's linear output
        x = self.out(x)  # logits; these are not predictions yet, see the sketch below
        return x
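# Minimal sketch (not part of training): as the comment above notes, the logits
# are not predictions yet; the predicted class is the argmax over the 3 logits.
# `demo_net` is a throwaway instance used only for illustration.
demo_net = Net()
demo_logits = demo_net(train_data_ts[:5])  # logits, shape (5, 3)
demo_pred = torch.max(demo_logits, 1)[1]  # predicted class indices, shape (5,)
print(demo_pred)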
# single-net version with one optimizer (kept for reference):
# net = Net()
# optimizer = torch.optim.SGD(net.parameters(), lr=0.0001, momentum=0.001)
# loss_func = torch.nn.CrossEntropyLoss()  # targets are class indices, NOT one-hot
# loss_list = []
# for epoch in range(500):
#     for step, (b_x, b_y) in enumerate(train_loader):
#         b_y = b_y.squeeze(1)
#         output = net(b_x)
#         loss = loss_func(output, b_y)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
#         loss_list.append(float(loss))
#         print("Epoch:", epoch, "Step:", step, "loss:", float(loss))
# create one net per optimizer
net_SGD = Net()
net_Momentum = Net()
net_RMSprop = Net()
net_Adam = Net()
nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]
# define one optimizer per net
LR = 0.0001
opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR, momentum=0.001)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]
loss_func = torch.nn.CrossEntropyLoss()
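# Shape sketch for CrossEntropyLoss (illustrative values only): it expects raw
# logits of shape (N, C) and class-index targets of shape (N,), which is why
# b_y gets squeezed in the loop below.
ce_logits = torch.randn(2, 3)  # N=2 samples, C=3 classes
ce_targets = torch.tensor([0, 2])  # class indices, not one-hot vectors
print(loss_func(ce_logits, ce_targets))  # a scalar loss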
losses_his = [[], [], [], []]
for net, opt, l_his in zip(nets, optimizers, losses_his):
    for epoch in range(500):
        for step, (b_x, b_y) in enumerate(train_loader):
            b_y = b_y.squeeze(1)  # targets must be a 1-D vector of class indices, not one-hot
            # train each net with its own optimizer
            output = net(b_x)  # forward pass through this net
            loss = loss_func(output, b_y)  # compute this net's loss
            opt.zero_grad()  # clear gradients from the previous step
            loss.backward()  # backpropagate to compute gradients
            opt.step()  # apply the gradient update
            l_his.append(loss.item())  # record the loss at every step
            print("optimizer:", opt, "Epoch:", epoch, "Step:", step, "loss:", float(loss))
labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
for i, l_his in enumerate(losses_his):
plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.xlim((0,1000))
plt.ylim((0,4))
plt.show()
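# Follow-up sketch (not in the original script): after training, each net's
# training-set accuracy can be checked with the same argmax trick used above.
with torch.no_grad():
    for name, trained_net in zip(labels, nets):
        pred = torch.max(trained_net(train_data_ts), 1)[1]
        acc = (pred == train_label_ts.squeeze(1)).float().mean()
        print(name, "train accuracy:", float(acc))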
PyTorch: building your own neural network, with examples of the common optimizers