# -*- coding: gbk -*-
import torch
import pandas as pd
import torch.nn as nn
import torch.utils.data as Data
import torchvision # 数据库模块
import matplotlib.pyplot as plt
import torch.nn.functional as F # 激励函数都在这
import numpy as np
import csv
LR = 0.01        # Adam learning rate
BATCH_SIZE = 25  # mini-batch size for the training DataLoader

# Load the wine dataset: column 0 is the class label (1..3),
# columns 1..Col-1 are the numeric features (13 for this dataset).
tem = pd.read_csv("wine.csv", sep=',')
tem = tem.to_numpy()
np.random.shuffle(tem)  # shuffle rows before the train/test split

# Derive the shape from the data instead of hard-coding (177, 14),
# so a CSV with a different number of rows still works.
Row, Col = tem.shape
Half = 80  # first `Half` shuffled rows train, the rest test

x0_train = tem[0:Half, 1:Col]
y0_train = tem[0:Half, 0]
x0_test = tem[Half:Row, 1:Col]
y0_test = tem[Half:Row, 0]

x = torch.FloatTensor(x0_train)
xx = torch.FloatTensor(x0_test)
# CrossEntropyLoss expects integer class indices starting at 0,
# so shift the 1..3 labels down to 0..2.
y = torch.FloatTensor(y0_train).long() - 1
yy = torch.FloatTensor(y0_test).long() - 1
# A small MLP classifier: 13 features -> 100 hidden units (ReLU) -> 3 class logits.
net = nn.Sequential(
    nn.Linear(13, 100),
    nn.ReLU(),
    nn.Linear(100, 3),
)

# Adam with a light L2 penalty on the weights.
optimizer = torch.optim.Adam(net.parameters(), lr=LR, betas=(0.9, 0.99), weight_decay=1e-5)

# CrossEntropyLoss takes raw logits plus integer class indices.
loss_func = nn.CrossEntropyLoss()

# Wrap the training tensors so the DataLoader can serve shuffled mini-batches.
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True)
# Train for 3000 epochs; every 250 epochs report accuracy on the test split.
for t in range(3000):
    for step, (batch_x, batch_y) in enumerate(loader):  # one mini-batch per step
        out = net(batch_x)              # forward pass: logits of shape (batch, 3)
        loss = loss_func(out, batch_y)  # cross-entropy vs. integer class labels
        optimizer.zero_grad()           # clear gradients from the previous step
        loss.backward()                 # backpropagate
        optimizer.step()                # apply the Adam update
    if t % 250 == 0:
        # Evaluate under no_grad: no autograd graph is needed for inference.
        with torch.no_grad():
            out = net(xx)
            # argmax over the class dimension replaces the manual
            # strict-greater comparison chain of the original.
            pred = out.argmax(dim=1)
            ac_sum = (pred == yy).sum().item()
        print("轮数: ", t, " ", "正确率: ", ac_sum, " ", ac_sum / float(Row - Half))
torch.save(net,'wine_net.pkl')