pytorch 实现ANN(代码正确,注释完整)

pytorch FCNN style网络搭建

import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
import pandas as pd
from sklearn.model_selection import train_test_split

# Load the digit-recognizer training CSV. Casting everything to float32 up
# front avoids dtype errors when gradients are computed later.
train = pd.read_csv('digit-recognizer/train.csv', dtype=np.float32)

# Targets come from the 'label' column; features are every remaining pixel
# column, rescaled from [0, 255] into [0, 1].
y = train.label.values
x = train.loc[:, train.columns != 'label'].values / 255

# Hold out 20% of the rows for evaluation (fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=2019)

# Features stay float32 tensors; labels become LongTensor because
# nn.CrossEntropyLoss expects integer class indices.
x_train = torch.from_numpy(x_train)
x_test = torch.from_numpy(x_test)
y_train = torch.from_numpy(y_train).type(torch.LongTensor)
y_test = torch.from_numpy(y_test).type(torch.LongTensor)

# Mini-batch size and number of training epochs.
batch_size = 128
iteration_num = 100


# TensorDataset pairs each feature row with its label; DataLoader then adds
# batching and per-epoch shuffling (and optionally parallel workers) on top.
train = torch.utils.data.TensorDataset(x_train, y_train)
test = torch.utils.data.TensorDataset(x_test, y_test)

train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True)



class ANNModel(nn.Module):
    """Fully connected feed-forward classifier with two hidden ReLU layers.

    Args:
        input_dim: length of the flattened input vector.
        hidden_dim: number of units in each hidden layer.
        output_dim: number of output classes. The forward pass returns raw
            logits (no softmax) — pair with nn.CrossEntropyLoss.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(ANNModel, self).__init__()
        # nn.Linear alone is an affine map; the ReLU between layers is what
        # makes the network non-linear.
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # fc1 -> ReLU -> fc2 -> ReLU -> fc3; return unnormalized logits.
        hidden = self.relu1(self.fc1(x))
        hidden = self.relu2(self.fc2(hidden))
        return self.fc3(hidden)
    
input_dim = 28 * 28   # each 28x28 image is flattened into a 784-vector
hidden_dim = 128      # units per hidden layer
output_dim = 10       # ten digit classes

model = ANNModel(input_dim, hidden_dim, output_dim)
# CrossEntropyLoss expects raw logits and integer class labels.
CrossEntropyLoss = nn.CrossEntropyLoss()

loss_list = []  # training loss sampled every 50 mini-batches

learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)


for iteration in range(iteration_num):
    for j, (images, labels) in enumerate(train_loader):
        # Flatten each image batch to (batch, 784). The old Variable
        # wrapper is a no-op since PyTorch 0.4 and has been removed; this
        # also stops clobbering the module-level `train` dataset name.
        inputs = images.view(-1, 28 * 28)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = CrossEntropyLoss(outputs, labels)
        loss.backward()
        optimizer.step()

        # Periodically measure accuracy on the held-out set.
        if j % 50 == 0:
            correct = 0
            total = 0

            model.eval()
            # Evaluation needs no gradients; no_grad() avoids building the
            # autograd graph (saves memory and time) during the test pass.
            with torch.no_grad():
                for test_images, test_labels in test_loader:
                    test_outputs = model(test_images.view(-1, 28 * 28))
                    # Predicted class = index of the largest logit.
                    prediction = torch.max(test_outputs, 1)[1]

                    total += len(test_labels)
                    correct += (prediction == test_labels).sum().item()
            model.train()

            accuracy = 100 * correct / float(total)
            # .item() extracts a plain Python float (preferred over .data).
            loss_list.append(loss.item())

    if iteration % 50 == 0:
        print('Epoch:{} Loss:{} Accuracy:{}'.format(iteration, loss.item(), accuracy))
            
  • 8
    点赞
  • 62
    收藏
    觉得还不错? 一键收藏
  • 9
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 9
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值