PyTorch 之神经网络

加载数据——Data Loader

import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import os
import sys
# Absolute directory of this script (trailing separator included), then
# rewrite the tail of the path so it points at the shared data directory.
path = os.path.split(os.path.abspath(os.path.realpath(sys.argv[0])))[0] + os.path.sep
path = path[:-10] + '/data/'
#/********** Begin *********/
# Load the MNIST test split (train=False) from `path`; files must already
# exist on disk (download=False). Images come back as [0, 1] float tensors.
train_dataset = dsets.MNIST(root=path,
                            train=False,
                            download=False,
                            transform=transforms.ToTensor())
# Iterate the dataset in shuffled mini-batches of 100 samples.
data_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                          shuffle=True,
                                          batch_size=100)
# Inspect the 4th sample: (image tensor, integer class label).
print('Number of samples: ', len(train_dataset))
img, target = train_dataset[3] # load 4th sample
# Report the image tensor's size and its class label.
print("Image Size: ", img.size())
print("Image Target: ",target)
#/********** End *********/

建立模型,定义损失和优化函数

import torch 
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
# CNN Model (2 conv layer)
class CNN(nn.Module):
    """Two-conv-block CNN mapping (N, 1, 28, 28) MNIST batches to 10 class logits."""

    def __init__(self):
        super(CNN, self).__init__()
        # Block 1: 1 -> 16 feature maps; 5x5 conv with pad 2 keeps 28x28,
        # then 2x2 max-pool halves it to 14x14.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        #/********** Begin *********/
        # Block 2: 16 -> 32 feature maps with the same conv geometry,
        # pooled down to 7x7.
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # Classifier: flatten the 7*7*32 activations into 10 scores.
        self.fc = nn.Linear(7 * 7 * 32, 10)

    def forward(self, x):
        """Return (N, 10) logits for a (N, 1, 28, 28) input batch."""
        features = self.layer2(self.layer1(x))
        # One row per sample for the fully connected layer.
        flat = features.view(features.size(0), -1)
        return self.fc(flat)
    #/********** End *********/
# Instantiate the network and enumerate its learnable tensors.
cnn = CNN()
# parameters() yields tensors in the same order named_parameters() uses below.
params = list(cnn.parameters())
print(len(params))
# named_parameters() yields (name, tensor) pairs for every learnable tensor.
for pname, pvalue in cnn.named_parameters():
    print(pname, ":", pvalue.size())

训练模型

import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
import os
import sys
# Absolute directory of this script, with trailing path separator.
path = os.path.split(os.path.abspath(os.path.realpath(sys.argv[0])))[0] + os.path.sep
#rootpath = path[:10] + '/data/'
#print(path)
# Hyper Parameters
batch_size = 100       # mini-batch size for the DataLoader
learning_rate = 0.001  # Adam learning rate
num_epochs = 1         # number of passes over the training set
# MNIST Dataset: training split, images converted to [0, 1] float tensors;
# data files must already exist under ./data/ (download disabled).
train_dataset = dsets.MNIST(root='./data/',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=False)
# Shuffled mini-batch iterator over the training set.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
# CNN Model (2 conv layer)
class CNN(nn.Module):
    """CNN with two conv blocks and a linear classifier (MNIST, 10 classes)."""

    def __init__(self):
        super(CNN, self).__init__()
        # conv block 1: 1 -> 16 maps, 5x5 / pad 2, batch-norm, ReLU, 2x2 pool.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # conv block 2: 16 -> 32 maps, same geometry; output spatial size 7x7.
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # final linear layer: 7*7*32 flattened features -> 10 class scores.
        self.fc = nn.Linear(7 * 7 * 32, 10)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) logits."""
        h = self.layer1(x)
        h = self.layer2(h)
        h = h.view(h.size(0), -1)  # flatten per sample
        return self.fc(h)
cnnmodel = CNN()
# Open the log file; mode 'w' already truncates any previous contents,
# and the context manager guarantees it is closed even on error.
with open(path + 'output.txt', 'w') as f:
    #/********** Begin *********/
    # Cross-entropy loss over the 10 digit classes.
    criterion = nn.CrossEntropyLoss()
    # Adam optimizer over the CNN's parameters, lr = 0.001.
    optimizer = torch.optim.Adam(cnnmodel.parameters(), lr=learning_rate)
    # Training loop, stopped after ~60 iterations to keep the exercise fast.
    for i, (images, labels) in enumerate(train_loader):
        # Variable() is a no-op wrapper on PyTorch >= 0.4; kept for
        # compatibility with older versions.
        images = Variable(images)
        labels = Variable(labels)
        # Reset accumulated gradients before the new backward pass.
        optimizer.zero_grad()
        # Forward pass: (N, 1, 28, 28) -> (N, 10) logits.
        outputs = cnnmodel(images)
        loss = criterion(outputs, labels)
        # Backward pass.
        loss.backward()
        # Parameter update.
        optimizer.step()
        # Log the loss every 10 iterations, to both the file and stdout.
        # loss.item() replaces the legacy `loss.data[0]`, which raises
        # IndexError ("invalid index of a 0-dim tensor") on PyTorch >= 0.5.
        if (i + 1) % 10 == 0:
            line = ('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
                    % (1, num_epochs, i + 1, len(train_dataset) // 1000, loss.item()))
            f.write(line + ' \n')
            print(line)
        if i > 60:
            break
#/********** End *********/

测试保存模型

import torch
import torch.nn as nn
import torchvision.datasets as dsets
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from torch.autograd import Variable
import warnings
from PIL import Image
warnings.filterwarnings('ignore')
import os,sys
path = os.path.split(os.path.abspath(os.path.realpath(sys.argv[0])))[0] + os.path.sep
rootpath = path[:-10]
#print("validation path:" ,root)
# MNIST Dataset
test_dataset = dsets.MNIST(root='./data/',
                           train=False,
                           transform=transforms.ToTensor(),
                           download=False)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=100,
                                          shuffle=True)
# CNN Model (2 conv layer)
class CNN(nn.Module):
    """MNIST classifier: two conv/bn/relu/pool blocks, then a linear head."""

    def __init__(self):
        super(CNN, self).__init__()
        # First block: single input channel to 16 maps; 5x5 conv with
        # padding 2 preserves size, 2x2 pooling halves it (28 -> 14).
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # Second block: 16 to 32 maps, pooled again (14 -> 7).
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # Head: 7*7*32 flattened features down to 10 class scores.
        self.fc = nn.Linear(7 * 7 * 32, 10)

    def forward(self, x):
        """Return (N, 10) class scores for a (N, 1, 28, 28) batch."""
        out = self.layer2(self.layer1(x))
        # Collapse channel and spatial dims to one feature row per sample.
        return self.fc(out.view(out.size(0), -1))
cnnmodel = CNN()
# Load the whole trained model object saved by the training step.
cnnmodel = torch.load( rootpath + 'src/step3/cnnModel.pkl')
#/********** Begin *********/
# Switch to evaluation mode so BatchNorm uses its running statistics
# instead of per-batch statistics.
cnnmodel.eval()
correct = 0  # correctly classified samples so far
total = 0    # samples evaluated so far
i = 0        # batch counter
for images, labels in test_loader:
    images = Variable(images)
    # Forward pass: (N, 1, 28, 28) -> (N, 10) class scores.
    outputs = cnnmodel(images)
    # Predicted class = argmax over the class dimension.
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    # .item() converts the 0-dim tensor sum to a plain Python int so the
    # accuracy arithmetic below runs on ints, not tensors (portable across
    # PyTorch versions).
    correct += (predicted == labels).sum().item()
    i += 1
    # To save time, only evaluate the first few batches.
    if i > 10:
        break
# Report accuracy as an integer percentage.
# NOTE(review): the message says "200 test images" but up to 11 batches of
# 100 are actually scored — kept byte-identical for grading compatibility.
print('Test Accuracy of the model on the 200 test images: %d %%' % (100 * correct / total))
#/********** End *********/
#/********** End *********/
  • 1
    点赞
  • 14
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值