PyTorch: A Generic Template for Two Fully Connected Neural Networks

The code is commented in detail, so this article does not repeat the explanations.

A Fully Connected Neural Network for Classification

  • Loss function: cross-entropy (nn.CrossEntropyLoss); see the sketch after this list
  • Optimizer: stochastic gradient descent (SGD)
  • Activation function: ReLU
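A quick sketch (not part of the original post) of what nn.CrossEntropyLoss computes: it applies log-softmax to the raw logits and then takes the negative log-likelihood, which is why the network below outputs logits directly.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)          # [batchSize, numClasses], raw scores
target = torch.randint(0, 10, (4,))  # class indices, shape [batchSize]

loss_a = F.cross_entropy(logits, target)
loss_b = F.nll_loss(F.log_softmax(logits, dim=1), target)
assert torch.allclose(loss_a, loss_b)  # identical by definition
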
# -*- coding: utf-8 -*- 
# @Time : 2021/5/11 20:04 
# @Author : Liu Lihao
# @File : ClassificationANN.py


import torch
import torch.nn as nn
import torch.optim as optim

from torchvision import datasets, transforms



'''
Define the network architecture
'''
class MLP(nn.Module):
    def __init__(self, inputSize, outputSize):
        super(MLP, self).__init__()
        '''
        Define the network architecture here.
        Modify this block to change the architecture.
        '''
        self.model = nn.Sequential(
            nn.Linear(inputSize, 200),  # linear layer
            nn.ReLU(inplace=True),      # activation; inplace=True overwrites its input to save memory
            nn.Linear(200, 200),
            nn.ReLU(inplace=True),
            nn.Linear(200, outputSize)  # output raw logits; no activation here, because
                                        # nn.CrossEntropyLoss applies log-softmax internally
        )

    "定义前传"
    def forward(self, x):
        x = self.model(x)
        return x


'''
Define the neural network's functionality
'''
class AllConnectedNet:
    def __init__(self, learning_rate, inputSize, outputSize):
        self.outputSize = outputSize
        self.learning_rate = learning_rate
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')  # use the GPU when available, otherwise fall back to the CPU
        self.deDevice = torch.device('cpu')
        self.net = MLP(inputSize, outputSize).to(self.device)
        self.optimizer = optim.SGD(self.net.parameters(), lr=learning_rate)

        self.crossEntropyLoss = nn.CrossEntropyLoss().to(self.device)
        # cross-entropy loss, commonly used for classification

        self.MSELoss = nn.MSELoss().to(self.device)
        # mean squared error, commonly used for regression

    '''
    Forward pass
    inData:  [batchSize, inputSize]
    outData: [batchSize, outputSize]
    '''
    def forwardPropagation(self,inData):
        inData = inData.to(self.device)
        outData = self.net(inData)  # forward pass
        return outData

    '''
    Compute the loss
    outData:    [batchSize, outputSize]
    targetData: [batchSize]
    loss: scalar (0-dimensional tensor)
    '''
    def computingLoss(self, outData, targetData):
        outData = outData.to(self.device)
        targetData = targetData.to(self.device)
        loss = self.crossEntropyLoss(outData, targetData)
        return loss

    '''
    Backward pass
    outData:    [batchSize, outputSize]
    targetData: [batchSize]
    loss: scalar (0-dimensional tensor)
    '''
    def backwardPropagation(self, outData, targetData):
        loss = self.computingLoss(outData, targetData)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss

    '''Move data from the GPU back to the CPU'''
    def cudaToCpu(self,data):
        return data.to(self.deDevice)




'''
Test the network on the MNIST digit-recognition dataset
'''
batch_size = 200
learning_rate = 0.01
epochs = 10

net = AllConnectedNet(learning_rate, 784, 10)  # 784 = 28*28 input pixels, 10 classes

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('/data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))  # mean/std of the MNIST training set
                   ])),
    batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('/data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=batch_size, shuffle=False)  # no need to shuffle the test set


for epoch in range(epochs):
    '''Training'''
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28 * 28)  # flatten each image to a 784-dimensional vector
        out = net.forwardPropagation(data)  # forward pass
        loss = net.backwardPropagation(out, target)  # compute the loss and update the parameters
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))

    '''Evaluation'''
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients are needed during evaluation
        for data, target in test_loader:
            data = data.view(-1, 28 * 28)
            out = net.forwardPropagation(data)  # forward pass
            # computingLoss returns the batch mean, so scale by the batch size
            # before averaging over the whole dataset
            test_loss += net.computingLoss(out, target).item() * len(data)

            pred = net.cudaToCpu(out).argmax(dim=1)
            correct += pred.eq(target).sum().item()
    '''Print the test results'''
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
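
After training, the model can be used for prediction. A minimal inference sketch (not part of the original script) that classifies a single test image with the network trained above:

data, target = next(iter(test_loader))
with torch.no_grad():  # gradients are not needed for inference
    out = net.forwardPropagation(data[:1].view(-1, 28 * 28))
pred = net.cudaToCpu(out).argmax(dim=1)
print('predicted:', pred.item(), 'actual:', target[0].item())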

A Fully Connected Neural Network for Regression (Fitting)

  • Loss function: mean squared error (nn.MSELoss); see the sketch after this list
  • Optimizer: stochastic gradient descent (SGD)
  • Activation function: LeakyReLU
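A minimal sketch (not part of the original post) of the difference between the two MSELoss reduction modes offered in the script below:

import torch
import torch.nn as nn

pred = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
target = torch.tensor([[1.5, 2.0], [2.0, 4.0]])

# element-wise squared errors: 0.25, 0.0, 1.0, 0.0
print(nn.MSELoss(reduction='sum')(pred, target).item())   # 1.25   (sum over all elements)
print(nn.MSELoss(reduction='mean')(pred, target).item())  # 0.3125 (sum / number of elements)
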
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2021/5/13 19:27
# @Author  : Liu Lihao
# @File    : FittingANN.py


import torch
import torch.nn as nn
import torch.optim as optim
from openpyxl import load_workbook
import matplotlib.pyplot as plt


'''
Define the network architecture
'''
class MLP(nn.Module):
    def __init__(self, inputSize, outputSize):
        super(MLP, self).__init__()
        '''
        Define the network architecture here.
        Modify this block to change the architecture.
        '''
        self.model = nn.Sequential(
            nn.Linear(inputSize, 50),          # linear layer
            nn.LeakyReLU(0.15, inplace=True),  # activation; inplace=True overwrites its input to save memory
            nn.Linear(50, 50),
            nn.LeakyReLU(0.15, inplace=True),
            nn.Linear(50, 20),
            nn.LeakyReLU(0.15, inplace=True),
            nn.Linear(20, outputSize)          # linear output layer, as appropriate for regression
        )

    "定义前传"
    def forward(self, x):
        x = self.model(x)
        return x


'''
Define the neural network's functionality
'''
class AllConnectedNet:
    def __init__(self, learning_rate, inputSize, outputSize):
        self.outputSize = outputSize
        self.learning_rate = learning_rate
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')  # use the GPU when available, otherwise fall back to the CPU
        self.deDevice = torch.device('cpu')
        self.net = MLP(inputSize, outputSize).to(self.device)
        self.optimizer = optim.SGD(self.net.parameters(), lr=learning_rate)

        self.crossEntropyLoss = nn.CrossEntropyLoss().to(self.device)
        # cross-entropy loss, commonly used for classification

        self.MSELoss = nn.MSELoss(reduction='sum').to(self.device)
        # self.MSELoss = nn.MSELoss(reduction='mean').to(self.device)
        # mean squared error, commonly used for regression

    '''
    Forward pass
    inData:  [batchSize, inputSize], type: torch.FloatTensor
    outData: [batchSize, outputSize]
    '''
    def forwardPropagation(self,inData):
        inData = inData.to(self.device)
        outData = self.net(inData)  # forward pass
        return outData

    '''
    Compute the loss
    outData:    [batchSize, outputSize], type: torch.FloatTensor
    targetData: [batchSize, outputSize]
    loss: scalar (0-dimensional tensor)
    '''
    def computingLoss(self, outData, targetData):
        outData = outData.to(self.device)
        targetData = targetData.to(self.device)
        # loss = self.crossEntropyLoss(outData, targetData)
        loss = self.MSELoss(outData, targetData)
        return loss

    '''
    Backward pass
    outData:    [batchSize, outputSize], type: torch.FloatTensor
    targetData: [batchSize, outputSize]
    loss: scalar (0-dimensional tensor)
    '''
    def backwardPropagation(self, outData, targetData):
        loss = self.computingLoss(outData, targetData)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss

    '''Move data from the GPU back to the CPU'''
    def cudaToCpu(self,data):
        return data.to(self.deDevice)




'''Test'''

wb = load_workbook("../test.xlsx")  # open an existing workbook
wb1 = wb.active  # the active worksheet

batch_size = 16
learning_rate = 0.001
epochs = 10  # number of passes over the dataset


net = AllConnectedNet(learning_rate, 3, 2)  # 3 input features, 2 output targets
Loss_ = []
for epoch in range(epochs):

    batch_index = 0
    while (batch_index + 1) * batch_size <= wb1.max_row:  # <= keeps the last full batch; any leftover partial batch is dropped
        x = []  # inputs: columns 1-3
        y = []  # targets: columns 4-5
        for i in range(1, batch_size + 1):  # openpyxl rows and columns are 1-indexed
            x.append([
                wb1.cell(row=i + batch_index * batch_size, column=1).value,
                wb1.cell(row=i + batch_index * batch_size, column=2).value,
                wb1.cell(row=i + batch_index * batch_size, column=3).value
            ])
            y.append([
                wb1.cell(row=i + batch_index * batch_size, column=4).value,
                wb1.cell(row=i + batch_index * batch_size, column=5).value
            ])

        # print('x:', x)
        # print('y:', y)
        out = net.forwardPropagation(torch.FloatTensor(x))  # forward pass
        loss = net.backwardPropagation(out, torch.FloatTensor(y))  # compute the loss and update the network parameters

        if batch_index % 20 == 0:
            print('batch_index', batch_index, 'loss:', loss.item())
            Loss_.append(loss.item())
        batch_index += 1

plt.plot(Loss_)
plt.show()
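
Once the loss curve has been plotted, the trained network can be queried on new inputs. A hypothetical inference sketch (the sample values below are made up, not from the original post):

with torch.no_grad():  # gradients are not needed for inference
    sample = torch.FloatTensor([[0.1, 0.2, 0.3]])  # made-up 3-feature input
    prediction = net.cudaToCpu(net.forwardPropagation(sample))  # shape [1, 2]
print(prediction)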