【老刘的任务之PyTorch学习笔记】

实现了一个简单的全连接神经网络(MLP),用于torchvision中的CIFAR-10数据集,该数据集包含10种分类的图片数据。

构建了一个简易网络结构Multiple_Model,该网络包含4个线性层,并以ReLU函数作为激活函数。

Multiple_Model_2网络结构同Multiple_Model相同,不过采用了nn.Sequential()定义网络。

网络的训练在函数multi_feature()中,包含数据集划分、标准化,以及前向传播+反向传播的训练循环。

实验结果就不放图了,效果不好,因为网络较为简陋

import numpy as np
import sklearn
import sklearn.datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

import torch
import torch.nn.functional as F
import torchvision
from torch import nn

# Toy dataset for the linear-regression demo: y = 2 * x.
x = torch.tensor([[1.0], [2.0], [3.0]])  # inputs, shape (3, 1)
y = torch.tensor([[2.0], [4.0], [6.0]])  # targets, shape (3, 1)


class LinearModel(torch.nn.Module):
    """Single-feature linear regression: y = w * x + b.

    Wraps one ``nn.Linear(1, 1)`` layer; the learned weight/bias are
    exposed as ``self.linear.weight`` / ``self.linear.bias``.
    """

    def __init__(self):
        super(LinearModel, self).__init__()
        # One input feature mapped to one output value.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        """Return the linear prediction for input tensor ``x`` of shape (N, 1)."""
        return self.linear(x)


class LogisticRegressionModel(torch.nn.Module):
    """Single-feature logistic regression: sigmoid(w * x + b).

    Output lies in (0, 1), suitable for ``nn.BCELoss``.
    """

    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        """Return the predicted probability for input ``x`` of shape (N, 1)."""
        # FIX: F.sigmoid is deprecated; torch.sigmoid is the supported API.
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred


class Multiple_Model(torch.nn.Module):
    """4-layer MLP: 10 -> 8 -> 6 -> 4 -> 1.

    ReLU activations between hidden layers and a sigmoid on the final
    output so the prediction lies in (0, 1) (required by ``nn.BCELoss``).
    """

    def __init__(self):
        super(Multiple_Model, self).__init__()
        self.linear0 = torch.nn.Linear(10, 8)  # 10 features down to 8
        self.linear1 = torch.nn.Linear(8, 6)   # 8 down to 6
        self.linear2 = torch.nn.Linear(6, 4)   # 6 down to 4
        self.linear3 = torch.nn.Linear(4, 1)   # 4 down to 1
        # FIX: torch.nn.ReLU is the class; it must be instantiated with ()
        # to be callable as an activation module.
        self.activate = torch.nn.ReLU()

    def forward(self, x_):
        """Map a (N, 10) float tensor to a (N, 1) probability tensor."""
        # FIX: the original body read the global ``x`` instead of the
        # parameter ``x_``, returned nothing, and was shadowed by a second
        # ``forward`` that referenced a nonexistent ``self.linear``.
        x_ = self.activate(self.linear0(x_))
        x_ = self.activate(self.linear1(x_))
        x_ = self.activate(self.linear2(x_))
        # Sigmoid (not ReLU) on the last layer, per the original comment.
        return torch.sigmoid(self.linear3(x_))


class Multiple_Model_2(torch.nn.Module):
    """Same architecture as ``Multiple_Model``, built with ``nn.Sequential``.

    10 -> 8 -> 6 -> 4 -> 1 with ReLU between hidden layers and a sigmoid
    output in (0, 1).
    """

    def __init__(self):
        super(Multiple_Model_2, self).__init__()
        # FIX: the original stacked four Linear layers with a single
        # trailing ReLU (no per-layer activations, no sigmoid output),
        # which does not match Multiple_Model as the header prose claims.
        self.linear_layer0 = nn.Sequential(
            torch.nn.Linear(10, 8),  # 10 features down to 8
            torch.nn.ReLU(),
            torch.nn.Linear(8, 6),   # 8 down to 6
            torch.nn.ReLU(),
            torch.nn.Linear(6, 4),   # 6 down to 4
            torch.nn.ReLU(),
            torch.nn.Linear(4, 1),   # 4 down to 1
            torch.nn.Sigmoid(),
        )

    # FIX: both ``forward`` defs were nested inside ``__init__`` (so the
    # module had no forward at all), and the first one returned the
    # undefined global ``x``. One method at class level replaces them.
    def forward(self, x_):
        """Map a (N, 10) float tensor to a (N, 1) probability tensor."""
        return self.linear_layer0(x_)

def linear_resession():
    """Fit ``LinearModel`` to the module-level (x, y) data with SGD.

    Trains for 1000 epochs with MSE loss, printing the loss each epoch,
    then prints the learned weight/bias and a prediction for x = 4.0.
    """
    model = LinearModel()
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    for epoch in range(1000):
        # Forward pass: prediction and loss on the full toy dataset.
        y_pred = model(x)
        loss = criterion(y_pred, y)
        print(epoch, loss.item())
        # Backward pass: reset grads, backprop, and take one SGD step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print('w = ', model.linear.weight.item())
    print('b = ', model.linear.bias.item())

    # Sanity check on an unseen input (could also come from a test split).
    x_test = torch.Tensor([[4.0]])
    y_test = model(x_test)
    print('y_pred = ', y_test.data)

def classification():
    """Train ``LogisticRegressionModel`` with BCE loss for 1000 epochs.

    NOTE(review): the CIFAR-10 datasets are downloaded but never used —
    training actually runs on the module-level toy tensors (x, y).
    NOTE(review): BCELoss expects targets in [0, 1], but ``y`` holds
    {2, 4, 6}; recent PyTorch versions reject such targets. The data
    would need to be binarized for this to be a valid classification.
    """
    train_set = torchvision.datasets.CIFAR10(root='E:\\pythonProject\\resources\\cifar', train=True, download=True)
    test_set = torchvision.datasets.CIFAR10(root='E:\\pythonProject\\resources\\cifar', train=False, download=True)

    model = LogisticRegressionModel()
    # FIX: ``size_average=False`` is deprecated; ``reduction='sum'`` is
    # the equivalent supported argument.
    criterion = torch.nn.BCELoss(reduction='sum')
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    for epoch in range(1000):  # training loop
        # Forward pass.
        y_pred = model(x)
        loss = criterion(y_pred, y)
        print(epoch, loss.item())
        # Backward pass.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def multi_feature():
    """Train ``Multiple_Model`` on the sklearn diabetes dataset.

    Splits the data, standardizes the features, and runs 1000 epochs of
    full-batch SGD, printing the loss each epoch.
    """
    # Load data: 10 continuous features, continuous regression target.
    data = sklearn.datasets.load_diabetes()
    # Train/test split.
    x_train, x_test, y_train, y_test = train_test_split(data.data, data.target)
    # Standardize features (fit on train only to avoid leakage).
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # FIX: the model needs float32 tensors; the original passed numpy
    # float64 arrays straight into forward(). Targets are reshaped to
    # (N, 1) to match the model's output shape.
    x_train_t = torch.from_numpy(x_train).float()
    y_train_t = torch.from_numpy(y_train).float().view(-1, 1)

    model = Multiple_Model()
    # FIX: BCELoss requires targets in [0, 1]; the diabetes target is
    # continuous (roughly 25-346), so this is a regression task — MSELoss.
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    for epoch in range(1000):  # training loop
        # Forward pass.
        y_pred = model(x_train_t)
        loss = criterion(y_pred, y_train_t)
        print(epoch, loss.item())
        # Backward pass.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

# TODO: add an evaluation step on the held-out test split (not yet implemented).

if __name__ == "__main__":
    # linear_resession(x_train, y_train)
    classification()


  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值