利用pytorch搭建三层全连接神经网络,cifar10验证集上正确率55.3%

import torch
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import sampler
%matplotlib inline
from torch import nn,optim
from torch.utils.data import DataLoader
from torchvision import datasets,transforms

NUM_TRAIN = 49000

# Define the basic network structure.
# class Fc_Net(nn.Module):
#     def __init__(self,in_dim,n_hidden_1,n_hidden_2,out_dim):
#         super(Fc_Net,self).__init__()
#         self.layer1=nn.Sequential(nn.Linear(in_dim,n_hidden_1),nn.BatchNorm1d(n_hidden_1),nn.ReLU(True))
#         self.layer2=nn.Sequential(nn.Linear(n_hidden_1,n_hidden_2),nn.BatchNorm1d(n_hidden_2),nn.ReLU(True))
#         self.layer3=nn.Sequential(nn.Linear(n_hidden_2,out_dim))
#     def forword(self,x):
#         x=self.layer1(x)
#         x=self.layer2(x)
#         x=self.layer3(x)
#         return x
    
class Fc_Net(nn.Module):
    """Three-layer fully-connected net: Linear -> BatchNorm -> ReLU (x2) -> Linear.

    Args:
        in_dim: flattened input dimension.
        n_hidden_1: width of the first hidden layer.
        n_hidden_2: width of the second hidden layer.
        out_dim: number of output classes (raw logits, no softmax).
    """
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Fc_Net, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(
            nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2), nn.ReLU(True))
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    # FIX: was misspelled 'forword', so nn.Module.forward was never overridden
    # and calling an Fc_Net instance raised NotImplementedError.
    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x
    
class CifarNet(nn.Module):
    """Plain three-layer fully-connected classifier (no batch norm).

    Architecture: input -> fc1 -> ReLU -> fc2 -> ReLU -> fc3 -> logits.
    """

    def __init__(self, input_size, hidden1_size, hidden2_size, num_classes):
        super(CifarNet, self).__init__()
        # Layers kept as individual attributes so state-dict keys stay stable.
        self.fc1 = nn.Linear(input_size, hidden1_size)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden1_size, hidden2_size)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden2_size, num_classes)

    def forward(self, x):
        # Chain the layers directly instead of threading one temp variable.
        hidden = self.relu1(self.fc1(x))
        hidden = self.relu2(self.fc2(hidden))
        return self.fc3(hidden)

# Our model

# Hyperparameters.
batch_size = 128
learning_rate = 1e-3

# Preprocessing: convert PIL image to tensor, then normalize each channel
# with CIFAR-10 per-channel means and stds.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

# Datasets. Train and val both load the 50k training images; they are split
# disjointly by the SubsetRandomSamplers below (0..NUM_TRAIN-1 vs NUM_TRAIN..49999).
train_dataset = datasets.CIFAR10(root='./vclab/datasets',
                                 train=True,
                                 transform=transform,
                                 download=True)
val_dataset = datasets.CIFAR10(root='./vclab/datasets',
                               train=True,
                               download=True,
                               transform=transform)
# FIX: the test set previously used bare transforms.ToTensor() without
# normalization, so test inputs were on a different scale than the inputs the
# model was trained on. Apply the same transform everywhere.
test_dataset = datasets.CIFAR10(root='./vclab/datasets',
                                train=False,
                                transform=transform)

# DataLoaders: first NUM_TRAIN training images for training, the rest for validation.
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN)))
loader_val = DataLoader(val_dataset,
                        batch_size=64,
                        sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN, 50000)))
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)

# Build the model and define the loss function and optimization method.
model = CifarNet(3072, 128, 64, 10)  # 32*32*3 flattened input, 10 CIFAR-10 classes
if torch.cuda.is_available():  # move the model to GPU when CUDA is available
    model = model.cuda()
criterion = nn.CrossEntropyLoss()
# FIX: 'learning_rate' was defined but never passed to the optimizer; pass it
# explicitly. Adam's default lr is also 1e-3, so training behavior is unchanged.
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.001)
n_epochs = 50
# Per-epoch history buffers (test_* are populated elsewhere, if at all).
train_loss = []
test_loss = []
train_accuracy = []
val_accuracy = []
test_accuracy = []
# Main training loop: one training pass and one validation pass per epoch.
for epoch in range(n_epochs):
    running_loss = 0.0
    print("epoch {}/{}".format(epoch, n_epochs))
    print("-"*10)
    train_acc = 0
    val_acc = 0
    train_total = 0
    val_total = 0

    # ---- training pass ----
    model.train()  # hoisted out of the batch loop; the mode doesn't change per batch
    for data in train_loader:
        img, label = data
        img = img.view(img.size(0), -1)  # flatten 3x32x32 image to a 3072-dim vector
        if torch.cuda.is_available():
            img = img.cuda()
            label = label.cuda()
        out = model(img)  # forward pass -> class logits
        _, predicted = torch.max(out.data, 1)
        train_acc += (predicted == label).sum().item()
        train_total += label.size(0)
        loss = criterion(out, label)
        optimizer.zero_grad()  # clear accumulated gradients
        loss.backward()        # backpropagate
        optimizer.step()       # update parameters
        running_loss += loss.item()
    train_accuracy.append(train_acc/train_total)
    print(max(train_accuracy))

    # ---- validation pass ----
    # FIX: the old Variable(..., volatile=True) idiom is deprecated and ignored
    # by modern PyTorch, so validation ran with autograd enabled; use
    # torch.no_grad() to actually disable gradient tracking.
    model.eval()
    with torch.no_grad():
        for data in loader_val:
            img, label = data
            img = img.view(img.size(0), -1)
            if torch.cuda.is_available():
                img = img.cuda()
                label = label.cuda()
            out = model(img)
            loss = criterion(out, label)
            _, predicted = torch.max(out.data, 1)
            val_acc += (predicted == label).sum().item()
            val_total += label.size(0)
    val_accuracy.append(val_acc/val_total)
    print(max(val_accuracy))
        
    

 

  • 2
    点赞
  • 16
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
好的,我可以回答你的问题。首先,你需要导入 PyTorch 库和其他必要的库,例如 pandas 和 numpy。然后,你应该准备你的数据,这里假设你有一个名为 data.csv 的数据集,其中包含分类变量和目标变量。接下来,你可以使用 pandas 库读取 csv 文件并将数据分成输入特征和目标变量。然后,你需要将分类变量转换为数字标签。这可以通过使用 LabelEncoder 类来完成,该类将每个分类变量映射到一个唯一的数字标签。接下来,你需要将数据集分成训练集和测试集。这可以通过使用 train_test_split 函数来完成。最后,你可以使用 PyTorch连接神经网络。以下是一个示例代码: ```python import torch import pandas as pd import numpy as np from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split # 读取数据集 data = pd.read_csv('data.csv') # 分离输入特征和目标变量 X = data.iloc[:, :-1].values y = data.iloc[:, -1].values # 将分类变量转换为数字标签 le = LabelEncoder() for i in range(X.shape[1]): if type(X[0][i]) == str: X[:, i] = le.fit_transform(X[:, i]) # 划分训练集和测试集 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # 将 numpy 数组转换为 tensor X_train = torch.from_numpy(X_train).float() y_train = torch.from_numpy(y_train).long() X_test = torch.from_numpy(X_test).float() y_test = torch.from_numpy(y_test).long() # 定义连接神经网络 class Net(torch.nn.Module): def __init__(self, n_feature, n_hidden, n_output): super(Net, self).__init__() self.hidden = torch.nn.Linear(n_feature, n_hidden) # 隐藏层 self.out = torch.nn.Linear(n_hidden, n_output) # 输出层 def forward(self, x): x = torch.relu(self.hidden(x)) # 激活函数 x = self.out(x) return x # 定义模型和优化器 net = Net(n_feature=X_train.shape[1], n_hidden=10, n_output=len(np.unique(y_train))) optimizer = torch.optim.SGD(net.parameters(), lr=0.01) loss_func = torch.nn.CrossEntropyLoss() # 训练模型 for epoch in range(100): out = net(X_train) loss = loss_func(out, y_train) optimizer.zero_grad() loss.backward() optimizer.step() # 输出训练误差 if epoch % 10 == 0: print('Epoch: %d | Loss: %.4f' % (epoch, loss.item())) # 测试模型 with torch.no_grad(): out = net(X_test) predictions = torch.argmax(out, axis=1) accuracy = (predictions == y_test).sum() / len(y_test) print('Accuracy:', accuracy) ``` 在这个示例中,我们使用了一个具有一个隐藏层的连接神经网络。你可以根据你的数据集和需要调整输入特征的数量、隐藏层的大小和输出层的数量。训练模型时,我们使用了交叉熵损失函数和随机梯度下降优化器。测试模型时,我们计算了模型的准确度。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值