pytorch[10]

本文介绍了卷积神经网络(CNN)的基础知识,包括卷积运算、多个卷积核的组合以及在图像识别任务中的应用。通过实例展示了如何使用PyTorch实现CNN,并在MNIST数据集上进行训练,探讨了ReLU激活函数和池化层的使用顺序对模型性能的影响。此外,还涉及到了模型训练的完整流程,包括损失函数、优化器的选择以及模型在GPU上的运行。
摘要由CSDN通过智能技术生成

  

左上角为0点

 卷积完成后  c  w  h  都可能变化

卷积运算  一个卷积核  最后只有 一个输出通道

3*w*h 经单个卷积核卷积后变为 1*w'*h'(w、h 可能变化)

多个卷积核,分别输出,再cat拼接

 m个卷积核     m个输出通道

卷积核组合m x  n x  w x h

import torch

# Conv2d demo: a layer with m kernels maps (N, n, H, W) -> (N, m, H', W').
in_channels, out_channels = 5, 10   # n input channels, m output channels
width, height = 100, 100
kernel_size = 3
batch_size = 1

# NOTE(review): torch convention for images is (N, C, H, W); width and height
# are equal here so passing them in either order gives the same result, but
# the labels below follow the (W, H) order the values were declared in.
x = torch.randn(batch_size,     # N: mini-batch size
                in_channels,    # C: input channels
                width,
                height)

# The layer's weight tensor has shape (out_channels, in_channels, k, k),
# i.e. m kernels, each spanning all n input channels.
conv_layer = torch.nn.Conv2d(in_channels,            # n input channels
                             out_channels,           # m output channels
                             kernel_size=kernel_size)

# Variant showing the common extra options:
# conv_layer = torch.nn.Conv2d(in_channels,
#                              out_channels,
#                              kernel_size=kernel_size,  # kernel size
#                              stride=2,                 # step size
#                              padding=1,                # zero-padding width
#                              bias=False)               # no bias term

output = conv_layer(x)

print(x.shape)                  # torch.Size([1, 5, 100, 100])
print(output.shape)             # torch.Size([1, 10, 98, 98]) -- no padding shrinks H/W by k-1
print(conv_layer.weight.shape)  # torch.Size([10, 5, 3, 3])

补充0

代码

import torch

# Padding demo: 5x5 input, 3x3 kernel, padding=1 keeps the spatial size at 5x5.
values = [3, 4, 5, 6, 7,
          8, 9, 1, 2, 3,
          4, 5, 6, 7, 8,
          9, 0, 1, 2, 3,
          4, 5, 6, 7, 8]

# Reshape to (N, C, H, W) = (1, 1, 5, 5).
x = torch.tensor(values, dtype=torch.float32).view(1, 1, 5, 5)

conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)
# Two-output-channel variant:
# conv_layer = torch.nn.Conv2d(1, 2, kernel_size=3, padding=1, bias=False)

# Overwrite the random weights with a known kernel so the output is
# deterministic.  Weight shape is (out_channels, in_channels, 3, 3).
kernel = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8., 9.]).view(1, 1, 3, 3)
# kernel = torch.tensor([1.,2.,3.,4.,5.,6.,7.,8.,9.]*2).view(2, 1, 3, 3)  # 2-channel variant
conv_layer.weight.data = kernel.data

output = conv_layer(x)  # shape (1, out_channels, 5, 5)
print(output)

神经网络结构

下面流程图和代码不一致  relu层和pooling层位置不一样

gpu 跑代码

import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
 
# ---- dataset preparation ----
batch_size = 64

# Convert PIL images to tensors and normalize with the MNIST mean/std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

train_dataset = datasets.MNIST(root='../dataset/mnist/',
                               train=True,
                               download=True,
                               transform=transform)
test_dataset = datasets.MNIST(root='../dataset/mnist/',
                              train=False,
                              download=True,
                              transform=transform)

# Shuffle only the training split.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
 
# design model using class
 
 
class Net(torch.nn.Module):
    """Small CNN for MNIST: two conv + pool + ReLU stages, then one linear layer.

    Input:  (N, 1, 28, 28) image batch.
    Output: (N, 10) raw class scores (logits; pair with CrossEntropyLoss).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        n = x.size(0)  # batch size, needed for the flatten below
        # Each stage: conv -> 2x2 max-pool -> ReLU.  (Applying ReLU after the
        # pool gives the same values as pool-after-ReLU, since max-pooling
        # commutes with the monotone ReLU; only the order of ops differs.)
        for conv in (self.conv1, self.conv2):
            x = F.relu(self.pooling(conv(x)))
        # Flatten (N, 20, 4, 4) -> (N, 320) for the fully-connected layer.
        x = x.view(n, -1)
        return self.fc(x)
 
 
# Build the model and move it to the GPU when one is available.  Every batch
# fed to it later must be moved to this same device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Net().to(device)

# Cross-entropy loss (expects raw logits) optimized with SGD + momentum.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
 
# training cycle forward, backward, update
 
 
def train(epoch):
    """Run one training epoch; print the mean loss every 300 mini-batches."""
    running_loss = 0.0
    for i, (inputs, target) in enumerate(train_loader):
        # Inputs must live on the same device as the model's parameters.
        inputs = inputs.to(device)
        target = target.to(device)

        optimizer.zero_grad()
        loss = criterion(model(inputs), target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch+1, i+1, running_loss/300))
            running_loss = 0.0
 
 
def test():
    """Evaluate on the test set; print and return the accuracy fraction."""
    hits = 0
    seen = 0
    with torch.no_grad():  # evaluation only -- no gradients needed
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            # Predicted class = argmax over the 10 output logits.
            _, predicted = torch.max(model(images).data, dim=1)
            seen += labels.size(0)
            hits += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% ' % (100*hits/seen))
    return hits/seen
 
 
if __name__ == '__main__':
    epoch_list = []
    acc_list = []

    # Train for 10 epochs, evaluating on the test set after each one.
    for epoch in range(10):
        train(epoch)
        epoch_list.append(epoch)
        acc_list.append(test())

    # Plot the accuracy-vs-epoch curve.
    plt.plot(epoch_list, acc_list)
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.show()
 
    

作业

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值