Building your own convolutional neural network (CNN) with PyTorch

import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
# data loading and transforming
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
from torchvision import transforms


data_transform = transforms.ToTensor()

# prepare the train and test datasets
train_data = MNIST(root='./data', train=True, transform=data_transform, download=True)
test_data = MNIST(root='./data', train=False, transform=data_transform, download=True)

print('train_data number: ', len(train_data))
print('test_data number: ', len(test_data))

# batch size
batch_size = 40
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
classes = ['0', '1', '2', '3', '4',  '5', '6', '7', '8', '9']
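
# As a quick sanity check on the loaders, we can display a few training images.
# This is a minimal sketch (not part of the original script) that only uses the
# matplotlib/numpy imports above; the 2x4 grid layout is an illustrative choice.
def show_batch():
    images, labels = next(iter(train_loader))
    fig = plt.figure(figsize=(8, 4))
    for i in range(8):
        ax = fig.add_subplot(2, 4, i + 1)
        ax.imshow(np.squeeze(images[i].numpy()), cmap='gray')
        ax.set_title(classes[labels[i].item()])
        ax.axis('off')
    plt.show()
# call show_batch() to preview one batch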


# define the CNN
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # 1x28x28 -> conv(3x3) -> 10x26x26 -> pool(2x2) -> 10x13x13
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 10, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # 10x13x13 -> conv(3x3) -> 20x11x11 -> pool(2x2) -> 20x5x5
        self.conv2 = nn.Sequential(
            nn.Conv2d(10, 20, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        self.fc1 = nn.Linear(20 * 5 * 5, 50)
        self.fc1_drop = nn.Dropout(p=0.4)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        out_conv1 = self.conv1(x)
        out_conv2 = self.conv2(out_conv1)
        in_fc = out_conv2.view(out_conv2.size(0), -1)  # flatten to (batch, 500)
        out_fc1 = self.fc1(in_fc)
        out_drop = self.fc1_drop(out_fc1)
        out = self.fc2(out_drop)  # fix: feed the dropout output into fc2, not out_fc1
        return out

my_cnn = Net()

#print(my_cnn)
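
# Why fc1 expects 20*5*5 = 500 inputs: 28x28 -> conv(3x3) -> 26x26 -> pool(2) ->
# 13x13 -> conv(3x3) -> 11x11 -> pool(2) -> 5x5, with 20 channels. A quick check
# with a dummy input (a sketch, not part of the original script):
with torch.no_grad():
    dummy = my_cnn.conv2(my_cnn.conv1(torch.zeros(1, 1, 28, 28)))
print(dummy.shape)  # torch.Size([1, 20, 5, 5])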

# define the optimizer and the loss function
optimizer = torch.optim.SGD(my_cnn.parameters(), lr=0.01, momentum=0.9)
loss_fun = torch.nn.CrossEntropyLoss()
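
# Note: CrossEntropyLoss combines LogSoftmax and NLLLoss internally, which is
# why Net.forward returns the raw fc2 scores (logits) rather than probabilities.
# A quick equivalence check on fake data (illustrative only, not original code):
_logits = torch.randn(4, 10)
_targets = torch.tensor([3, 1, 4, 9])
_manual = F.nll_loss(F.log_softmax(_logits, dim=1), _targets)
print(torch.allclose(loss_fun(_logits, _targets), _manual))  # True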

#train

def train_fun(EPOCH):
    loss_list = []
    for epoch in range(EPOCH):
        for step, data in enumerate(train_loader):
            b_x, b_y = data
            out_put = my_cnn(b_x)
            loss = loss_fun(out_put, b_y)
            if step % 100 == 0:
                print('Epoch: ', epoch, ' Step: ', step, ' loss: ', float(loss))
                loss_list.append(float(loss))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    return loss_list



def save():
    # saves the whole pickled module (see the state_dict variant below)
    torch.save(my_cnn, 'G:/PY_test/pyTcnn/first_train.pkl')


def restore():
    # fix: assign the loaded model back, otherwise the result is discarded
    global my_cnn
    my_cnn = torch.load('G:/PY_test/pyTcnn/first_train.pkl')
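

# The usually recommended alternative is to save only the parameters
# (state_dict), which does not pickle the class definition itself. A sketch
# using a hypothetical file name alongside the original one:
def save_state_dict():
    torch.save(my_cnn.state_dict(), 'G:/PY_test/pyTcnn/first_train_state.pkl')


def restore_state_dict():
    my_cnn.load_state_dict(torch.load('G:/PY_test/pyTcnn/first_train_state.pkl'))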

#test

def test_fun():
    correct = 0
    test_loss = torch.zeros(1)
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    my_cnn.eval()  # switch off dropout for evaluation
    with torch.no_grad():  # no gradient tracking at test time (replaces the deprecated volatile=True)
        for batch_i, data in enumerate(test_loader):
            # get the input images and their corresponding labels
            inputs, labels = data
            # forward pass to get outputs
            outputs = my_cnn(inputs)
            # calculate the loss
            loss = loss_fun(outputs, labels)
            # update the running average test loss
            test_loss = test_loss + ((torch.ones(1) / (batch_i + 1)) * (loss.data - test_loss))
            # get the predicted class from the maximum value in the output-list of class scores
            _, predicted = torch.max(outputs.data, 1)
            # compare predictions to the true labels
            is_correct = predicted.eq(labels.data.view_as(predicted))
            correct += int(is_correct.sum())
            # tally per-class results (these lists were declared above but never filled)
            for i in range(labels.size(0)):
                label = labels[i].item()
                class_correct[label] += int(is_correct[i])
                class_total[label] += 1
            # fix: the denominator is batch_size * number of batches seen so far
            acc = correct / (batch_size * (batch_i + 1))
            print(' Step: ', batch_i + 1, ' loss: ', float(loss), ' accuracy: ', acc)
    my_cnn.train()  # restore training mode
    print('Average test loss: ', float(test_loss))
    for i in range(10):
        print('Accuracy of %s: %.2f%%' % (classes[i], 100 * class_correct[i] / class_total[i]))

# Alternatively, the overall accuracy can be computed like this:
#
# correct = 0
# total = 0
#
# # iterate through the test dataset
# for images, labels in test_loader:
#     # forward pass to get outputs (a batch of class scores)
#     outputs = my_cnn(images)
#     # get the predicted class from the maximum value in the output-list of class scores
#     _, predicted = torch.max(outputs.data, 1)
#     # count up the labels for which the predicted and true labels are equal
#     total += labels.size(0)
#     correct += (predicted == labels).sum()
#
# # calculate and print the accuracy
# accuracy = 100 * correct / total
# print('Accuracy: ', accuracy)

if __name__ == '__main__':
    # first run: train, plot the loss curve, and save the model
    # training_loss = train_fun(3)
    # print(training_loss)
    # plt.plot(training_loss)
    # plt.xlabel('100\'s of batches')  # the loss is logged every 100 steps
    # plt.ylabel('loss')
    # plt.ylim(0, 2.5)  # consistent scale
    # plt.show()
    # save()
    restore()
    test_fun()

 

Alright, let's get started. First, we need to prepare the MNIST dataset. We can use PyTorch's built-in torchvision.datasets library to download and load it.

```python
import torch
import torchvision
import torchvision.transforms as transforms

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5,), (0.5,))])

trainset = torchvision.datasets.MNIST(root='./data', train=True,
                                      download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64,
                                          shuffle=True, num_workers=2)

testset = torchvision.datasets.MNIST(root='./data', train=False,
                                     download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64,
                                         shuffle=False, num_workers=2)
```

Next, we can define our convolutional neural network model. Here we define two convolutional layers and two fully connected layers.

```python
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)  # channel-wise dropout on the conv feature maps
        self.dropout2 = nn.Dropout(0.5)     # plain dropout on the flattened features
        self.fc1 = nn.Linear(9216, 128)     # 64 channels * 12 * 12 = 9216 after pooling
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        # return raw logits: CrossEntropyLoss below applies log-softmax itself,
        # so applying F.log_softmax here as well would double it up
        return self.fc2(x)

net = Net()
```

Next, we can define the loss function and the optimizer. In this example we use the cross-entropy loss and stochastic gradient descent with momentum.

```python
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
```

Now we can start training the model. In each epoch we iterate over the whole training set and optimize with stochastic gradient descent; afterwards we also measure the model's accuracy on the test set.

```python
for epoch in range(10):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:  # print the loss every 100 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0

correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
```

After training, we can save the model and test it on new data.

```python
PATH = './cnn.pth'
torch.save(net.state_dict(), PATH)

net = Net()
net.load_state_dict(torch.load(PATH))

# test on one batch
dataiter = iter(testloader)
images, labels = next(dataiter)  # dataiter.next() was removed in newer PyTorch

outputs = net(images)
_, predicted = torch.max(outputs, 1)

print('Predicted: ', ' '.join('%5s' % predicted[j].item() for j in range(10)))
```

Congratulations, you have now successfully built a convolutional neural network with PyTorch that can recognize handwritten digits!
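
One detail the walkthrough above leaves out: this network contains dropout layers, which behave differently during training and evaluation. Calling net.eval() before testing disables dropout so predictions are deterministic, and net.train() re-enables it. A minimal sketch of the pattern, reusing the net and images variables from the last snippet:

```python
net.eval()   # evaluation mode: dropout is disabled
with torch.no_grad():
    outputs = net(images)
net.train()  # back to training mode so dropout is active again
```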
