torch_feed_mynet

code_torch_feed_mynet

  • Here the model is swapped for a neural network we define ourselves.

  • import torch.nn as nn
    
    
    class MyNetwork(nn.Module):
    
        def __init__(self, input_size=28 * 28, hidden_size=100, num_classes=10):
            super(MyNetwork, self).__init__()
            # A two-layer fully-connected network with a ReLU activation in between
            self.fc1 = nn.Linear(in_features=input_size, out_features=hidden_size)
            self.relu = nn.ReLU()
            self.fc2 = nn.Linear(in_features=hidden_size, out_features=num_classes)
    
        def forward(self, x):
            # forward defines the forward pass
            out1 = self.fc1(x)
            out_relu1 = self.relu(out1)
            out2 = self.fc2(out_relu1)
            return out2
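
  • A minimal smoke test, sketched here as an addition (it assumes the class above is saved as MyNetwork.py, matching the import used in the training script below): push a fake batch through and confirm the output shape.

  • import torch
    from MyNetwork import MyNetwork
    
    # Hypothetical sanity check: a fake batch of 4 flattened 28x28 images
    net = MyNetwork(input_size=28 * 28, hidden_size=100, num_classes=10)
    dummy = torch.randn(4, 28 * 28)
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([4, 10])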
    
    
  • Then, starting from the logistic regression code, swap the model for MyNetwork.

  • import torch
    import torch.nn as nn
    import torchvision
    import torchvision.transforms as transforms
    from MyNetwork import MyNetwork
    
    ##############################################################################
    
    # This script only modifies the model part of torch_logistic_regression
    
    ##############################################################################
    # Hyper-parameters
    input_size = 28 * 28
    num_classes = 10
    num_epochs = 20
    batch_size = 100
    learning_rate = 0.001
    
    
    # Mnist dataset
    train_dataset = torchvision.datasets.MNIST(root='../../data',
                                               train=True,
                                               transform=transforms.ToTensor(),
                                               download=True)
    test_dataset = torchvision.datasets.MNIST(root='../../data',
                                              train=False,
                                              transform=transforms.ToTensor())
    
    
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              shuffle=False)
    
    
    # model = nn.Linear(input_size, num_classes)
    model = MyNetwork(input_size=input_size, hidden_size=100, num_classes=num_classes)
    
    # Loss and optimizer
    # nn.CrossEntropyLoss() computes softmax internally
    criterion = nn.CrossEntropyLoss()
    # Adam was tried here and works reasonably well; on this task it does a bit better than SGD
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
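    # (Added as a hedged alternative, not from the original post: plain SGD is
    # the usual baseline to compare against; it typically needs a larger lr to
    # match Adam on this task.)
    # optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)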
    
    # Train the model
    total_step = len(train_loader)
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_loader):
            # Reshape images to (batch_size, input_size)
            images = images.reshape(-1, input_size)
    
            # Forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)
    
            # backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i + 1) % 100 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
    
    
    # Test the model; gradients are not needed in the test phase
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_loader:
            images = images.reshape(-1, input_size)
            outputs = model(images)
            # Take the class with the largest score as the prediction
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        # Compute and report the accuracy
        print('Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
    
    
    
    # Save the model checkpoint
    torch.save(model.state_dict(), 'model.ckpt')
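
  • A minimal reload sketch, added here as an assumption (a fresh process rebuilding the same architecture): load the saved 'model.ckpt' back for inference.

  • import torch
    from MyNetwork import MyNetwork
    
    # Rebuild the same architecture, then load the trained weights
    model = MyNetwork(input_size=28 * 28, hidden_size=100, num_classes=10)
    model.load_state_dict(torch.load('model.ckpt'))
    model.eval()  # eval mode before inference; good practice even without dropout/batchnorm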
    
    