# AlexNet on FashionMNIST

import time
import numpy as np
import torch
from torch import nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# Run on GPU when available, otherwise fall back to the CPU.
# BUG FIX: the original fallback string was 'gpu', which is not a valid
# torch device type and raises RuntimeError on machines without CUDA.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# AlexNet-style CNN adapted to 1-channel 224x224 FashionMNIST inputs.
# Feature extractor: five conv layers, spatial size 224 -> 54 -> 27 -> 13 -> 6.
_feature_layers = [
    nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1),   # -> 96 x 54 x 54
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),                   # -> 96 x 27 x 27
    nn.Conv2d(96, 256, kernel_size=5, padding=2),            # -> 256 x 27 x 27
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),                   # -> 256 x 13 x 13
    nn.Conv2d(256, 384, kernel_size=3, padding=1),           # -> 384 x 13 x 13
    nn.ReLU(),
    nn.Conv2d(384, 384, kernel_size=3, padding=1),           # -> 384 x 13 x 13
    nn.ReLU(),
    nn.Conv2d(384, 256, kernel_size=3, padding=1),           # -> 256 x 13 x 13
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),                   # -> 256 x 6 x 6
]
# Classifier head: two dropout-regularized 4096-unit layers, 10-way output.
_classifier_layers = [
    nn.Flatten(),
    nn.Linear(6 * 6 * 256, 2048 * 2),
    nn.ReLU(),
    nn.Dropout(p=0.5),
    nn.Linear(2048 * 2, 2048 * 2),
    nn.ReLU(),
    nn.Dropout(p=0.5),
    nn.Linear(2048 * 2, 10),
]
net = nn.Sequential(*_feature_layers, *_classifier_layers)

# xx=torch.randn(size=(3,1,224,224),dtype=torch.float32)

# for layer in net:
#     xx=layer(xx)
#     print(layer.__class__.__name__,f'\t{xx.shape}')

# Upscale 28x28 FashionMNIST images to the 224x224 resolution AlexNet expects.
transform = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
])

# Datasets are expected to already exist on disk (download=False).
data_train = datasets.FashionMNIST(root="./dataset", train=True,
                                   transform=transform, download=False)
data_test = datasets.FashionMNIST(root="./dataset", train=False,
                                  transform=transform, download=False)

train_dataloader = DataLoader(dataset=data_train, batch_size=256, shuffle=True)
test_dataloader = DataLoader(dataset=data_test, batch_size=256, shuffle=True)


def init_weights(m):
    """Xavier-initialize the weights of conv and linear layers.

    Intended to be used as ``net.apply(init_weights)``; all other module
    types are left untouched.
    """
    # isinstance is the idiomatic type check and also covers subclasses,
    # unlike the original `type(m) == nn.Conv2d or type(m) == nn.Linear`.
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.xavier_normal_(m.weight)

def train_test(epoch, lr, net, train_iter, test_iter, device):
    """Train `net` with SGD and report per-epoch train/test accuracy.

    Args:
        epoch: number of passes over the training data.
        lr: SGD learning rate.
        net: model to train; its conv/linear weights are re-initialized
            in place via ``init_weights`` before training.
        train_iter: DataLoader yielding (inputs, labels) training batches.
        test_iter: DataLoader yielding (inputs, labels) evaluation batches.
        device: torch.device the model and batches are moved to.
    """
    print("training on", device)
    net.apply(init_weights)
    net = net.to(device)

    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    loss_fn = loss_fn.to(device)
    for i in range(epoch):
        # BUG FIX: the original called net.eval() after each epoch but never
        # switched back, so Dropout was silently disabled for epochs >= 2.
        net.train()
        correct_count = 0
        test_count = 0
        for x, y in train_iter:
            x = x.to(device)
            y = y.to(device)
            out = net(x)
            _, pred = torch.max(out.data, 1)
            correct_count += torch.sum(pred == y.data)

            optimizer.zero_grad()
            loss = loss_fn(out, y)
            loss.backward()
            optimizer.step()
        # Use the loader's own dataset length instead of the module-level
        # globals data_train/data_test the original silently depended on.
        print("Train Accuracy is:{:.4f}%".format(100 * correct_count / len(train_iter.dataset)))
        net.eval()
        with torch.no_grad():
            for xx, yy in test_iter:
                xx = xx.to(device)
                yy = yy.to(device)
                out = net(xx)

                _, pred = torch.max(out.data, 1)
                test_count += torch.sum(pred == yy.data)
        print("Test Accuracy is:{:.4f}%".format(100 * test_count / len(test_iter.dataset)))
        print(30 * '*')

# Hyper-parameters for the FashionMNIST training run.
lr = 0.01
epoch = 10

train_test(epoch=epoch, lr=lr, net=net,
           train_iter=train_dataloader, test_iter=test_dataloader,
           device=device)

# for layer in net:
#     if type(layer)==nn.Conv2d:
#         conv=layer.weight.data
#         print(conv)
       

    

          

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值