DenseNet implementation in PyTorch (CIFAR-10 training script)

import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

# Normalize each RGB channel from [0, 1] to [-1, 1] via (x - 0.5) / 0.5.
transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))])
# CIFAR-10: 50k train / 10k test 32x32 colour images, downloaded to ./data on first run.
trainset = torchvision.datasets.CIFAR10(root='./data',train=True,download=True,transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data',train=False,download=True,transform=transform)
# Class index -> human-readable label (order is fixed by the CIFAR-10 dataset).
classes=('plane','car','bird','cat','deer','dog','frog','horse','ship','truck')

class dense_block(nn.Module):
    """A 5-layer dense block with growth rate 32.

    Each 3x3 convolution consumes the channel-wise concatenation of all
    previous convolution outputs inside the block (DenseNet-style
    connectivity).  Regardless of ``in_channels`` the block emits
    5 * 32 = 160 feature maps at the input's spatial resolution.
    """

    def __init__(self, in_channels):
        super(dense_block, self).__init__()

        self.relu = nn.ReLU()
        # Normalize the incoming feature maps once, before the first conv.
        self.bn = nn.BatchNorm2d(num_features=in_channels)

        # conv_k consumes the concatenation of the previous (k-1) conv
        # outputs (32 channels each); conv1 consumes the normalized input.
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=3, padding=1, stride=1)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1, stride=1)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1, stride=1)
        self.conv4 = nn.Conv2d(in_channels=96, out_channels=32, kernel_size=3, padding=1, stride=1)
        self.conv5 = nn.Conv2d(in_channels=128, out_channels=32, kernel_size=3, padding=1, stride=1)

    def forward(self, x):
        """Return the concatenation of all five conv outputs, shape (N, 160, H, W)."""
        bn = self.bn(x)
        conv1 = self.relu(self.conv1(bn))
        conv2 = self.relu(self.conv2(conv1))
        # NOTE: the original code applied ReLU to each torch.cat as well.
        # Every concatenated tensor is already a ReLU output (non-negative),
        # so relu(cat(...)) == cat(...); the redundant activations are
        # dropped here without changing the computed values.
        conv3 = self.relu(self.conv3(torch.cat([conv1, conv2], 1)))
        conv4 = self.relu(self.conv4(torch.cat([conv1, conv2, conv3], 1)))
        conv5 = self.relu(self.conv5(torch.cat([conv1, conv2, conv3, conv4], 1)))
        return torch.cat([conv1, conv2, conv3, conv4, conv5], 1)

class transition_block(nn.Module):
    """Transition layer between dense blocks.

    A 1x1 convolution projects ``in_channels`` to ``out_channels``, followed
    by ReLU, BatchNorm and 2x2 average pooling that halves the spatial
    resolution.
    """

    def __init__(self, in_channels, out_channels):
        super(transition_block, self).__init__()
        # 1x1 projection; bias is omitted since BatchNorm follows.
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.bn = nn.BatchNorm2d(num_features=out_channels)
        self.avg_pl = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        projected = self.conv(x)
        activated = self.relu(projected)
        normalized = self.bn(activated)
        return self.avg_pl(normalized)




class DenseNet(nn.Module):
    """DenseNet-style classifier for 32x32 RGB inputs (e.g. CIFAR-10).

    Fixes relative to the original code:
    * ``denseblock4`` was constructed but never used in ``forward`` (dead
      module whose parameters were still being optimized).  It is now part
      of the pipeline, and the final BatchNorm / Linear sizes are corrected
      accordingly (every dense_block emits 160 channels).
    * The two classifier Linear layers had no nonlinearity between them,
      which collapses them into a single affine map; a ReLU is inserted.
    * Flattening uses ``out.size(0)`` so the batch dimension is preserved
      for any batch size.
    """

    def __init__(self, num_classes):
        super(DenseNet, self).__init__()

        self.in_conv = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, padding=3, bias=False)
        self.relu = nn.ReLU()

        # Each dense_block outputs 5 * 32 = 160 channels regardless of its input.
        self.denseblock1 = self.add_dense_block(dense_block, 64)
        self.transitionLayer1 = self.add_transition_block(transition_block, in_channels=160, out_channels=128)

        self.denseblock2 = self.add_dense_block(dense_block, 128)
        self.transitionLayer2 = self.add_transition_block(transition_block, in_channels=160, out_channels=128)

        self.denseblock3 = self.add_dense_block(dense_block, 128)
        self.transitionLayer3 = self.add_transition_block(transition_block, in_channels=160, out_channels=64)

        self.denseblock4 = self.add_dense_block(dense_block, 64)

        # Three transition layers halve 32x32 down to 4x4; denseblock4
        # leaves 160 channels, so the flattened feature size is 160*4*4.
        self.bn = nn.BatchNorm2d(num_features=160)
        self.lastlayer = nn.Linear(160 * 4 * 4, 512)
        self.final = nn.Linear(512, num_classes)

    def add_dense_block(self, block, in_channels):
        """Instantiate one dense block, wrapped in a Sequential container."""
        return nn.Sequential(block(in_channels))

    def add_transition_block(self, layers, in_channels, out_channels):
        """Instantiate one transition block, wrapped in a Sequential container."""
        return nn.Sequential(layers(in_channels, out_channels))

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, num_classes) class scores."""
        out = self.relu(self.in_conv(x))
        out = self.denseblock1(out)
        out = self.transitionLayer1(out)

        out = self.denseblock2(out)
        out = self.transitionLayer2(out)

        out = self.denseblock3(out)
        out = self.transitionLayer3(out)

        # Previously dead code: the fourth dense block is now applied.
        out = self.denseblock4(out)

        out = self.bn(out)
        out = out.view(out.size(0), -1)

        # ReLU between the two Linear layers so the head is not one affine map.
        out = self.relu(self.lastlayer(out))
        out = self.final(out)

        return out

if __name__ == '__main__':
    # Hyperparameters.
    learning_rate = 0.001
    momentum = 0.95
    batch_size = 10
    num_classes = 10
    num_epochs = 10
    loss_check = []  # running-average losses, one entry per 2000 mini-batches

    # Pick the device once.  The original called .cuda() on every batch
    # unconditionally, which crashed on CPU-only machines even though it
    # checked torch.cuda.is_available() for the model.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    Net = DenseNet(num_classes).to(device)

    trainloader = torch.utils.data.DataLoader(trainset,batch_size=batch_size,shuffle=True,num_workers=4)
    testloader = torch.utils.data.DataLoader(testset,batch_size=batch_size,shuffle=False,num_workers=4)

    # CrossEntropyLoss holds no parameters/buffers here, so it needs no device move.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(Net.parameters(),lr=learning_rate,momentum=momentum,nesterov=False)

    print("Start Training...")

    Net.train()
    for epoch in range(num_epochs):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # Variable is deprecated since PyTorch 0.4; tensors track gradients directly.
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            outputs = Net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if i % 2000 == 1999:
                loss_check.append(running_loss / 2000)
                print('[%d,%5d] loss : %.3f' % (epoch + 1,i + 1,running_loss / 2000))
                running_loss = 0.0
    print("^^^^^^^^^^^^^^^^^^^^^^^^")
    print('Finished Training.')

    # Evaluation: freeze BatchNorm statistics and skip gradient tracking.
    Net.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            images = images.to(device)
            outputs = Net(images)
            _, predicted = torch.max(outputs.cpu(), 1)
            total += labels.size(0)
            # .item() keeps `correct` a plain int rather than a 0-dim tensor.
            correct += (predicted == labels).sum().item()
    print('Accuracy on 10000 test image  : %d %% ' % (100 * correct / total))






















评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值