Importing the CIFAR Dataset
The CIFAR-10 dataset contains 60,000 32×32 color images in 10 classes, with 6,000 images per class. It is split into 50,000 training images and 10,000 test images.
The torchvision package makes it very convenient to import the CIFAR dataset directly.
You pass in the data root, whether to load the training split, a transform, and other parameters. The transform parameter is usually a transforms.Compose([...]) that chains a series of transformations. With download=True, the raw CIFAR dataset is downloaded from the internet automatically:
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

cifar_train = datasets.CIFAR10('cifar', True, transform=transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor()
]), download=True)
When loading the data in a program, you normally wrap the dataset in a DataLoader, which mainly specifies the batch size and whether the samples should be shuffled.
batchsz = 32  # batch size; any reasonable value works
cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)
That completes the preparation of the training data; the test set is prepared the same way:
cifar_test = datasets.CIFAR10('cifar', False, transform=transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor()
]), download=True)
cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=True)
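The Compose list can be extended with further transforms. As a sketch (not part of the original code), a common addition for CIFAR-10 is per-channel normalization; the mean/std numbers below are the widely quoted CIFAR-10 channel statistics, assumed here rather than computed:

transform_train = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    # widely quoted CIFAR-10 per-channel mean/std (assumed values)
    transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                         std=[0.2023, 0.1994, 0.2010])
])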
Let's test the dataset:
Use iter() to get an iterator over the DataLoader and next() to pull one batch, then print the shapes of the data.
x, label = next(iter(cifar_train))
print('x:', x.shape, 'label:', label.shape)
Output (with batchsz = 32): x: torch.Size([32, 3, 32, 32]) label: torch.Size([32])
Implementing and Experimenting with LeNet-5
Network structure and the forward pass:
Following the network structure, the input first passes through two convolutional units, each consisting of a convolution followed by subsampling (implemented here as a pooling operation). After these two units the feature map has shape [b,16,5,5]. A view call then flattens it to [b,16*5*5], and finally the fully connected unit (three linear layers) maps it to [b,10], from which the classification result can be read off.
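As a sanity check on these shapes, the standard output-size formula for a convolution or pooling layer, out = (in + 2*padding - kernel) // stride + 1, can be traced through both units; a small helper sketch, separate from the model code:

def out_size(n, k, s=1, p=0):
    # spatial output size of a conv/pool layer
    return (n + 2 * p - k) // s + 1

n = out_size(32, k=5)       # conv1: 32 -> 28
n = out_size(n, k=2, s=2)   # pool1: 28 -> 14
n = out_size(n, k=5)        # conv2: 14 -> 10
n = out_size(n, k=2, s=2)   # pool2: 10 -> 5, so the flattened size is 16*5*5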
import torch
from torch import nn

class Lenet5(nn.Module):

    def __init__(self):
        super(Lenet5, self).__init__()
        self.conv_unit = nn.Sequential(
            # x: [b,3,32,32] => [b,6,28,28]
            nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),
            # [b,6,28,28] => [b,6,14,14]
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
            # second convolutional layer: [b,6,14,14] => [b,16,10,10]
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            # [b,16,10,10] => [b,16,5,5]
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
        )
        # flatten [b,16,5,5]
        # fc unit
        self.fc_unit = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, 10)
        )
        # test code
        tmp = torch.randn(2, 3, 32, 32)
        out = self.conv_unit(tmp)
        # [b,16,5,5]
        print('conv out', out.shape)
        # CrossEntropyLoss applies softmax internally, so the network outputs raw logits
        # self.criteon = nn.CrossEntropyLoss()
    def forward(self, x):
        """
        :param x: [b,3,32,32]
        :return: logits of shape [b,10]
        """
        batchsz = x.size(0)  # equivalent to x.shape[0]
        # [b,3,32,32] => [b,16,5,5]
        x = self.conv_unit(x)
        # [b,16,5,5] => [b,16*5*5]
        x = x.view(batchsz, 16 * 5 * 5)
        # [b,16*5*5] => [b,10]
        logits = self.fc_unit(x)
        return logits
After writing the forward pass, you can create a random tensor in main and run it through the network to check the output shape as a test.
def main():
    net = Lenet5()
    tmp = torch.randn(2, 3, 32, 32)
    out = net(tmp)
    # output of the full network is [b,10], not [b,16,5,5]
    print('lenet out', out.shape)
    # lenet out torch.Size([2, 10])
Now train the model, updating the gradient information once per batch.
At the end of each epoch (i.e., once every batch has been trained on), print the model's loss, then run a forward pass over the test set with the current model; no gradient information needs to be recorded during the test.
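Before the training loop, note that nn.CrossEntropyLoss expects raw logits of shape [b,10] and integer class labels of shape [b]; the labels do not need to be one-hot probability vectors. A minimal check with made-up values:

criteon = nn.CrossEntropyLoss()
logits = torch.randn(4, 10)          # [b,10] raw scores, no softmax applied
labels = torch.tensor([3, 0, 9, 1])  # [b] class indices, not probabilities
loss = criteon(logits, labels)       # scalar tensor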
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Lenet5().to(device)
criteon = nn.CrossEntropyLoss().to(device)  # includes softmax internally
optimizer = optim.Adam(model.parameters(), lr=1e-3)
print(model)

for epoch in range(1000):
    model.train()
    for batchIndex, (x, label) in enumerate(cifar_train):
        # x: [b,3,32,32]
        # label: [b]
        x, label = x.to(device), label.to(device)
        logits = model(x)
        # logits: [b,10]
        # label: [b]; note that label holds class indices, not per-class probabilities
        loss = criteon(logits, label)
        # backprop
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print(epoch, loss.item())

    # no backpropagation is needed during testing,
    # so no gradient information has to be computed
    model.eval()
    with torch.no_grad():
        # test
        total_correct = 0
        total_num = 0
        for x, label in cifar_test:
            x, label = x.to(device), label.to(device)
            # [b,10]
            logits = model(x)
            # argmax returns the index of the largest value along a dimension
            # max returns the largest value itself (together with its index)
            pred = logits.argmax(dim=1)
            # count how many predictions in this batch match the labels
            total_correct += torch.eq(pred, label).float().sum().item()
            total_num += x.size(0)
        acc = total_correct / total_num
        print(epoch, acc)
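To illustrate the argmax/max distinction mentioned in the comments, a throwaway example:

t = torch.tensor([[0.1, 2.0, 0.3],
                  [1.5, 0.2, 0.9]])
print(t.max(dim=1))     # (values=[2.0, 1.5], indices=[1, 0])
print(t.argmax(dim=1))  # indices only: tensor([1, 0])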
Implementing It with ResNet
First, implement a residual block:
from torch import nn
import torch.nn.functional as F

class ResBlock(nn.Module):
    """
    ResNet block
    """

    def __init__(self, ch_in, ch_out):
        """
        :param ch_in: number of input channels
        :param ch_out: number of output channels
        """
        super(ResBlock, self).__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)

        self.extra = nn.Sequential()
        # if the input and output channel counts differ,
        # use a 1x1 convolution on the shortcut to make them match
        if ch_out != ch_in:
            # [b,ch_in,h,w] => [b,ch_out,h,w]
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        """
        :param x: [b,ch,h,w]
        :return:
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # shortcut
        # element-wise add: [b,ch_in,h,w] with [b,ch_out,h,w]
        out = self.extra(x) + out
        return out
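A quick shape check of the block, with arbitrary example sizes:

blk = ResBlock(64, 128)
tmp = torch.randn(2, 64, 32, 32)
out = blk(tmp)
print(out.shape)  # torch.Size([2, 128, 32, 32]); channels change, h and w do not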
Now implement a ResNet18 made up of four such blocks followed by a fully connected layer:
class ResNet18(nn.Module):

    def __init__(self):
        super(ResNet18, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64)
        )
        # followed by 4 blocks
        # [b,64,h,w] => [b,64,h,w]
        self.blk1 = ResBlock(64, 64)
        # [b,64,h,w] => [b,128,h,w]
        self.blk2 = ResBlock(64, 128)
        # [b,128,h,w] => [b,256,h,w]
        self.blk3 = ResBlock(128, 256)
        # [b,256,h,w] => [b,512,h,w]
        self.blk4 = ResBlock(256, 512)

        # no downsampling anywhere, so the feature map is still 32x32
        self.outlayer = nn.Linear(512 * 32 * 32, 10)

    def forward(self, x):
        """
        :param x: [b,3,32,32]
        :return:
        """
        x = F.relu(self.conv1(x))
        # [b,64,h,w] => [b,512,h,w]
        x = self.blk1(x)
        x = self.blk2(x)
        x = self.blk3(x)
        x = self.blk4(x)

        x = x.view(x.size(0), -1)  # flatten
        x = self.outlayer(x)
        return x
If the network has too many parameters to run on your hardware, you can reduce the number of blocks.
In the main file, replace the model with ResNet18 and leave everything else unchanged to train this network.
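Concretely, the only line that changes is the model construction; the loss, optimizer, and training/test loops stay exactly as in the LeNet-5 experiment:

# was: model = Lenet5().to(device)
model = ResNet18().to(device)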