Deep Learning: Convolutional Neural Networks (CNN)

Convolutional Neural Networks

A convolutional neural network is typically composed of convolutional layers, activation layers, pooling (downsampling) layers, fully connected layers, and a probability output layer.

Why convolutional neural networks? In plain terms, the properties a CNN has by construction, such as locality and translation equivariance, let it reduce a large volume of raw pixel data to a compact feature vector, i.e. extract features from an image effectively.

Convolutional layer

A convolutional layer convolves the input image with multi-channel kernels to produce a multi-channel feature map; the kernel weights are learnable parameters.

  • Principle 1: the number of channels of a kernel equals the number of channels of the input image; a grayscale input needs 1-channel kernels, an RGB input needs 3-channel kernels.
  • Principle 2: the number of channels of the output feature map equals the number of kernels, so using several kernels yields several feature maps.

Lower convolutional layers extract local features of the image, while higher layers extract global features. The two principles above are illustrated by the short shape check below.
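
As a minimal sketch (my own illustration, not part of the original post), the shapes reported by nn.Conv2d confirm both principles: each of the 16 kernels has 3 channels to match the RGB input, and the output has 16 channels, one per kernel.

import torch
from torch import nn

x = torch.randn(1, 3, 64, 64)  # one RGB image: 3 channels, 64x64 pixels
conv = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3)  # 16 kernels of shape 3x3x3
print(conv.weight.shape)  # torch.Size([16, 3, 3, 3]): kernel channels = input channels (principle 1)
print(conv(x).shape)      # torch.Size([1, 16, 62, 62]): output channels = number of kernels (principle 2)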

Activation layer

An activation layer applies a nonlinear function element-wise to the input feature map. ReLU is the most commonly used nonlinear activation in convolutional neural networks.

Activation layers usually contain no learnable parameters.
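
A tiny sketch (my own, not from the original post) showing both points: ReLU acts element-wise, and the layer exposes no parameters.

import torch
from torch import nn

relu = nn.ReLU()
x = torch.tensor([[-1.0, 2.0], [0.5, -3.0]])
print(relu(x))                  # negatives are clamped to 0, element by element
print(list(relu.parameters()))  # [] -- nothing for the optimizer to learn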

Pooling layer

A pooling layer computes the maximum or the average over local regions of the feature map, lowering its resolution (a form of dimensionality reduction). This helps the network extract deeper features, saves computation, and mitigates overfitting.

The common pooling methods are max pooling and average pooling.
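
A minimal example (mine, not from the original post) of both pooling operations on a 4x4 feature map; a 2x2 window halves the resolution.

import torch
import torch.nn.functional as F

x = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)  # one 1-channel 4x4 feature map
print(F.max_pool2d(x, kernel_size=2).shape)  # torch.Size([1, 1, 2, 2]): resolution halved
print(F.avg_pool2d(x, kernel_size=2))        # each output value is the mean of a 2x2 window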

Fully connected layer

A fully connected layer maps the input features to the output features by matrix multiplication; it contains the learnable parameters w and b.
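
A short sketch (my own illustration) of the shapes involved: nn.Linear stores a weight matrix and a bias vector and computes y = xW^T + b.

import torch
from torch import nn

fc = nn.Linear(in_features=32, out_features=3)  # learnable W (3x32) and b (3)
print(fc.weight.shape, fc.bias.shape)           # torch.Size([3, 32]) torch.Size([3])
print(fc(torch.randn(8, 32)).shape)             # torch.Size([8, 3]): 32-dim features mapped to 3 outputs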

Probability output layer

Converts an arbitrary feature vector into a probability distribution.
For binary classification, the sigmoid function can be used.
For multi-class classification, the softmax function can be used.
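
A quick numeric check (my own, not in the original post): sigmoid squashes a single logit into a probability, while softmax turns a logit vector into a distribution that sums to 1.

import torch

print(torch.sigmoid(torch.tensor(0.3)))  # a single probability in (0, 1) for binary classification
logits = torch.tensor([1.0, 2.0, 0.5])
probs = torch.softmax(logits, dim=0)     # multi-class: non-negative entries
print(probs, probs.sum())                # the entries sum to 1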

Hands-on: Building a Convolutional Neural Network with PyTorch

We implement 2D convolution by hand and run a classification experiment on a vehicle-classification dataset. The main difficulty is defining the 2D convolution operation manually: a custom single-channel convolution, a custom multi-channel convolution, a custom convolution layer, and so on.

import torch  
import numpy as np  
import random  
from IPython import display  
from matplotlib import pyplot as plt  
import torch.utils.data as Data  
from PIL import Image  
import os  
from torch import nn  
import torch.optim as optim  
from torch.nn import init  
import torch.nn.functional as F  
import time  
plt.switch_backend('agg')

#Read the image dataset
#NOTE: Result_Path and the log-file handle f are assumptions; the original post relies on them
#being defined elsewhere. Point Result_Path at a folder with one sub-directory per class.
Result_Path = "./dataset"
f = open("log.txt", "w")
def readimg(bus_img,car_img,truck_img):
    for home, dirs, files in os.walk(Result_Path):
        for dir in dirs:
            #flag marking the first image of each class
            cal = 0
            #path of the current class
            curdir = Result_Path + "/" + dir
            for img in os.listdir(curdir):
                img = Image.open(Result_Path + "/" + dir + "/"+ img)
                img = np.array(img,np.float32)
                #add a batch dimension
                img = np.expand_dims(img, 0)
                #normalize to [0, 1]
                img = img/255
                #convert 1*H*W*C to 1*C*H*W (torch.tensor copies, so memory is not shared)
                tensor_img = torch.tensor(np.transpose(img, (0, 3, 1, 2)))
                #bus class
                if dir == "bus":
                    if cal == 0:
                        bus_img = tensor_img
                    else:
                        bus_img = torch.cat((bus_img,tensor_img),0)
                elif dir == "car":
                    if cal == 0:
                        car_img = tensor_img
                    else:
                        car_img = torch.cat((car_img,tensor_img),0)
                elif dir=="truck":
                    if cal == 0:
                        truck_img = tensor_img
                    else:
                        truck_img = torch.cat((truck_img,tensor_img),0)
                cal = cal + 1
    return bus_img,car_img,truck_img
C, H, W = 3, 64, 64
#Read the three classes of images: bus, car, truck
bus_img = torch.zeros(1,C,H,W)
car_img = torch.zeros(1,C,H,W)
truck_img = torch.zeros(1,C,H,W)
bus_img,car_img,truck_img = readimg(bus_img,car_img,truck_img)
#Split into training and test sets; roughly the last 25% of each class is used for testing
train_bus_img = bus_img[:int(bus_img.shape[0]*3/4)]
train_car_img = car_img[:int(car_img.shape[0]*3/4)]
train_truck_img = truck_img[:int(truck_img.shape[0]*3/4)]
test_bus_img = bus_img[int(bus_img.shape[0]*3/4):]
test_car_img = car_img[int(car_img.shape[0]*3/4):]
test_truck_img = truck_img[int(truck_img.shape[0]*3/4):]
#Concatenate the training sets and test sets of all classes
train_img = torch.cat((train_bus_img,train_car_img,train_truck_img),0)
print(train_img.shape)
print(train_img.shape,file=f)
test_img = torch.cat((test_bus_img,test_car_img,test_truck_img),0)
print(test_img.shape)
print(test_img.shape,file=f)
  
#Define labels per class: bus--0, car--1, truck--2
label_bus_img = torch.zeros(bus_img.shape[0])
label_car_img = torch.ones(car_img.shape[0])
label_truck_img = torch.ones(truck_img.shape[0])+1
#Split the labels into training and test sets; roughly the last 25% of each class is used for testing
train_label_bus_img = label_bus_img[:int(label_bus_img.shape[0]*3/4)]
train_label_car_img = label_car_img[:int(label_car_img.shape[0]*3/4)]
train_label_truck_img = label_truck_img[:int(label_truck_img.shape[0]*3/4)]
test_label_bus_img = label_bus_img[int(label_bus_img.shape[0]*3/4):]
test_label_car_img = label_car_img[int(label_car_img.shape[0]*3/4):]
test_label_truck_img = label_truck_img[int(label_truck_img.shape[0]*3/4):]
#Concatenate the training-set and test-set labels of all classes
train_label = torch.cat((train_label_bus_img,train_label_car_img,train_label_truck_img),0)
print(train_label.shape)
print(train_label.shape,file=f)
test_label = torch.cat((test_label_bus_img,test_label_car_img,test_label_truck_img),0)
print(test_label.shape)
print(test_label.shape,file=f)
#Hyperparameters
batch_size = 32
num_classes = 3
lr = 0.001
epochs = 5
device = torch.device("cpu")
#Use DataLoader to batch and iterate over the data
# combine the training features and labels
dataset = Data.TensorDataset(train_img, train_label)
# wrap the dataset in a DataLoader
train_iter = Data.DataLoader(
    dataset=dataset, # torch TensorDataset format
    batch_size=batch_size, # mini batch size
    shuffle=True, # shuffle the data (the training set usually should be shuffled)
    num_workers=1, # number of worker processes; set to 0 on Windows
)
# combine the test features and labels
dataset = Data.TensorDataset(test_img, test_label)
# wrap the dataset in a DataLoader
test_iter = Data.DataLoader(
    dataset=dataset, # torch TensorDataset format
    batch_size=batch_size, # mini batch size
    shuffle=True, # shuffling is optional for the test set
    num_workers=1, # number of worker processes; set to 0 on Windows
)
#Custom single-channel convolution
def corr2d(X,K):
    '''
    X: input, shape (batch_size, H, W)
    K: kernel, shape (k_h, k_w)
    single channel
    '''
    batch_size,H,W = X.shape
    k_h, k_w = K.shape
    #initialize the output
    Y = torch.zeros((batch_size,H - k_h + 1,W - k_w + 1))
    for i in range(Y.shape[1]):
        for j in range(Y.shape[2]):
            #sum over the window separately for each sample in the batch
            Y[:,i,j] = (X[:,i:i+k_h,j:j+k_w] * K).sum(dim=(1, 2))
    return Y
  
#Custom multi-channel (multiple input channels) convolution
def corr2d_multi_in(X, K):
    #input X: shape (batch_size, C_in, H, W)
    #kernel K: shape (C_in, k_h, k_w)
    #output: shape (batch_size, H_out, W_out)

    #compute the first channel first
    res = corr2d(X[:,0,:,:], K[0,:,:])
    for i in range(1, X.shape[1]):
        #accumulate over the remaining channels
        res += corr2d(X[:,i,:,:], K[i,:,:])
    return res

#Custom convolution with multiple input and output channels
def corr2d_multi_in_out(X, K):
    # X: shape (batch_size, C_in, H, W)
    # K: shape (C_out, C_in, h, w)
    # Y: shape (batch_size, C_out, H_out, W_out)
    return torch.stack([corr2d_multi_in(X, k) for k in K],dim=1)
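
#Optional sanity check (my addition, not in the original post): on random data the
#hand-written convolution should agree with PyTorch's built-in F.conv2d.
_X = torch.randn(2, 3, 8, 8)   # batch of 2 images, 3 channels
_K = torch.randn(4, 3, 3, 3)   # 4 output channels, 3 input channels, 3x3 kernels
print(torch.allclose(corr2d_multi_in_out(_X, _K), F.conv2d(_X, _K), atol=1e-4))  # expect True
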
#Custom convolution layer
class MyConv2D(nn.Module):
    def __init__(self,in_channels, out_channels,kernel_size):
        super(MyConv2D,self).__init__()
        #two learnable parameters: the kernel weights and the bias
        #isinstance checks the type; the parameters must be created in every case
        if isinstance(kernel_size,int):
            kernel_size = (kernel_size,kernel_size)
        self.weight = nn.Parameter(torch.randn((out_channels, in_channels) + kernel_size))
        self.bias = nn.Parameter(torch.randn(out_channels,1,1))
    def forward(self,x):
        '''
        x: input images, shape (batch_size, C_in, H, W)
        '''
        return corr2d_multi_in_out(x,self.weight) + self.bias
#Wrap the custom convolution layer in a module
class MyConvModule(nn.Module):
    def __init__(self):
        super(MyConvModule,self).__init__()
        #a single convolutional block
        self.conv = nn.Sequential(
            MyConv2D(in_channels = 3,out_channels = 32,kernel_size = 3),
            nn.BatchNorm2d(32),
            # inplace=True performs the ReLU in place
            nn.ReLU(inplace=True)
        )
        #output layer: map the channel count to the number of classes
        self.fc = nn.Linear(32,num_classes)

    def forward(self,x):
        #after one convolution the output has shape (batch_size, C_out, H, W)
        out = self.conv(x)
        #average-pool the feature map down to 1x1; the second argument is the spatial size,
        #here (64-3)/1 + 1 = 62 (height and width are assumed equal)
        out = F.avg_pool2d(out,62)
        #reshape out from (batch, 32, 1, 1) to (batch, 32); squeeze only the spatial dims
        #so a batch of size 1 keeps its batch dimension
        out = out.squeeze(-1).squeeze(-1)
        #the fully connected layer maps the 32 features to 3 class scores
        out = self.fc(out)
        return out
#Instantiate the model
net = MyConvModule().to(device)
#multi-class cross-entropy loss
criterion = nn.CrossEntropyLoss()
#Adam optimizer
optimizer = optim.Adam(net.parameters(),lr = lr)
#Training and evaluation loops
def train_epoch(net, data_loader, device):

    net.train() #switch to training mode
    train_batch_num = len(data_loader) #number of batches
    total_loss = 0 #accumulated loss
    correct = 0 #number of correctly classified samples
    sample_num = 0 #total number of samples

    #iterate over the batches
    for batch_idx, (data,target) in enumerate(data_loader):
        #move the images to the chosen device
        data = data.to(device).float()
        #move the labels to the chosen device
        target = target.to(device).long()
        #reset the gradients
        optimizer.zero_grad()
        #forward pass
        output = net(data)
        #compute the loss
        loss = criterion(output, target)
        #backward pass and parameter update
        loss.backward()
        optimizer.step()
        #accumulate the loss
        total_loss += loss.item()
        #the index of the largest score is the predicted class
        prediction = torch.argmax(output, 1)
        #count the correct predictions
        correct += (prediction == target).sum().item()
        #accumulate the sample count
        sample_num += len(prediction)
    #average loss and accuracy
    loss = total_loss / train_batch_num
    acc = correct / sample_num
    return loss, acc

def test_epoch(net, data_loader, device):
    net.eval() #switch to evaluation mode
    test_batch_num = len(data_loader)
    total_loss = 0
    correct = 0
    sample_num = 0
    #disable gradient computation
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(data_loader):
            data = data.to(device).float()
            target = target.to(device).long()
            output = net(data)
            loss = criterion(output, target)
            total_loss += loss.item()
            prediction = torch.argmax(output, 1)
            correct += (prediction == target).sum().item()
            sample_num += len(prediction)
    loss = total_loss / test_batch_num
    acc = correct / sample_num
    return loss,acc

#Track the loss and accuracy of every epoch for later plotting
train_loss_list = []
train_acc_list = []
test_loss_list = []
test_acc_list = []
time_list = []
#time.clock() was removed in Python 3.8, so use time.perf_counter() instead
timestart = time.perf_counter()
#training loop
for epoch in range(epochs):
    #start time of this epoch
    epochstart = time.perf_counter()

    #train on the training set
    train_loss, train_acc = train_epoch(net,data_loader=train_iter, device=device)
    #evaluate on the test set
    test_loss, test_acc = test_epoch(net,data_loader=test_iter, device=device)

    #elapsed time of this epoch
    elapsed = (time.perf_counter() - epochstart)
    #record the metrics
    train_loss_list.append(train_loss)
    train_acc_list.append(train_acc)
    test_loss_list.append(test_loss)
    test_acc_list.append(test_acc)
    time_list.append(elapsed)
    print('epoch %d, train_loss %.6f,test_loss %.6f,train_acc %.6f,test_acc %.6f,Time used %.6fs'%(epoch+1, train_loss,test_loss,train_acc,test_acc,elapsed))
#total training time
timesum = (time.perf_counter() - timestart)
print('The total time is %fs' % timesum)

#Plot the curves
x = np.arange(1, len(train_loss_list) + 1)
plt.plot(x,train_loss_list,label="train_loss",linewidth=1.5)
plt.plot(x,test_loss_list,label="test_loss",linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
#plt.show()
plt.savefig('2loss.jpg')
plt.clf()

x = np.arange(1, len(train_acc_list) + 1)
plt.plot(x,train_acc_list,label="train_acc",linewidth=1.5)
plt.plot(x,test_acc_list,label="test_acc",linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("acc")
plt.legend()
#plt.show()
plt.savefig('2acc.jpg')

Alternatively, the model can be defined with the layers provided by torch.nn; the rest of the code is identical, so only the model definition is shown here:

#Model built from PyTorch's own convolution layers
class ConvModule(nn.Module):
    def __init__(self):
        super(ConvModule,self).__init__()
        #three convolutional blocks
        self.conv = nn.Sequential(
            #first block
            nn.Conv2d(in_channels = 3,out_channels = 32,kernel_size = 3 , stride = 1,padding=0),
            nn.BatchNorm2d(32),
            # inplace=True performs the ReLU in place
            nn.ReLU(inplace=True),
            #second block
            nn.Conv2d(in_channels = 32,out_channels = 64,kernel_size = 3 , stride = 1,padding=0),
            nn.BatchNorm2d(64),
            # inplace=True performs the ReLU in place
            nn.ReLU(inplace=True),
            #third block
            nn.Conv2d(in_channels = 64,out_channels = 128,kernel_size = 3 , stride = 1,padding=0),
            nn.BatchNorm2d(128),
            # inplace=True performs the ReLU in place
            nn.ReLU(inplace=True)
        )
        #output layer: map the channel count to the number of classes
        self.fc = nn.Linear(128,num_classes)

    def forward(self,x):
        #after the three convolutions the output has shape (batch_size, C_out, H, W)
        out = self.conv(x)
        #average-pool the feature map down to 1x1; the second argument is the spatial size:
        #(64-3)/1 + 1 = 62, (62-3)/1 + 1 = 60, (60-3)/1 + 1 = 58 (height and width are assumed equal)
        out = F.avg_pool2d(out,58)
        #reshape out from (batch, 128, 1, 1) to (batch, 128)
        out = out.squeeze(-1).squeeze(-1)
        #the fully connected layer maps the 128 features to 3 class scores
        out = self.fc(out)
        return out