week5 8月8日

语义分割

目标检测的框:正正方方,对一个区域标号

语义分割:精确到耳朵皮肤等像素级别,对每一个像素都进行标号

用于背景虚化、路面分割

语义分割与实例分割其实就是标号方式的区别:语义分割把两只狗都标成同一类 dog,实例分割则区分出 dog1、dog2 两个个体。

import torch
import torchvision
import d2l.torch
import os
#@save
# Register the VOC2012 archive (URL + SHA-1 checksum) in d2l's data hub,
# then download and extract it; voc_dir points at the extracted VOC2012 root.
d2l.torch.DATA_HUB['voc2012'] = (d2l.torch.DATA_URL + 'VOCtrainval_11-May-2012.tar',
                           '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')
voc_dir = d2l.torch.download_extract('voc2012', 'VOCdevkit/VOC2012')
print('voc_dir = ',voc_dir)



 """读取所有VOC图像并标注"""
#@save
def read_voc_images(voc_dir, is_train=True):
    """Read all VOC images together with their segmentation labels.

    Returns two lists of tensors: the raw JPEG images (features) and the
    PNG label images decoded as RGB (ground truth for the loss later on).
    """
    split_file = os.path.join(voc_dir, 'ImageSets', 'Segmentation',
                              'train.txt' if is_train else 'val.txt')
    with open(split_file, 'r') as f:
        names = f.read().split()
    print('images_name.len = ',len(names))
    rgb_mode = torchvision.io.image.ImageReadMode.RGB
    features, labels = [], []
    for name in names:
        jpg_path = os.path.join(voc_dir, 'JPEGImages', f'{name}.jpg')
        png_path = os.path.join(voc_dir, 'SegmentationClass', f'{name}.png')
        features.append(torchvision.io.read_image(jpg_path))
        # Labels are stored as lossless PNG so class colors stay exact.
        labels.append(torchvision.io.read_image(png_path, rgb_mode))
    return features, labels
train_images,train_labels = read_voc_images(voc_dir,True)
len(train_images),len(train_labels)


# Display the first n images (top row) with their label images (bottom row).
n = 5
imgs = train_images[0:n]+train_labels[0:n]
# Tensors are (C, H, W); show_images/matplotlib expect (H, W, C).
imgs = [image.permute(1,2,0) for image in imgs]
d2l.torch.show_images(imgs,2,n)


#@save
#@save
# RGB color of each VOC class in the label images; index i in this list
# is the color of class VOC_CLASSES[i] (21 classes incl. background).
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
                [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
                [0, 64, 128]]
#@save
# Class names, aligned index-for-index with VOC_COLORMAP.
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
               'diningtable', 'dog', 'horse', 'motorbike', 'person',
               'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']



'''构建从RGB到VOC类别索引的映射'''
#@save
def voc_colormap2label():
    """Build a lookup table from a packed RGB value to its VOC class index."""
    lookup = torch.zeros(256 ** 3, dtype=torch.long)
    for cls_idx, (r, g, b) in enumerate(VOC_COLORMAP):
        # Pack the color into one integer: (r*256 + g)*256 + b.
        lookup[(r * 256 + g) * 256 + b] = cls_idx
    return lookup
"""将VOC标签中的RGB值映射到它们对应的类别索引"""
#@save
def voc_label_indices(label,colormaplabel):
    label = label.permute(1,2,0).numpy().astype('int32')
    idxs = [(label[:,:,0]*256+label[:,:,1])*256+label[:,:,2]]
    return colormaplabel[idxs]


y = voc_label_indices(train_labels[0],voc_colormap2label())
# Map the first training label image from RGB colors to VOC class indices.
y[105:115,130:140],VOC_CLASSES[1]
# In this region the pixels belonging to class VOC_CLASSES[1] ('aeroplane')
# show up as index 1.


"""随机裁剪特征和标签图像"""
#@save
def voc_rand_crop(feature, label, height, width):
    """Crop the same random (height, width) window out of feature and label."""
    # Sample one crop rectangle, then apply it to both tensors so the
    # image and its label stay pixel-aligned.
    crop_rect = torchvision.transforms.RandomCrop.get_params(feature, (height, width))
    cropped_feature = torchvision.transforms.functional.crop(feature, *crop_rect)
    cropped_label = torchvision.transforms.functional.crop(label, *crop_rect)
    return cropped_feature, cropped_label

# Show n different random crops of the first example: cropped images on
# the top row, their matching cropped labels below.
imgs = []
for _ in range(n):
    imgs += voc_rand_crop(train_images[0],train_labels[0],200,300)
imgs = [img.permute(1,2,0) for img in imgs]
d2l.torch.show_images(imgs[::2]+imgs[1::2],2,n)



"""一个用于加载VOC数据集的自定义数据集"""
#@save
class VOCSegDataset(torch.utils.data.Dataset):
    """Custom dataset for loading the VOC semantic-segmentation data.

    Loads every image of the chosen split, discards those smaller than
    ``crop_size``, normalizes the features, and on each access returns a
    random crop together with the per-pixel class indices of its label.
    """

    def __init__(self, is_train, crop_size, voc_dir):
        # Channel-wise normalization applied to every feature image.
        self.transform = torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        # Mini-batching needs a common size; labels cannot be resized via
        # interpolation, so examples are random-cropped instead of resized.
        self.crop_size = crop_size
        features, labels = read_voc_images(voc_dir, is_train)
        self.features = [self.transform_normalize(img)
                         for img in self.filter(features)]
        self.labels = self.filter(labels)
        self.colormap2label = voc_colormap2label()
        print('dataset.len =', len(self.features))

    def filter(self, images):
        # Keep only images at least as large as the crop window.
        return [img for img in images
                if img.shape[1] >= self.crop_size[0]
                and img.shape[2] >= self.crop_size[1]]

    def transform_normalize(self, image):
        # Scale to [0, 1] before applying the channel-wise normalization.
        return self.transform(image.float() / 255)

    def __getitem__(self, item):
        # A fresh random crop is drawn on every access.
        feature, label = voc_rand_crop(self.features[item], self.labels[item],
                                       *self.crop_size)
        # Return the feature plus the class-index map derived from the
        # label's RGB values.
        return (feature, voc_label_indices(label, self.colormap2label))

    def __len__(self):
        return len(self.features)



# Common crop size (height, width) so all examples batch together.
crop_size = (320,480)
voc_train = VOCSegDataset(is_train=True,crop_size=crop_size,voc_dir=voc_dir)
voc_val = VOCSegDataset(is_train=False,crop_size=crop_size,voc_dir=voc_dir)


batch_size = 64
# drop_last discards the final incomplete batch so every batch is full-size.
train_iter = torch.utils.data.DataLoader(voc_train,batch_size,shuffle=True,drop_last=True,num_workers = d2l.torch.get_dataloader_workers())
# Sanity-check one mini-batch: X is (64, 3, 320, 480) features,
# Y holds the corresponding per-pixel class-index labels.
for X,Y in train_iter:
    print(X.shape)
    print(Y.shape)
    break


def load_data_voc(crop_size, batch_size):
    """Download VOC2012 and return (train_iter, val_iter) data loaders."""
    voc_dir = d2l.torch.download_extract('voc2012', os.path.join(
        'VOCdevkit', 'VOC2012'))
    workers = d2l.torch.get_dataloader_workers()
    train_set = VOCSegDataset(is_train=True, crop_size=crop_size, voc_dir=voc_dir)
    val_set = VOCSegDataset(is_train=False, crop_size=crop_size, voc_dir=voc_dir)
    train_iter = torch.utils.data.DataLoader(
        train_set, batch_size, shuffle=True, drop_last=True,
        num_workers=workers)
    # Validation is never shuffled.
    val_iter = torch.utils.data.DataLoader(
        val_set, batch_size, shuffle=False, drop_last=True,
        num_workers=workers)
    return (train_iter, val_iter)


 目标框变形之后,用一个更大的目标框囊括变形的框


转置卷积

 

 填充是n,对于转置卷积的输出矩阵,删除上下左右的n行/列。

步幅也同样作用在转置卷积的输出矩阵上。

因为想得到每个像素的信息,所以通过转置卷积放大每个像素点。 

 全连接卷积神经网络

import torch
import d2l.torch
import torchvision
from torch import nn
from torch.nn import functional as F

# Number of VOC segmentation classes (background + 20 objects), i.e.
# len(VOC_CLASSES). Fix: it was used below but never defined (NameError).
num_classes = 21

pretrained_net = torchvision.models.resnet18(pretrained=True)
# Keep the pretrained ResNet-18 backbone but drop its last two layers
# (global average pooling and the fully connected classifier).
net = nn.Sequential(*list(pretrained_net.children())[:-2])
# Replace them with a 1x1 conv producing one channel per class, followed by
# a transposed conv that upsamples 32x back to the input resolution.
net.add_module('final_conv',nn.Conv2d(512,num_classes,kernel_size=1))
net.add_module('transpose_conv',nn.ConvTranspose2d(num_classes,num_classes,kernel_size=64,padding=16,stride=32))

#转置卷积层参数初始化
def bilinear_weight(in_channels,out_channels,kernel_size):
    factor = (kernel_size+1)//2
    if kernel_size%2 == 1:
        center = factor-1
    else:
        center = factor-0.5
    og = (torch.arange(kernel_size).reshape(-1,1),torch.arange(kernel_size).reshape(1,-1))
    filt = (1-torch.abs(og[0]-center)/factor)*(1-torch.abs(og[1]-center)/factor)
    #注意Pytorch中ConvTranspose2d()转置卷积层权重参数形状大小是(input_channels,output_channels,height,width),跟Conv2d()卷积层权重参数形状大小:(output_channels,input_channels,height,width)不一样,两者input_channels和output_channels通道数位置相反。
    weight = torch.zeros(size=(in_channels,out_channels,kernel_size,kernel_size))
    weight[range(in_channels),range(out_channels),:,:] = filt
    return  weight


# Initialize the transposed-conv layer with bilinear-upsampling weights.
conv_transpose_weight = bilinear_weight(num_classes,num_classes,64)
net.transpose_conv.weight.data.copy_(conv_transpose_weight)

# Download/load the dataset. NOTE(review): d2l's load_data_voc takes
# (batch_size, crop_size) — the reverse of the local helper defined above.
batch_size,crop_size = 32,(320,480)
train_iter,val_iter = d2l.torch.load_data_voc(batch_size,crop_size)

#定义损失
def loss(inputs, targets):
    """Per-image loss: pixel-wise cross-entropy averaged over H and W.

    inputs: (N, num_classes, H, W) logits; targets: (N, H, W) class indices.
    Returns an (N,)-shaped tensor, one loss value per image.
    """
    per_pixel = F.cross_entropy(inputs, targets, reduction='none')
    # Average the two spatial dimensions, keeping the batch dimension.
    return per_pixel.mean(1).mean(1)

# Train the FCN for a few epochs with SGD + weight decay on all GPUs.
num_epochs,lr,weight_decay,devices= 5,1e-3,1e-3,d2l.torch.try_all_gpus()
optim = torch.optim.SGD(params=net.parameters(),lr =lr,weight_decay=weight_decay)
d2l.torch.train_ch13(net,train_iter,val_iter,loss,optim,num_epochs,devices)

训练结果 

 

#预测
def predict(img):
    """Predict the per-pixel class-index map for one (3, H, W) image."""
    # Normalize the channels and add a batch dimension: the network
    # expects 4-D input.
    batch = val_iter.dataset.normalize_image(img).unsqueeze(0)
    # Most likely class per pixel; result of argmax is (1, H, W).
    scores = net(batch.to(devices[0]))
    pred = scores.argmax(dim=1)
    # Drop the batch dimension and return an (H, W) map.
    return pred.reshape(pred.shape[1], pred.shape[2])
def label2image(pred):
    """Map an (H, W) map of predicted class indices back to VOC label colors."""
    palette = torch.tensor(d2l.torch.VOC_COLORMAP)
    # Fancy-index the palette with the (long) class indices.
    return palette[pred.long(), :]

voc_dir = d2l.torch.download_extract('voc2012', 'VOCdevkit/VOC2012')
test_images,test_labels = d2l.torch.read_voc_images(voc_dir,is_train=False)
n,images = 4,[]
# Dataset images vary in size and the net's stride is 32, so heights/widths
# not divisible by 32 would cause shape mismatches; therefore cut rectangular
# regions whose height and width are multiples of 32.
for i in range(n):
    crop_rect = (0,0,320,480)
    crop_img = torchvision.transforms.functional.crop(test_images[i],*crop_rect)
    crop_label = torchvision.transforms.functional.crop(test_labels[i],*crop_rect)
    pred_image = label2image(predict(crop_img))
    # Collect triples: input image, colored prediction, ground-truth label.
    images+=[crop_img.permute(1,2,0),pred_image.cpu(),crop_label.permute(1,2,0)]
d2l.torch.show_images(images[::3]+images[1::3]+images[2::3],3,n,scale=2)

 预测结果

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值