FCN for Semantic Segmentation: Paper Reading and Python Implementation

Original Paper

Fully Convolutional Networks for Semantic Segmentation (Long, Shelhamer, and Darrell, CVPR 2015).

Key Contributions

The paper proposes an end-to-end method for semantic segmentation.


The segmentation ground truth is used directly as the supervision signal to train an end-to-end network that makes pixel-level predictions.

How to Design the Network Architecture

How to Make Pixel-Level Predictions


In VGG16, the first fully connected layer has a weight matrix of size 25088×4096 (25088 = 7×7×512). It can be reinterpreted as a convolutional layer with 4096 filters of size 7×7×512, so the network produces a feature map instead of a fixed-length vector. The benefit of this reinterpretation is that the pretrained classification weights can still be fine-tuned (transfer learning). Finally, the resulting feature map is upsampled with bilinear interpolation, implemented as a deconvolution (transposed convolution) layer, which yields a segmentation map of the same size as the original image.
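As a minimal sketch of this reinterpretation (assuming the pretrained VGG16 from keras.applications and a channels-last backend; the layer name fc6_conv and the model fcn_head are illustrative, not part of the code later in this post), the fc6 weight matrix of shape (25088, 4096) reshapes directly into a 7×7×512×4096 convolution kernel:

import numpy as np
from keras.applications.vgg16 import VGG16
from keras.layers import Conv2D
from keras.models import Model

# Pretrained VGG16 with its classification head; 'fc1' holds the 25088x4096 weights.
vgg = VGG16(weights='imagenet', include_top=True)
W_fc6, b_fc6 = vgg.get_layer('fc1').get_weights()   # shapes (25088, 4096) and (4096,)

# Replace fc6 with 4096 convolution filters of size 7x7 applied to pool5 (7x7x512).
pool5 = vgg.get_layer('block5_pool').output
fc6_conv = Conv2D(4096, (7, 7), activation='relu', name='fc6_conv')(pool5)
fcn_head = Model(vgg.input, fc6_conv)

# The dense matrix maps 7*7*512 inputs to 4096 outputs, so it reshapes directly
# into a convolution kernel of shape (7, 7, 512, 4096).
fcn_head.get_layer('fc6_conv').set_weights([W_fc6.reshape(7, 7, 512, 4096), b_fc6])
print(fcn_head.output_shape)   # (None, 1, 1, 4096) for the default 224x224 input

With the fixed 224×224 input this gives a 1×1 map of fc6 activations; in a fully convolutional network fed larger images, the same kernel produces a spatial map, which is exactly what the segmentation head consumes.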

How to Preserve Accuracy

When upsampling in a single step with a stride of 32, a 3×500×500 input produces a 544×544 output and the object boundaries look poor. The paper therefore uses skip layers: upsampling is done with a smaller stride at shallower layers, the resulting fine layer is fused with the coarse layer from the deeper layers, and the fused result is upsampled again to produce the output. This combines local and global information, the "combining what and where" described in the paper, and brings a clear improvement: FCN-32s reaches 59.4 mean IoU, FCN-16s improves to 62.4, and FCN-8s to 62.7.
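One implementation detail: the paper initializes the learned upsampling (deconvolution) layers to bilinear interpolation and lets training refine them. The Keras code below leaves Conv2DTranspose at its default initializer; if you want to mimic the paper, a helper along these lines can be used (bilinear_kernel is a hypothetical name, assuming use_bias=False and as many filters as input channels):

import numpy as np

def bilinear_kernel(size, channels):
    # Kernel of shape (size, size, channels, channels) that performs bilinear
    # upsampling when loaded into a Conv2DTranspose layer (each channel maps to itself).
    factor = (size + 1) // 2
    center = factor - 1 if size % 2 == 1 else factor - 0.5
    og = np.ogrid[:size, :size]
    filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
    kernel = np.zeros((size, size, channels, channels), dtype=np.float32)
    for c in range(channels):
        kernel[:, :, c, c] = filt
    return kernel

# Example: a 2x upsampling layer built as Conv2DTranspose(n_classes, (4, 4),
# strides=(2, 2), use_bias=False) can be initialized with
# layer.set_weights([bilinear_kernel(4, n_classes)])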


Paper Results

[Result figures from the original paper]

Code Implementation

FCN8

# coding=utf-8
from keras.models import *
from keras.layers import *
import os


def crop(o1, o2, i):
    # Crop the two feature maps to the same spatial size before fusing them;
    # the unpadded transposed convolutions can leave them a few pixels apart.
    o_shape2 = Model(i, o2).output_shape
    outputHeight2 = o_shape2[1]
    outputWidth2 = o_shape2[2]
    o_shape1 = Model(i, o1).output_shape
    outputHeight1 = o_shape1[1]
    outputWidth1 = o_shape1[2]
    cx = abs(outputWidth1 - outputWidth2)
    cy = abs(outputHeight1 - outputHeight2)
    if outputWidth1 > outputWidth2:
        o1 = Cropping2D(cropping=((0, 0), (0, cx)))(o1)
    else:
        o2 = Cropping2D(cropping=((0, 0), (0, cx)))(o2)
    if outputHeight1 > outputHeight2:
        o1 = Cropping2D(cropping=((0, cy), (0, 0)))(o1)
    else:
        o2 = Cropping2D(cropping=((0, cy), (0, 0)))(o2)
    return o1, o2


def FCN8(nClasses, input_height=416, input_width=608, vgg_level=3):
    img_input = Input(shape=(input_height, input_width, 3))

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    f1 = x

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    f2 = x

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    f3 = x

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    f4 = x

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    f5 = x

    # VGG classification head; not connected to the segmentation output,
    # kept only so pretrained VGG16 weights can be loaded.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    # x = Dense(1000, activation='softmax', name='predictions')(x)
    # vgg = Model(img_input, x)
    # vgg.load_weights(VGG_Weights_path)

    # fc6/fc7 as convolutions, a 1x1 class-score layer, then 2x upsampling
    o = f5
    o = Conv2D(4096, (7, 7), activation='relu', padding='same')(o)
    o = Dropout(0.5)(o)
    o = Conv2D(4096, (1, 1), activation='relu', padding='same')(o)
    o = Dropout(0.5)(o)
    o = Conv2D(nClasses, (1, 1), kernel_initializer='he_normal')(o)
    o = Conv2DTranspose(nClasses, kernel_size=(4, 4), strides=(2, 2), use_bias=False)(o)

    # Skip connection from pool4
    o2 = f4
    o2 = Conv2D(nClasses, (1, 1), kernel_initializer='he_normal')(o2)
    o, o2 = crop(o, o2, img_input)
    o = Add()([o, o2])
    o = Conv2DTranspose(nClasses, kernel_size=(4, 4), strides=(2, 2), use_bias=False)(o)

    # Skip connection from pool3, then 8x upsampling to roughly the input size
    o2 = f3
    o2 = Conv2D(nClasses, (1, 1), kernel_initializer='he_normal')(o2)
    o2, o = crop(o2, o, img_input)
    o = Add()([o2, o])
    o = Conv2DTranspose(nClasses, kernel_size=(16, 16), strides=(8, 8), use_bias=False)(o)

    o_shape = Model(img_input, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]

    # Flatten the spatial grid and apply a per-pixel softmax over the classes
    # (for channels-last data the reshape target is (pixels, classes)).
    o = Reshape((outputHeight * outputWidth, nClasses))(o)
    o = Activation('softmax')(o)

    model = Model(img_input, o)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight
    return model
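A quick sanity check for the model above (a sketch assuming a TensorFlow backend with channels-last image data and 21 classes, as in PASCAL VOC):

# Build FCN-8s and inspect the shapes it produces.
model = FCN8(nClasses=21, input_height=416, input_width=608)
model.summary()
print(model.outputHeight, model.outputWidth)   # spatial size of the prediction grid
print(model.output_shape)                      # (None, outputHeight * outputWidth, 21)

Because the transposed convolutions are unpadded, the prediction grid comes out slightly larger than the input, which is why the crop helper exists and why outputHeight and outputWidth are stored on the model.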

FCN32

# coding=utf-8
from keras.models import *
from keras.layers import *
import os


def FCN32(n_classes, input_height=416, input_width=608, vgg_level=3):
    # Channels-last input, consistent with FCN8 above.
    img_input = Input(shape=(input_height, input_width, 3))

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    f1 = x

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    f2 = x

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    f3 = x

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    f4 = x

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    f5 = x

    # VGG classification head; not connected to the segmentation output,
    # kept only so pretrained VGG16 weights can be loaded.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(1000, activation='softmax', name='predictions')(x)
    # vgg = Model(img_input, x)
    # vgg.load_weights(VGG_Weights_path)

    # fc6/fc7 as convolutions, a 1x1 class-score layer, then a single 32x upsampling
    o = f5
    o = Conv2D(4096, (7, 7), activation='relu', padding='same')(o)
    o = Dropout(0.5)(o)
    o = Conv2D(4096, (1, 1), activation='relu', padding='same')(o)
    o = Dropout(0.5)(o)
    o = Conv2D(n_classes, (1, 1), kernel_initializer='he_normal')(o)
    o = Conv2DTranspose(n_classes, kernel_size=(64, 64), strides=(32, 32), use_bias=False)(o)

    o_shape = Model(img_input, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]

    # Flatten the spatial grid and apply a per-pixel softmax over the classes
    o = Reshape((outputHeight * outputWidth, n_classes))(o)
    o = Activation('softmax')(o)

    model = Model(img_input, o)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight
    return model
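To train either model, the per-pixel labels must be flattened to match the (outputHeight * outputWidth, n_classes) output. A minimal training sketch with dummy arrays, shown only to illustrate the expected shapes (images and label_ids stand in for a real dataset whose masks are resized and one-hot encoded to the model's output grid):

import numpy as np

model = FCN32(n_classes=21, input_height=416, input_width=608)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Dummy data with the right shapes: images at the input size, labels as one-hot
# class maps flattened over the model's output grid.
n_samples, n_classes = 4, 21
images = np.random.rand(n_samples, 416, 608, 3).astype('float32')
label_ids = np.random.randint(0, n_classes, size=(n_samples, model.outputHeight * model.outputWidth))
labels = np.eye(n_classes, dtype='float32')[label_ids]   # shape (n, pixels, n_classes)

model.fit(images, labels, batch_size=2, epochs=1)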

You are welcome to follow my WeChat official account GiantPadaCV. I look forward to discussing machine learning, deep learning, image algorithms, optimization techniques, competitions, and everyday life with you.


Reference: https://www.jianshu.com/p/70c4354a1035
