UNet卷积可视化

女神节的今天写个日志,祝愿广大女同胞们越来越美

1、UNet卷积可视化(直接定义模型中间feature map可视化)

from keras.models import Model
import cv2
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers import Activation
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from pylab import *
import keras
import cv2
  
def get_row_col(num_pic):
    """Choose a (rows, cols) subplot grid able to hold num_pic images.

    Rows is the square root of num_pic rounded to the nearest integer;
    an extra column is added whenever that rounding went downward, so
    rows * cols >= num_pic.
    """
    root = num_pic ** 0.5
    rows = round(root)
    if root > rows:
        cols = rows + 1
    else:
        cols = rows
    return rows, cols
  
def visualize_feature_map(img_batch):
    """Visualize every channel of a (1, H, W, C) feature-map batch.

    Draws each channel in a subplot grid (saved to 'feature_map.tif'),
    then fuses all channels by a 1:1 sum, min-max normalizes the fusion,
    binarizes it to {0, 255} and writes it out with cv2 as well as
    matplotlib ('y_predict.tif').

    Parameters
    ----------
    img_batch : ndarray
        Network output with a leading batch axis of size 1.
    """
    feature_map = np.squeeze(img_batch, axis=0)  # drop batch axis -> (H, W, C)
    num_pic = feature_map.shape[2]  # number of channels (feature maps)
    row, col = get_row_col(num_pic)

    plt.figure()
    feature_map_combination = []
    for i in range(num_pic):
        feature_map_split = feature_map[:, :, i]
        feature_map_combination.append(feature_map_split)
        plt.subplot(row, col, i + 1)
        plt.imshow(feature_map_split)
        plt.axis('off')  # was pylab's bare axis(); be explicit about the module
        plt.title('feature_map_{}'.format(i))

    plt.savefig('feature_map.tif')
    plt.show()

    # Fuse all channels with equal (1:1) weight.
    feature_map_sum = sum(feature_map_combination)

    # Min-max normalize the fused map to [0, 1]; guard against a constant
    # map, which previously caused a division by zero.
    lo, hi = np.min(feature_map_sum), np.max(feature_map_sum)
    if hi > lo:
        feature_map_sum = (feature_map_sum - lo) / (hi - lo)
    else:
        feature_map_sum = np.zeros_like(feature_map_sum, dtype='float')

    # Round to {0, 1}, then scale to {0, 255} for an 8-bit binary image
    # (same result as the original float->round->uint8->*=255 chain).
    y_predict = np.round(feature_map_sum).astype('uint8') * 255
    y_predict = np.squeeze(y_predict).astype('uint8')
    # NOTE(review): hard-coded absolute output path kept for compatibility --
    # consider making it a parameter.
    cv2.imwrite("C:\\Users\\Administrator\\Desktop\\0.tif", y_predict)

    plt.imshow(y_predict)
    plt.savefig("y_predict.tif")
def create_model():  # model-building approach one (simple Sequential CNN)
    """Placeholder for the simple Sequential CNN variant.

    The original body was left entirely commented out, leaving the ``def``
    with no statements at all -- a SyntaxError that prevented the whole
    module from importing.  The original sketch is kept below for reference.

    Raises
    ------
    NotImplementedError
        Always; use unet() instead.
    """
    # model = Sequential()
    # model.add(Convolution2D(9, 1, 1, input_shape=img.shape))
    # model.add(Activation('relu'))
    # model.add(MaxPooling2D(pool_size=(4, 4)))
    raise NotImplementedError(
        "create_model() was never implemented; use unet() instead."
    )
def unet(input_shape=None):  # model-building approach two: truncated U-Net
    """Build and compile a truncated U-Net for feature-map visualization.

    Only the first two encoder conv groups are active; the deeper encoder
    levels and the whole decoder (with skip connections) are kept commented
    out below for reference, so the model stays cheap and its early feature
    maps can be inspected.

    Parameters
    ----------
    input_shape : tuple, optional
        Shape (H, W, C) of a single input image.  Defaults to the shape of
        the module-level ``img`` loaded in ``__main__`` -- the original
        behavior, which read that global directly.

    Returns
    -------
    keras.models.Model
        Compiled model mapping the image to a single-channel sigmoid map.
    """
    if input_shape is None:
        input_shape = img.shape  # original behavior: rely on the global image
    inputs = Input(input_shape)
    # First group: two convolutions + pooling.
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    # Second group: two convolutions + pooling.
    conv2 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    # --- Disabled in the original: deeper encoder and full decoder --------
    # Third group: two convolutions.
#    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
#    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
#    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    # Fourth group: two convolutions.
#    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
#    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
#    drop4 = Dropout(0.5)(conv4)
##    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    # Bottom level: two convolutions.
#    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(drop4)
#    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
#    drop5 = Dropout(0.5)(conv5)
    # First up-sampling + concatenate.
#    up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
#    merge6 = concatenate([drop4,up6], axis = 3)
#    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
#    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
    # Second up-sampling + concatenate.
#    up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
#    merge7 = concatenate([conv3,up7], axis = 3)
#    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
#    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
    # Third up-sampling + concatenate.
#    up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
#    merge8 = concatenate([conv2,up8], axis = 3)
#    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
#    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
    # Fourth up-sampling + concatenate.
#    up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
#    merge9 = concatenate([conv1,up9], axis = 3)
#    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
#    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
#    conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    # ----------------------------------------------------------------------
    # Single-channel sigmoid head (the original reassigned 'conv1' here,
    # shadowing the first encoder layer; renamed for clarity).
    outputs = Conv2D(1, 3, activation='sigmoid')(pool2)
    # NOTE(review): 'input='/'output=' and 'lr=' are the legacy kwarg names
    # matching the old Keras imported at the top of this file; newer Keras
    # uses 'inputs='/'outputs=' and 'learning_rate='.
    model = Model(input=inputs, output=outputs)
    opt = Adam(lr=1e-4)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])

    return model
 
if __name__ == "__main__":
    # Load the test image; cv2.imread returns None (no exception) when the
    # file is missing, so fail fast with a clear message instead of a
    # cryptic numpy error later.
    img = cv2.imread('001.jpg')
    if img is None:
        raise FileNotFoundError("could not read input image '001.jpg'")
    # Min-max normalize the input to [0, 1].
    img = (img - np.min(img)) / (np.max(img) - np.min(img))
    print("ss=", img.shape)
    model = unet()
    # Add the batch axis exactly once (the original duplicated this line).
    img_batch = np.expand_dims(img, axis=0)
    conv_img = model.predict(img_batch)  # network output feature maps
    print("conv_img=", conv_img)
    visualize_feature_map(conv_img)

1.2 结果图如下

  • 原图
    在这里插入图片描述
  • 卷积输出每个卷积核卷积后的特征图
    在这里插入图片描述
  • 最大池化
    在这里插入图片描述
  • 平均池化
    在这里插入图片描述
好的,以下是一段使用 PyTorch 实现的 UNet 卷积模型中间 feature map 可视化的代码:

```python
import torch
import torch.nn as nn
import matplotlib.pyplot as plt

# 定义 UNet 卷积模型
class UNet(nn.Module):
    def __init__(self):
        super(UNet, self).__init__()
        # 编码器部分
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        # 中间部分
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=True)
        self.conv4 = nn.Conv2d(128, 128, 3, padding=1)
        self.bn4 = nn.BatchNorm2d(128)
        self.relu4 = nn.ReLU(inplace=True)
        # 解码器部分
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv5 = nn.Conv2d(192, 64, 3, padding=1)
        self.bn5 = nn.BatchNorm2d(64)
        self.relu5 = nn.ReLU(inplace=True)
        self.conv6 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn6 = nn.BatchNorm2d(64)
        self.relu6 = nn.ReLU(inplace=True)
        self.conv7 = nn.Conv2d(64, 3, 3, padding=1)

    def forward(self, x):
        # 编码器部分
        x1 = self.conv1(x)
        x1 = self.bn1(x1)
        x1 = self.relu1(x1)
        x2 = self.conv2(x1)
        x2 = self.bn2(x2)
        x2 = self.relu2(x2)
        # 中间部分
        x3 = self.conv3(x2)
        x3 = self.bn3(x3)
        x3 = self.relu3(x3)
        x4 = self.conv4(x3)
        x4 = self.bn4(x4)
        x4 = self.relu4(x4)
        # 解码器部分
        x5 = self.upsample(x4)
        x5 = torch.cat((x5, x2), dim=1)
        x5 = self.conv5(x5)
        x5 = self.bn5(x5)
        x5 = self.relu5(x5)
        x6 = self.conv6(x5)
        x6 = self.bn6(x6)
        x6 = self.relu6(x6)
        x7 = self.conv7(x6)
        return x7

# 定义一个函数用于可视化中间 feature map
def visualize_feature_map(model, input):
    # 选择中间层
    layer = model.conv3
    # 获取该层输出
    output = layer(input)
    # 将输出可视化
    fig, axs = plt.subplots(8, 8, figsize=(16, 16))
    for i in range(8):
        for j in range(8):
            axs[i, j].imshow(output[0, i * 8 + j].detach().numpy(), cmap='gray')
            axs[i, j].axis('off')
    plt.show()

# 测试可视化函数
model = UNet()
input = torch.randn(1, 3, 256, 256)
visualize_feature_map(model, input)
```

这段代码实现了一个简单的 UNet 卷积模型,并定义了一个 `visualize_feature_map` 函数,用于可视化模型中间层的 feature map。在函数中,我们选择了模型的第三个卷积层作为中间层,并将输入的图片传入模型,获取该层的输出,最后将输出可视化。在可视化中,我们将输出的 feature map 按 8x8 的网格排列显示。
评论 4
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值