图像分割数据增强,以及 mask 与原图的叠加可视化

数据增强主要用于增加图片数量,例如添加噪声、翻转等。mask 的像素类别值不会被改变,但会随图像一起翻转。注意 mask 的像素值最好保持不变:例如 mask 的像素类别是 0、1、2、3 四个类别,就不要让增强改动这些值。本文最后附上检查 mask 像素是否超出限定类别的代码,该检查可以有效排查 num_classes 相关的报错。

一、数据增强代码:

import imgaug.augmenters as iaa  # 导入iaa
import cv2
import glob
import os
import numpy as np

if __name__ == '__main__':
    # Paired augmentation of segmentation images and masks: geometric
    # transforms are applied identically to both; pixel-level transforms
    # (noise, blur, contrast) only touch the image, so mask label values
    # (e.g. 0/1/2/3) are preserved.
    img_dir = 'F:/CT_lung_seg_and_class/seg_data/CC-CCI/image'	# input image directory
    msk_dir = 'F:/CT_lung_seg_and_class/seg_data/CC-CCI/mask'	# input mask directory
    img_tmp_dir = 'F:/CT_lung_seg_and_class/seg_data/CC-CCI_AUG/image/'	# output image directory
    msk_tmp_dir = 'F:/CT_lung_seg_and_class/seg_data/CC-CCI_AUG/mask/'	# output mask directory

    # Make sure the output directories exist before writing.
    os.makedirs(img_tmp_dir, exist_ok=True)
    os.makedirs(msk_tmp_dir, exist_ok=True)

    # BUG FIX: os.listdir() returns entries in arbitrary order, so the i-th
    # image was not guaranteed to correspond to the i-th mask. Sort both
    # lists so image/mask pairs line up by filename.
    img_list = sorted(os.listdir(img_dir))
    msk_list = sorted(os.listdir(msk_dir))

    # Build the augmentation pipeline ONCE, outside the per-file loop; the
    # random parameters are still re-sampled on every call to seq(...).
    seq = iaa.Sequential([
        iaa.Fliplr(0.5),    # horizontal flip with probability 0.5
        iaa.Flipud(0.5),    # vertical flip with probability 0.5
        iaa.GaussianBlur(sigma=(0, 3.0)),   # Gaussian blur (image only)
        iaa.Sharpen(alpha=(0, 0.3), lightness=(0.9, 1.1)),  # sharpening (image only)
        iaa.Affine(scale=(0.9, 1), translate_percent=(0, 0.1), rotate=(-40, 40), cval=0, mode='constant'),   # affine transform (applied to image AND mask)
        # iaa.CropAndPad(px=(-10, 0), percent=None, pad_mode='constant', pad_cval=0, keep_size=True), # crop / pad
        # iaa.PiecewiseAffine(scale=(0, 0.05), nb_rows=4, nb_cols=4, cval=0),     # random control-point deformation
        # NOTE(review): ContrastNormalization is deprecated in newer imgaug
        # releases in favor of LinearContrast — confirm installed version.
        iaa.ContrastNormalization((0.75, 1.5), per_channel=True),  # contrast: random alpha in [0.75, 1.5] per channel
        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),  # additive Gaussian noise
        iaa.Multiply((0.8, 1.2), per_channel=0.2),  # multiply pixels by [0.8, 1.2] to vary brightness/color
    ])

    for i, (img_name, msk_name) in enumerate(zip(img_list, msk_list)):
        img = cv2.imread(filename=os.path.join(img_dir, img_name))
        img = np.expand_dims(img, axis=0).astype(np.float32)
        # cv2.imread loads the mask as 3 identical BGR channels; imgaug
        # treats the int32 array as a segmentation map.
        msk = cv2.imread(filename=os.path.join(msk_dir, msk_name))
        msk = np.expand_dims(msk, axis=0).astype(np.int32)

        # Produce 8 independent augmented copies of this image/mask pair.
        for j in range(8):
            img_aug, msk_aug = seq(images=img, segmentation_maps=msk)
            img_out = os.path.join(img_tmp_dir, img_name.split(".")[0] + "_" + str(j) + '.jpg')
            # Masks MUST be saved lossless (PNG), never JPEG, or label
            # values would be corrupted by compression.
            msk_out = os.path.join(msk_tmp_dir, msk_name.split(".")[0] + "_" + str(j) + '.png')
            cv2.imwrite(img_out, img_aug[0])
            # Write a single channel only — all three loaded channels are equal.
            cv2.imwrite(msk_out, msk_aug[0, :, :, 0])
        print("正在进行数据增强{}".format(i))

二、将mask和图像进行加颜色重合代码:

import os
from PIL import Image

# Overlay each mask on its corresponding image (30% mask / 70% image) so
# the annotations can be inspected visually.
root_path_background = "F:/CT_lung_seg_and_class/seg_data/CC-CCI_clean/val/images/"
root_path_paste = "F:/CT_lung_seg_and_class/seg_data/CC-CCI_clean/val/mask/"
output_path = "F:/CT_lung_seg_and_class/seg_data/CC-CCI_clean/val/manual/"

# Make sure the output directory exists before saving.
os.makedirs(output_path, exist_ok=True)

# Sort both listings so images and masks are paired by filename order.
img_list = sorted(os.listdir(root_path_background))
label_list = sorted(os.listdir(root_path_paste))

for num, img_label in enumerate(zip(img_list, label_list)):
    img = Image.open(os.path.join(root_path_background, img_label[0]))
    # BUG FIX: Image.blend requires both inputs to have the same mode (and
    # size). Only the mask was converted before, so a grayscale background
    # image would raise ValueError. Convert both to RGB.
    img = img.convert("RGB")
    label = Image.open(os.path.join(root_path_paste, img_label[1]))
    label = label.convert("RGB")
    fin = Image.blend(img, label, 0.3)  # alpha=0.3 → 30% mask, 70% image
    # fin.show()
    # Save the overlay under the original image's filename.
    fin.save(os.path.join(output_path, img_label[0]))

将mask和原图经过图像重合后图片效果如下:

三、检查 mask 像素值是否在限定类别以内:例如当 mask 只应包含 0、1 两个像素值时,该代码可以找出存在大于 1 的像素值的图像。代码如下:

import os
import os.path as osp
from tqdm import tqdm
import cv2
import numpy as np


# Scan a mask directory and report any file whose pixel values fall outside
# the valid class range [0, num_classes). Out-of-range labels are the usual
# cause of "num_classes"-related training errors.
num_classes = 4  # valid mask pixel values are 0 .. num_classes - 1
mask_dir = "F:/CT_lung_seg_and_class/seg_data/CC-CCI_clean/train/mask1/"
mask_names = os.listdir(mask_dir)

for mask_name in tqdm(mask_names):
    mask_path = osp.join(mask_dir, mask_name)
    # Flag 0 = cv2.IMREAD_GRAYSCALE: read as a single channel so pixel
    # values are the raw label indices.
    mask = cv2.imread(mask_path, 0)
    if mask is None:
        # BUG FIX: cv2.imread returns None for unreadable/non-image files;
        # the original code crashed on mask.shape. Flag the file instead.
        print("error: " + mask_name)
        continue
    # A grayscale mask is non-negative uint8, so a single max() scan is
    # equivalent to the original per-class np.where counting (which was
    # O(num_classes * H * W)) and detects the same out-of-range values.
    if mask.max() >= num_classes:
        print("error: " + mask_name)

 

  • 6
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 7
    评论
对于需要进行精细图像分割的任务,可以使用语义分割算法,例如 U-Net,进行分割。 下面介绍如何使用 U-Net 进行精细的图像分割,提取 mask 的白色图像边缘。 1. 安装相关库 首先需要安装相关库,包括 tensorflow、keras、opencv 等。可以使用 pip 命令进行安装: ``` pip install tensorflow keras opencv-python ``` 2. 加载模型并进行图像分割 接下来,可以使用以下代码加载 U-Net 模型,并对输入图像进行分割: ```python import os import cv2 import numpy as np import tensorflow as tf from tensorflow import keras # Define the U-Net model def unet(pretrained_weights=None, input_size=(256, 256, 1)): inputs = keras.layers.Input(input_size) # Downsample conv1 = keras.layers.Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs) conv1 = keras.layers.Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1) pool1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = keras.layers.Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1) conv2 = keras.layers.Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2) pool2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2) conv3 = keras.layers.Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2) conv3 = keras.layers.Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3) pool3 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3) conv4 = keras.layers.Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3) conv4 = keras.layers.Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4) drop4 = keras.layers.Dropout(0.5)(conv4) pool4 = keras.layers.MaxPooling2D(pool_size=(2, 2))(drop4) # Upsample conv5 = keras.layers.Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4) conv5 = keras.layers.Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5) drop5 = keras.layers.Dropout(0.5)(conv5) up6 = keras.layers.Conv2DTranspose(512, 2, strides=(2, 2), 
padding='same')(drop5) merge6 = keras.layers.concatenate([drop4, up6], axis=3) conv6 = keras.layers.Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6) conv6 = keras.layers.Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6) up7 = keras.layers.Conv2DTranspose(256, 2, strides=(2, 2), padding='same')(conv6) merge7 = keras.layers.concatenate([conv3, up7], axis=3) conv7 = keras.layers.Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7) conv7 = keras.layers.Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7) up8 = keras.layers.Conv2DTranspose(128, 2, strides=(2, 2), padding='same')(conv7) merge8 = keras.layers.concatenate([conv2, up8], axis=3) conv8 = keras.layers.Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8) conv8 = keras.layers.Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8) up9 = keras.layers.Conv2DTranspose(64, 2, strides=(2, 2), padding='same')(conv8) merge9 = keras.layers.concatenate([conv1, up9], axis=3) conv9 = keras.layers.Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9) conv9 = keras.layers.Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9) conv9 = keras.layers.Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9) outputs = keras.layers.Conv2D(1, 1, activation='sigmoid')(conv9) model = keras.models.Model(inputs=inputs, outputs=outputs) # Load the pretrained weights if pretrained_weights: model.load_weights(pretrained_weights) return model # Load the U-Net model model = unet(pretrained_weights='unet_weights.h5') # Load the input image image = cv2.imread('input_image.png', cv2.IMREAD_GRAYSCALE) image = cv2.resize(image, (256, 256)) # Normalize the image image = image.astype('float32') / 255.0 # Reshape the image to a 4D 
tensor image = np.reshape(image, (1, 256, 256, 1)) # Run the image through the model mask = model.predict(image) # Threshold the mask to get the edge mask[mask >= 0.5] = 1 mask[mask < 0.5] = 0 mask = mask.astype('uint8') # Find the contour of the mask contours, hierarchy = cv2.findContours(mask[0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # Draw the contour on the original image image = cv2.imread('input_image.png') cv2.drawContours(image, contours, -1, (255, 255, 255), thickness=1) # Show the result cv2.imshow('result', image) cv2.waitKey(0) cv2.destroyAllWindows() ``` 运行以上代码后,将生成一张带有mask的图像,并在其中标出了mask的边缘。如果需要更精细的分割效果,可以尝试调整模型的参数,或使用其他的图像分割算法

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 7
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值