A roundup of traditional segmentation methods (SLIC, NCut, watershed, graph-based segmentation, Mean Shift, maximum-entropy thresholding)

They are all available online.
Here is a roundup of the traditional segmentation methods I have come across.
All experiments use this tiger image:
Sample image
1. SLIC
Python's skimage.segmentation ships a ready-made implementation of this method. Here I also use a Python package that slices an image into tiles and joins them back together (image_slicer), so I record both at once.

# -*- coding: utf-8 -*-
import cv2
import math
import image_slicer
import numpy as np
from PIL import ImageDraw, ImageFont, Image
from skimage.segmentation import slic, mark_boundaries
from skimage import morphology

tiles = image_slicer.slice('sample.jpg', 4, save=False)  # cut the picture into 4 tiles
image_slicer.save_tiles(tiles, directory='./tiles/', prefix='t', format='png')  # save the 4 tiles to the designated folder

# The following 4 (commented-out) lines do the same job as the line above.
# for tile in tiles:
# 	overlay = ImageDraw.Draw(tile.image)
# 	overlay.text((5,5), str(tile.number), (255,255,255), ImageFont.load_default())
# image_slicer.save_tiles(tiles)

# segment the 4 tiles one by one
for tile in tiles:
    tile_temp = np.array(tile.image)
    segment = slic(tile_temp, n_segments=600, compactness=20)
    segment_temp = mark_boundaries(tile_temp, segment)
    cv2.imwrite('t.png', np.uint8(segment_temp[..., 0]))  # near-binary boundary grid for this tile (superpixel boundaries become 1)
    tile.image = Image.fromarray(np.uint8(segment_temp*255))

# join the 4 tiles after running SLIC on each
im = image_slicer.join(tiles)
im.save('1.png','png')

Image slicing:
Tiles
SLIC segmentation result:
SLIC result
Note the intermediate file t.png written inside the loop: it is the segmentation grid. When this method is applied to remote-sensing imagery, that grid can be converted to vectors, which gives you segmentation polygons (a conversion sketch follows the grid image below).
Grid
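
As a side note, here is a minimal sketch (my own addition, not part of the run above) of how the SLIC labels, rather than the boundary grid itself, could be turned into polygons with GDAL/OGR, assuming the label array (the segment variable above) has been written to a GeoTIFF named slic_labels.tif:

# Vectorize a SLIC label raster into polygons with GDAL/OGR.
# 'slic_labels.tif' and 'slic_segments.shp' are hypothetical file names.
from osgeo import gdal, ogr, osr

src = gdal.Open('slic_labels.tif')
band = src.GetRasterBand(1)

drv = ogr.GetDriverByName('ESRI Shapefile')
dst = drv.CreateDataSource('slic_segments.shp')
srs = osr.SpatialReference()
srs.ImportFromWkt(src.GetProjection())
layer = dst.CreateLayer('segments', srs=srs, geom_type=ogr.wkbPolygon)
layer.CreateField(ogr.FieldDefn('label', ogr.OFTInteger))

# Each connected region of identical label values becomes one polygon feature,
# with its label written into field index 0 ('label').
gdal.Polygonize(band, None, layer, 0, [], callback=None)
dst = None  # flush and close the shapefile
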
One remaining problem: because I used the slice-and-join package, visible artifacts appear along the tile seams on close inspection; this is unavoidable when the tiles are stitched back together (a whole-image alternative is sketched below).
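
If the seams are a problem, a simple alternative (a sketch of mine, not something I ran for the figures above) is to skip the slicing and run SLIC on the full image in one pass, scaling n_segments up accordingly:

import numpy as np
from skimage import io
from skimage.segmentation import slic, mark_boundaries

img = io.imread('sample.jpg')
segments = slic(img, n_segments=2400, compactness=20)  # roughly 4 x 600, to match the tiled run
io.imsave('full_slic.png', np.uint8(mark_boundaries(img, segments) * 255))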

2. NCut
This method also works fairly well. Below is a command-line version of the code; copy it and run: python .\Ncut.py 4 .\sample.jpg ./temp.jpg -norm to get the result, where 4 is the number of clusters, which you have to choose yourself. Link: https://github.com/gravins/image-segmentation. Code:

from sklearn.cluster import KMeans
import random
from PIL import Image
import pandas as pd
from sklearn import preprocessing
import sys
import skimage
from skimage import segmentation, color
from skimage.future import graph
from matplotlib import pyplot as plt
import numpy as np

if len(sys.argv) not in range(4, 6):
    raise SyntaxError("You must specify 4 argument values:\n\tnumber of clusters\n\tpath for input file\n\tpath for output file\n\tuse -norm to enable normalization (default disabled)")

try:
    cluster_number = int(sys.argv[1])

    # Open image
    img = Image.open(sys.argv[2])
except:
    raise SyntaxError("You must specify 4 argument values:\n\tnumber of clusters\n\tpath for input file\n\tpath for output file\n\tuse -norm to enable normalization (default disabled)")

if len(sys.argv) == 5 and sys.argv[4] not in ["-norm"]:
    raise SyntaxError("You must specify normalization mode with \"-norm\". By default it is not enabled")
else:
    # check the argument count first so we never index sys.argv[4] when it does not exist
    normalize = len(sys.argv) == 5 and sys.argv[4] == "-norm"

outputName = sys.argv[3]

def colors(n):
    """
    Generate n random distinct rgb colors
    :param n: number of colors to generate
    :return: list of rgb colors
    """
    ret = []
    red = int(random.random() * 256)
    green = int(random.random() * 256)
    blue = int(random.random() * 256)
    step = 256 / n
    for i in range(n):
        red += step
        green += step
        blue += step
        red = int(red) % 256
        green = int(green) % 256
        blue = int(blue) % 256
        ret.append((red, green, blue))
    return ret


def getAverageRGB(clrs):
    """
    Given a set of RGB colors, return the average value of color as (r, g, b)
    :param clrs: set of RGB colors
    :return: average rgb color
    """
    # no. of pixels in set
    npixels = len(clrs)

    sumRGB = [0, 0, 0]
    for c in clrs:
        for i in range(3):
            sumRGB[i] += c[i]

    avg = (round(sumRGB[0]/npixels), round(sumRGB[1]/npixels), round(sumRGB[2]/npixels))

    return avg


# Define random color for clusters
cluster_color = colors(cluster_number)

# Create k-means model
kmean = KMeans(n_clusters=cluster_number)

# Insert information of all pixels (rgb color and x,y position) into Pandas DataFrame
imageW = img.size[0]
imageH = img.size[1]


# Convert image into Lab color space
LABimg = skimage.color.rgb2lab(img)

data = {"r": [], "g": [], "b": [], "L": [], "A": [], "B": [], "x": [], "y": []}
for y in range(0, imageH):
    for x in range(0, imageW):

        rgb = img.getpixel((x, y))

        data["r"].append(rgb[0])
        data["g"].append(rgb[1])
        data["b"].append(rgb[2])
        data["L"].append(LABimg[y][x][0])
        data["A"].append(LABimg[y][x][1])
        data["B"].append(LABimg[y][x][2])
        data["x"].append(x)
        data["y"].append(y)

df = pd.DataFrame(data={"r": data["r"], "g": data["g"], "b": data["b"]})
df_lab = pd.DataFrame(data={"L": data["L"], "A": data["A"], "B": data["B"]})
df_pos = pd.DataFrame(data={"r": data["r"], "g": data["g"], "b": data["b"], "x": data["x"], "y": data["y"]})
df_lab_pos = pd.DataFrame(data={"L": data["L"], "A": data["A"], "B": data["B"], "x": data["x"], "y": data["y"]})

if normalize:
    # Normalize the feature values
    df = pd.DataFrame(data=preprocessing.normalize(df))
    df_pos = pd.DataFrame(data=preprocessing.normalize(df_pos))
    df_lab = pd.DataFrame(data=preprocessing.normalize(df_lab))
    df_lab_pos = pd.DataFrame(data=preprocessing.normalize(df_lab_pos))

# Run k-means
res = kmean.fit_predict(df)
res_pos = kmean.fit_predict(df_pos)
res_lab = kmean.fit_predict(df_lab)
res_lab_pos = kmean.fit_predict(df_lab_pos)

# Average color for each cluster
j = 0
avg_color = [[] for _ in range(cluster_number)]
avg_color_pos = [[] for _ in range(cluster_number)]
avg_color_lab = [[] for _ in range(cluster_number)]
avg_color_lab_pos = [[] for _ in range(cluster_number)]
for y in range(0, imageH):
    for x in range(0, imageW):
        avg_color[res[j]].append(img.getpixel((x, y)))
        avg_color_pos[res_pos[j]].append(img.getpixel((x, y)))
        avg_color_lab[res_lab[j]].append(img.getpixel((x, y)))
        avg_color_lab_pos[res_lab_pos[j]].append(img.getpixel((x, y)))
        j += 1

avg_color = [getAverageRGB(avg_c) for avg_c in avg_color]
avg_color_pos = [getAverageRGB(avg_c) for avg_c in avg_color_pos]
avg_color_lab = [getAverageRGB(avg_c) for avg_c in avg_color_lab]
avg_color_lab_pos = [getAverageRGB(avg_c) for avg_c in avg_color_lab_pos]

# Save segmented image
image = []
for i in range(0, 8):
    image.append(Image.new("RGB", (imageW, imageH)))

j = 0
for y in range(0, imageH):
    for x in range(0, imageW):
        # random color for:
        # rgb
        image[0].putpixel((x, y), cluster_color[res[j]])
        # rgb + position
        image[1].putpixel((x, y), cluster_color[res_pos[j]])
        # lab
        image[2].putpixel((x, y), cluster_color[res_lab[j]])
        # lab + position
        image[3].putpixel((x, y), cluster_color[res_lab_pos[j]])

        # avg color for:
        # rgb
        image[4].putpixel((x, y), avg_color[res[j]])
        # rgb + position
        image[5].putpixel((x, y), avg_color_pos[res_pos[j]])
        # lab
        image[6].putpixel((x, y), avg_color_lab[res_lab[j]])
        # lab + position
        image[7].putpixel((x, y), avg_color_lab_pos[res_lab_pos[j]])

        j += 1

fig, ax = plt.subplots(ncols=4, nrows=2, sharex=True, sharey=True, figsize=(20, 15))

j = 0
for i in range(len(ax)):
    for k in range(int(len(image)/2)):
        ax[i][k].imshow(image[j])
        if k == 0 or k == 2:
            name = "RGB "
        elif k == 1 or k == 3:
            name = "LAB "
        if i == 0:
            ax[i][k].set_title(name+"Without (x, y) position")
        else:
            ax[i][k].set_title(name + "With (x, y) position")
        j += 2
    j = 1

for a in ax:
    for i in range(int(len(image)/2)):
        a[i].axis('off')

plt.tight_layout()
plt.savefig(outputName, dpi=800)


"""
    Run Normalized Cut segmentation
"""

img = np.asarray(img)

# Segment the image using SLIC algorithm
labels1 = segmentation.slic(img, compactness=30, n_segments=400)

# Replace each pixel with the average RGB color of its region
out1 = color.label2rgb(labels1, img, kind='avg')

# Create the Region Adjacency Graph (RAG)
# Each node in the RAG represents a set of pixels within the image with the same
# label in labels. The weight between two adjacent regions represents how
# similar or dissimilar the two regions are, depending on the mode parameter
g = graph.rag_mean_color(img, labels1, mode='similarity')
labels2 = graph.cut_normalized(labels1, g)
out2 = color.label2rgb(labels2, img, kind='avg')

fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(6, 8))

ax[0].imshow(out1)
ax[0].set_title("Superpixel view")
ax[1].imshow(out2)
ax[1].set_title("NCut segmetnation result")

for a in ax:
    a.axis('off')

plt.tight_layout()

name = outputName[:-4]
outputName = name + "_ncut.png"
plt.savefig(outputName, dpi=600)

The intermediate figure is too large to upload, so try that part yourself. Here is the NCut result:
Segmentation result
3. Watershed
As used here this method is binary (foreground vs. background): Otsu thresholding plus morphology produce sure-background and sure-foreground markers, and the watershed transform then grows regions from those markers.

import cv2 as cv
import numpy as np

def water_image():
    print(src.shape)
    blurred = cv.pyrMeanShiftFiltering(src, 10, 100)    # mean-shift filtering to suppress noise

    # gray\binary image
    gray = cv.cvtColor(blurred, cv.COLOR_BGR2GRAY)
    ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
    # cv.imshow("binary", binary)

    # morphology operation
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    mb = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel, iterations=2)
    sure_bg = cv.dilate(mb, kernel, iterations=3)
    # cv.imshow("shape deal", sure_bg)

    # distance transform
    dist = cv.distanceTransform(mb, cv.DIST_L2, 3)
    dist_output = cv.normalize(dist, None, 0, 1.0, cv.NORM_MINMAX)
    # cv.imshow("distance transform", dist_output*70)

    ret, surface = cv.threshold(dist, dist.max()*0.6, 255, cv.THRESH_BINARY)
    # cv.imshow("find seed", surface)

    surface_fg = np.uint8(surface)
    unknown = cv.subtract(sure_bg, surface_fg)
    ret, markers = cv.connectedComponents(surface_fg)
    print(ret)    # number of sure-foreground components (watershed seeds)

    # watershed transform
    markers += 1
    markers[unknown == 255] = 0
    markers = cv.watershed(src, markers=markers)
    src[markers == -1] = [0, 0, 255]
    cv.imshow("markers", markers)
    cv.imwrite("./markers.jpg",markers)
    cv.imshow("watershed segmentation", src)
    cv.imwrite('./temp.jpg', src)


src = cv.imread("./sample.jpg")
cv.imshow("ori", src)
water_image()
cv.waitKey(0)
cv.destroyAllWindows()

Segmentation result
4. Graph-based segmentation
Link: https://github.com/luisgabriel/image-segmentation
Link 2: https://github.com/salaee/pegbis
graph.py

class Node:
    def __init__(self, parent, rank=0, size=1):
        self.parent = parent
        self.rank = rank
        self.size = size

    def __repr__(self):
        return '(parent=%s, rank=%s, size=%s)' % (self.parent, self.rank, self.size)

class Forest:
    def __init__(self, num_nodes):
        self.nodes = [Node(i) for i in range(num_nodes)]
        self.num_sets = num_nodes

    def size_of(self, i):
        return self.nodes[i].size

    def find(self, n):
        temp = n
        while temp != self.nodes[temp].parent:
            temp = self.nodes[temp].parent

        self.nodes[n].parent = temp
        return temp

    def merge(self, a, b):
        if self.nodes[a].rank > self.nodes[b].rank:
            self.nodes[b].parent = a
            self.nodes[a].size = self.nodes[a].size + self.nodes[b].size
        else:
            self.nodes[a].parent = b
            self.nodes[b].size = self.nodes[b].size + self.nodes[a].size

            if self.nodes[a].rank == self.nodes[b].rank:
                self.nodes[b].rank = self.nodes[b].rank + 1

        self.num_sets = self.num_sets - 1

    def print_nodes(self):
        for node in self.nodes:
            print(node)

def create_edge(img, width, x, y, x1, y1, diff):
    vertex_id = lambda x, y: y * width + x
    w = diff(img, x, y, x1, y1)
    return (vertex_id(x, y), vertex_id(x1, y1), w)

def build_graph(img, width, height, diff, neighborhood_8=False):
    graph_edges = []
    for y in range(height):
        for x in range(width):
            if x > 0:
                graph_edges.append(create_edge(img, width, x, y, x-1, y, diff))

            if y > 0:
                graph_edges.append(create_edge(img, width, x, y, x, y-1, diff))

            if neighborhood_8:
                if x > 0 and y > 0:
                    graph_edges.append(create_edge(img, width, x, y, x-1, y-1, diff))

                if x > 0 and y < height-1:
                    graph_edges.append(create_edge(img, width, x, y, x-1, y+1, diff))

    return graph_edges

def remove_small_components(forest, graph, min_size):
    for edge in graph:
        a = forest.find(edge[0])
        b = forest.find(edge[1])

        if a != b and (forest.size_of(a) < min_size or forest.size_of(b) < min_size):
            forest.merge(a, b)

    return  forest

def segment_graph(graph_edges, num_nodes, const, min_size, threshold_func):
    # Step 1: initialization
    forest = Forest(num_nodes)
    weight = lambda edge: edge[2]
    sorted_graph = sorted(graph_edges, key=weight)
    threshold = [ threshold_func(1, const) for _ in range(num_nodes) ]

    # Step 2: merging
    for edge in sorted_graph:
        parent_a = forest.find(edge[0])
        parent_b = forest.find(edge[1])
        a_condition = weight(edge) <= threshold[parent_a]
        b_condition = weight(edge) <= threshold[parent_b]

        if parent_a != parent_b and a_condition and b_condition:
            forest.merge(parent_a, parent_b)
            a = forest.find(parent_a)
            threshold[a] = weight(edge) + threshold_func(forest.nodes[a].size, const)

    return remove_small_components(forest, sorted_graph, min_size)

main.py

import argparse
import logging
import time
from graph import build_graph, segment_graph
from random import random
from PIL import Image, ImageFilter
from skimage import io
import numpy as np


def diff(img, x1, y1, x2, y2):
    # cast to float so the uint8 subtraction does not wrap around
    _out = np.sum((img[x1, y1].astype(float) - img[x2, y2].astype(float)) ** 2)
    return np.sqrt(_out)


def threshold(size, const):
    return (const * 1.0 / size)


def generate_image(forest, width, height):
    random_color = lambda: (int(random()*255), int(random()*255), int(random()*255))
    colors = [random_color() for i in range(width*height)]

    img = Image.new('RGB', (width, height))
    im = img.load()
    for y in range(height):
        for x in range(width):
            comp = forest.find(y * width + x)
            im[x, y] = colors[comp]

    return img.transpose(Image.ROTATE_270).transpose(Image.FLIP_LEFT_RIGHT)


def get_segmented_image(sigma, neighbor, K, min_comp_size, input_file, output_file):
    if neighbor != 4 and neighbor != 8:
        logger.warning('Invalid neighborhood chosen. The acceptable values are 4 or 8.')
        logger.warning('Segmenting with 4-neighborhood...')
    start_time = time.time()
    image_file = Image.open(input_file)

    size = image_file.size  # (width, height) in Pillow/PIL
    logger.info('Image info: {} | {} | {}'.format(image_file.format, size, image_file.mode))

    # Gaussian Filter
    smooth = image_file.filter(ImageFilter.GaussianBlur(sigma))
    smooth = np.array(smooth)
    
    logger.info("Creating graph...")
    graph_edges = build_graph(smooth, size[1], size[0], diff, neighbor==8)
    
    logger.info("Merging graph...")
    forest = segment_graph(graph_edges, size[0]*size[1], K, min_comp_size, threshold)

    logger.info("Visualizing segmentation and saving into: {}".format(output_file))
    image = generate_image(forest, size[1], size[0])
    image.save(output_file)

    logger.info('Number of components: {}'.format(forest.num_sets))
    logger.info('Total running time: {:0.4}s'.format(time.time() - start_time))


if __name__ == '__main__':
    # argument parser
    parser = argparse.ArgumentParser(description='Graph-based Segmentation')
    parser.add_argument('--sigma', type=float, default=1.0, 
                        help='a float for the Gaussian Filter')
    parser.add_argument('--neighbor', type=int, default=8, choices=[4, 8],
                        help='choose the neighborhood format, 4 or 8')
    parser.add_argument('--K', type=float, default=10.0, 
                        help='a constant to control the threshold function of the predicate')
    parser.add_argument('--min-comp-size', type=int, default=2000, 
                        help='minimum component size: components with fewer pixels are merged into a neighbour')
    parser.add_argument('--input-file', type=str, default="./assets/seg_test.jpg", 
                        help='the file path of the input image')
    parser.add_argument('--output-file', type=str, default="./assets/seg_test_out.jpg", 
                        help='the file path of the output image')
    args = parser.parse_args()

    # basic logging settings
    logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M')
    logger = logging.getLogger(__name__)

    get_segmented_image(args.sigma, args.neighbor, args.K, args.min_comp_size, args.input_file, args.output_file)

Command line: python .\main.py --input-file="./sample.jpg" --output-file="./temp.jpg"
The parameters (especially --K and --min-comp-size) need some tuning; experiment with them yourself. A couple of illustrative invocations follow.
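For example (the values here are only starting points I would try, not tuned results):

python .\main.py --input-file="./sample.jpg" --output-file="./temp.jpg" --sigma=0.8 --K=300 --min-comp-size=100
python .\main.py --input-file="./sample.jpg" --output-file="./temp.jpg" --neighbor=4 --K=800 --min-comp-size=500
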
Graph-based result
5. Mean Shift
For this one I will not show the result on the sample image. The code below is an example of how I apply Mean Shift to remote-sensing imagery; the other methods can be adapted to remote-sensing data in the same way (a sketch of such an adaptation follows the code).

# -*- coding: utf-8 -*-
import cv2
import gdal
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from pydensecrf.utils import compute_unary, create_pairwise_bilateral, \
    create_pairwise_gaussian, softmax_to_unary, unary_from_softmax,unary_from_labels
import pydensecrf.densecrf as dcrf

# Get im{read,write} from somewhere.
try:
    from cv2 import imread, imwrite
except ImportError:
    # Note that, sadly, skimage unconditionally import scipy and matplotlib,
    # so you'll need them if you don't have OpenCV. But you probably have them.
    from skimage.io import imread, imsave
    imwrite = imsave
    # TODO: Use scipy instead.


def read_img(filename):
    dataset=gdal.Open(filename)

    im_width = dataset.RasterXSize
    im_height = dataset.RasterYSize

    im_geotrans = dataset.GetGeoTransform()
    im_proj = dataset.GetProjection()
    im_data = dataset.ReadAsArray(0,0,im_width,im_height)

    del dataset 
    return im_proj,im_geotrans,im_width, im_height,im_data


def write_img(filename, im_proj, im_geotrans, im_data):
    if 'int8' in im_data.dtype.name:
        datatype = gdal.GDT_Byte
    elif 'int16' in im_data.dtype.name:
        datatype = gdal.GDT_UInt16
    else:
        datatype = gdal.GDT_Float32

    if len(im_data.shape) == 3:
        im_bands, im_height, im_width = im_data.shape
    else:
        im_bands, (im_height, im_width) = 1,im_data.shape 

    driver = gdal.GetDriverByName("GTiff")
    dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)

    dataset.SetGeoTransform(im_geotrans)
    dataset.SetProjection(im_proj)

    if im_bands == 1:
        dataset.GetRasterBand(1).WriteArray(im_data)
    else:
        for i in range(im_bands):
            dataset.GetRasterBand(i+1).WriteArray(im_data[i])

if __name__ == '__main__':
    img_path = 'E:/xx/sb_test1.tif'
    im_proj, im_geotrans, im_width, im_height, im_data = read_img(img_path)
    im_data = im_data[0:3,...]
    im_data = im_data.transpose((2,1,0))
    im_temp = im_data.reshape((-1,3))
    im_temp = np.float32(im_temp)
    bandwidth=estimate_bandwidth(im_temp, quantile=0.2, n_samples=500)
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=True, cluster_all=True)
    ms.fit_predict(im_temp)
    labels=ms.labels_
    cluster_centers = ms.cluster_centers_
    seg = labels.reshape((im_data.shape[0], im_data.shape[1]))
    seg = seg.transpose(1,0)
    seg_path = 'E:/xx/test/sb_test1_seg.tif'
    write_img(seg_path, im_proj, im_geotrans, seg)
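
As an example of adapting another method the same way, here is a minimal sketch (my own addition, with placeholder paths) that reuses the read_img/write_img helpers above to run skimage's SLIC on a GeoTIFF and save the resulting label raster:

import numpy as np
from skimage.segmentation import slic

im_proj, im_geotrans, im_width, im_height, im_data = read_img('E:/xx/sb_test1.tif')
rgb = im_data[0:3, ...].transpose((1, 2, 0))          # (bands, H, W) -> (H, W, bands)
labels = slic(rgb, n_segments=1000, compactness=20)   # per-pixel superpixel ids, shape (H, W)
write_img('E:/xx/sb_test1_slic.tif', im_proj, im_geotrans, labels.astype(np.int16))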

6. Maximum-entropy thresholding
Link: https://blog.csdn.net/u011939755/article/details/88550948
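
A quick note on what the code below computes (essentially the classic Kapur maximum-entropy criterion, stated here for reference): for each candidate threshold t it takes the grayscale histogram p_i, forms the background entropy H_b(t) = -sum_{i<t} (p_i/P_b) * log2(p_i/P_b) with P_b = sum_{i<t} p_i, the analogous foreground entropy H_f(t) over i >= t, and keeps the t that maximizes H_b(t) + H_f(t); that threshold is then passed to cv2.threshold.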

import numpy as np
import cv2
def segment(img):
    """
    :param img:
    :return:
    """
    def calculate_current_entropy(hist, threshold):
        data_hist = hist.copy()
        background_sum = 0.
        target_sum = 0.
        for i in range(256):
            if i < threshold:  # accumulate background histogram mass
                background_sum += data_hist[i]
            else:  # accumulate target (foreground) histogram mass
                target_sum += data_hist[i]
        background_ent = 0.
        target_ent = 0.
        for i in range(256):
            if i < threshold:  # background entropy
                if data_hist[i] == 0:
                    continue
                ratio1 = data_hist[i] / background_sum
                background_ent -= ratio1 * np.log2(ratio1)
            else:  # target (foreground) entropy
                if data_hist[i] == 0:
                    continue
                ratio2 = data_hist[i] / target_sum
                target_ent -= ratio2 * np.log2(ratio2)
        return target_ent + background_ent

    def max_entropy_segmentation(img):
        channels = [0]
        hist_size = [256]
        prange = [0, 256]
        hist = cv2.calcHist([img], channels, None, hist_size, prange)
        hist = np.reshape(hist, [-1])
        max_ent = 0.
        max_index = 0
        for i in range(256):
            cur_ent = calculate_current_entropy(hist, i)
            if cur_ent > max_ent:
                max_ent = cur_ent
                max_index = i
        ret, th = cv2.threshold(img, max_index, 255, cv2.THRESH_BINARY)
        return th
    img = max_entropy_segmentation(img)
    return img

if __name__ == "__main__":
    path = './sample.jpg'
    im = cv2.imread(path)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    im_re = segment(im)
    cv2.imwrite('./entroy_temp.jpg', im_re)

Segmentation result:
Maximum-entropy segmentation result
