Testing the garbage classification optimization results by comparing mask similarity

1. First, extract the original mask annotation images listed in val.txt
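The script below assumes each line of val.txt pairs an image path with its mask path, separated by a comma; the file names shown here are hypothetical, only the comma-separated layout matters:

images/val/0_3.jpg,masks/val/0_3.png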

import os  # os is needed for path handling
from PIL import Image

val_path = "F:\\1207garbage_classification\\0505_garbage_optimize\\val_mask_ori\\"     # output folder for the original validation masks
os.makedirs(val_path, exist_ok=True)
data = []
i = 0

for line in open("F:\\1207garbage_classification\\0505_garbage_optimize\\val.txt", "r"):  # read val.txt line by line
    data.append(line)
for img in data:

    filename = img.split(",")

    print(filename[1])
    filename = filename[1].replace('\n', '')
    a = Image.open("F:\\1207garbage_classification\\0505_garbage_optimize\\" + filename)   # path of the mask annotation image
    image_copy = a.copy()
    b = os.path.split(filename)[-1]  # keep only the file name (last path component)
    image_copy.save(val_path + b)
    i += 1

print("Total number of images in the val set:", i)

2. Take the original images from the validation set, run inference on them, and produce the predicted mask images

import sys
sys.path.append("..")
sys.path.insert(0, '.')
import argparse
import torch
import torch.nn as nn
from PIL import Image
import numpy as np
import cv2
import time
import os  # os is needed for path handling

import lib.transform_cv2 as T
from lib.models import model_factory
from configs import set_cfg_from_file

from tqdm import tqdm

torch.set_grad_enabled(False)
np.random.seed(123)

sys.path.append('G:\\addwater0906\\')

val_path = "G:\\addwater0906\\val\\"     # val验证集的路径
data = []
i = 0

for line in open("G:\\addwater0906\\val.txt", "r"):  # 设置文件对象并读取每一行文件
    data.append(line)
for img in data:

    filename = img.split(",")

    print(filename[0])
    a = cv2.imread("G:\\addwater0906\\"+filename[0])   # path of the original image

    b = os.path.split(filename[0])[-1]  # keep only the file name (last path component)
    cv2.imwrite(val_path + b, a)
    i += 1

print("一共验证集val多少图片:", i)
# args
parse = argparse.ArgumentParser()
#parse.add_argument('--config', dest='config', type=str, default='BiSeNet-master/configs/bisenetv2_city.py',)
parse.add_argument('--config', dest='config', type=str, default='./configs/bisenetv2_city.py',)
# parse.add_argument('--weight-path', type=str, default='BiSeNet-master/res/model_final_v2_city.pth',)
parse.add_argument('--weight-path', type=str, default='./waterv3_model_final.pth',)  # path to the trained model weights
#parse.add_argument('--weight-path', type=str, default='./res/model_final.pth',)
#parse.add_argument('--img-path', dest='img_path', type=str, default='BiSeNet-master/example.png',)
parse.add_argument('--img_path', dest='img_path', type=str, default= val_path,)
args = parse.parse_args()
cfg = set_cfg_from_file(args.config)



palette = np.random.randint(0, 256, (256, 3), dtype=np.uint8)

# define model
net = model_factory[cfg.model_type](cfg.n_cats, aux_mode='pred')
net.load_state_dict(torch.load(args.weight_path, map_location='cpu'), strict=False)
net.eval()
# net.cuda()

# prepare data
to_tensor = T.ToTensor(
    mean=(0.3257, 0.3690, 0.3223), # city, rgb
    std=(0.2112, 0.2148, 0.2115),
)
dir_path = args.img_path




for file_name in tqdm(os.listdir(dir_path)):
    path = val_path + file_name  # path of one validation image
    im = cv2.imread(path)[:, :, ::-1]  # read the image and convert BGR to RGB
    # im = to_tensor(dict(im=im, lb=None))['im'].unsqueeze(0).cuda()
    im = to_tensor(dict(im=im, lb=None))['im'].unsqueeze(0)
    # inference
    t1 = time.time()
    out = net(im).squeeze().detach().cpu().numpy()

    # pred = palette[out]
    cv2.imwrite('G:\\addwater0906\\val_mask\\' + file_name, out)    # output folder for the raw class-index masks (they look nearly black)
# print('number of images with more than 8% mis-segmentation:', count1)
# print('number of images with more than 5% mis-segmentation:', count2)

# cv2.imwrite('output.jpg', pred)
# cv2.namedWindow("output", cv2.WINDOW_NORMAL)
# cv2.imshow("output", pred)
# cv2.waitKey(0)
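Because the masks saved above are raw class-index maps (0/1 for a two-class setup), they appear almost black when opened directly. A minimal sketch of also writing a human-visible version by mapping indices through the random palette defined earlier; the val_mask_vis output folder is my own hypothetical name, not part of the original pipeline:

    # inside the inference loop, right after computing `out`
    pred = palette[out]                                                  # map each class index to an RGB colour
    cv2.imwrite('G:\\addwater0906\\val_mask_vis\\' + file_name, pred)    # hypothetical output folder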
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import cv2


def create_pascal_label_colormap():
    colormap = np.zeros((256, 3), dtype=int)
    ind = np.arange(256, dtype=int)

    for shift in reversed(range(8)):
        for channel in range(3):
            colormap[:, channel] |= ((ind >> channel) & 1) << shift
        ind >>= 3

    return colormap


def label_to_color_image(label):
    if label.ndim != 2:
        raise ValueError('Expect 2-D input label')

    colormap = create_pascal_label_colormap()

    if np.max(label) >= len(colormap):
        raise ValueError('label value too large.')

    return colormap[label]


def vis_segmentation(image, seg_map):
    """
    Visualize the input image together with its segmentation mask.
    """
    plt.figure(figsize=(15, 5))
    grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])

    plt.subplot(grid_spec[0])
    plt.imshow(image)
    plt.axis('off')
    plt.title('input image')

    plt.subplot(grid_spec[1])
    seg_image = label_to_color_image(seg_map).astype(np.uint8)
    plt.imshow(seg_image)
    plt.axis('off')
    plt.title('segmentation map')

    plt.subplot(grid_spec[2])
    plt.imshow(image)
    plt.imshow(seg_image, alpha=0.5)
    plt.axis('off')
    plt.title('segmentation overlay')

    unique_labels = np.unique(seg_map)
    ax = plt.subplot(grid_spec[3])
    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')
    ax.yaxis.tick_right()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0.0)
    plt.grid(False)
    # plt.imsave('G:\\1\\' + image)
    # plt.show()


LABEL_NAMES = np.asarray(['background', 'water'])  # assuming only two classes
FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)

img_path = 'G:\\addwater0906\\val\\'   # folder of the original images
png_path = 'G:\\addwater0906\\val_mask\\'   # folder of the raw (nearly black) predicted masks
i = 0
for img in os.listdir(img_path):
    print(img)
    imgfile = img_path + img
    pngfile = png_path + img
    new_path = "G:\\addwater0906\\pltsave\\" + img    #保存新的验证集val.txt测试结果可视化路径!!!
    img = cv2.imread(imgfile, 1)
    img = img[:, :, ::-1]
    seg_map = cv2.imread(pngfile, 0)
    vis_segmentation(img, seg_map)
    plt.savefig(new_path)
    plt.close()
    i +=1

print('Total number of visualized images:', i)
print('Done.')

3. Compare the original masks against the inferred masks pixel by pixel to measure their similarity

import os

from PIL import Image
import time
from tqdm import tqdm

def pixel_equal(image1, image2, x, y):
    """
    Check whether two pixels are the same
    :param image1: first image
    :param image2: second image
    :param x: x position
    :param y: y position
    :return: whether the two pixels match
    """
    # Fetch the pixel at (x, y) from both images
    pixel1 = image1.load()[x, y]  # e.g. pixel1 = (1, 1, 1)
    pixel2 = image2.load()[x, y]
    threshold = 1
    # Compare the RGB values channel by channel; with threshold = 1 this effectively requires an exact match
    if abs(pixel1[0] - pixel2[0]) < threshold and abs(pixel1[1] - pixel2[1]) < threshold and abs(pixel1[2] - pixel2[2]) < threshold:
        return True
    else:
        return False

def compare(image1, image2):
    """
    Compare two images pixel by pixel
    :param image1: first image
    :param image2: second image
    :return: (same_rate, nosame_rate)
    """
    left = 0        # starting column
    right_num = 0   # number of matching pixels
    false_num = 0   # number of differing pixels
    all_num = 0     # total number of pixels
    for i in range(left, image1.size[0]):
        for j in range(image1.size[1]):
            if pixel_equal(image1, image2, i, j):
                right_num += 1
            else:
                false_num += 1
            all_num += 1
    same_rate = right_num / all_num     # fraction of matching pixels
    nosame_rate = false_num / all_num   # fraction of differing pixels
    # print("same_rate: ", same_rate)
    # print("nosame_rate: ", nosame_rate)
    return same_rate, nosame_rate


if __name__ == "__main__":
    # t1 = time.time()
    img_ori_path=r"F:\1207garbage_classification\0505_garbage_optimize\val_mask_ori"
    img_demo_path = r"F:\1207garbage_classification\0505_garbage_optimize\val_mask_demo"
    sum_same_rate = 0
    sum_nosame_rate =0
    n = len(os.listdir(img_ori_path))
    for image1 in tqdm(os.listdir(img_ori_path)):
        print(image1)
        image1_path = os.path.join(img_ori_path,image1)
        image2_path = os.path.join(img_demo_path, image1)
        image1 = Image.open(image1_path).convert("RGB")
        image2 = Image.open(image2_path).convert("RGB")

        # image1 = Image.open(r"F:\1207garbage_classification\0505_garbage_optimize\val_mask_demo\0_3.png")
        # image2 = Image.open(r"F:\1207garbage_classification\0505_garbage_optimize\val_mask_ori\0_3.png")

        same_rate,nosame_rate = compare(image1, image2)

        print("same_rate:%.2f,nosame_rate:%.2f" % (same_rate, nosame_rate))
        sum_same_rate += same_rate
        sum_nosame_rate += nosame_rate

        # t2 = time.time()
        # print("t=", t2-t1)
    avg_same_rate = sum_same_rate / n
    avg_nosame_rate = sum_nosame_rate / n
    print("avg_same_rate:%.2f,avg_nosame_rate:%.2f" %(avg_same_rate,avg_nosame_rate))

###############################################
# # -*- coding: utf-8 -*-
# # !/usr/bin/env python
# # @Time    : 2018/11/17 14:52
# # @Author  : xhh
# # @Desc    : cosine similarity computation
# # @File    : difference_image_consin.py
# # @Software: PyCharm
# from PIL import Image
# from numpy import average, dot, linalg
#
#
# # Normalize the images to a common size
# def get_thum(image, size=(64, 64), greyscale=False):
#     # Resize the image; Image.ANTIALIAS gives a high-quality downscale
#     # (note: ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the equivalent filter)
#     image = image.resize(size, Image.ANTIALIAS)
#     if greyscale:
#         # Convert to 'L' mode, i.e. an 8-bit greyscale image
#         image = image.convert('L')
#     return image
#
#
# # Compute the cosine similarity between two images
# def image_similarity_vectors_via_numpy(image1, image2):
#     image1 = get_thum(image1)
#     image2 = get_thum(image2)
#     images = [image1, image2]
#     vectors = []
#     norms = []
#     for image in images:
#         vector = []
#         for pixel_tuple in image.getdata():
#             vector.append(average(pixel_tuple))
#         vectors.append(vector)
#         # linalg = linear algebra; norm computes the L2 norm of the pixel vector
#         norms.append(linalg.norm(vector, 2))
#     a, b = vectors
#     a_norm, b_norm = norms
#     # dot returns the dot product of the two normalized vectors
#     res = dot(a / a_norm, b / b_norm)
#     return res
#
#
# image1 = Image.open(r"F:\1207garbage_classification\0505_garbage_optimize\val_mask_demo\0_3.png")
# image2 = Image.open(r"F:\1207garbage_classification\0505_garbage_optimize\val_mask_ori\0_3.png")
# cosin = image_similarity_vectors_via_numpy(image1, image2)
# print('image cosine similarity:', cosin)

The comparison results look like this:
[figure: comparison result screenshot]
