个人使用:高程与 RGB 数据处理脚本

统一两者样本:删除只在其中一个目录中存在的影像,使两套数据一一对应

import os
import cv2
# source = 'dataset/sat_train/'
# Paired RGB / elevation validation folders that must contain the same samples.
rgb_path ="/mnt/sdb1/fenghaixia/fourtestys/dataset/valid/"
h_path ="/mnt/sdb1/fenghaixia/fourtesth/dataset/valid/"
 
# Keep names containing 'tif' (substring match, not a strict extension check).
rgb_names = filter(lambda x: x.find('tif')!=-1, os.listdir(rgb_path))
h_names = filter(lambda x: x.find('tif')!=-1, os.listdir(h_path))
#trainlist = list(map(lambda x: x[:-8], imagelist))
# Delete RGB tiles that have no elevation counterpart...
for f in rgb_names:
    h_name = h_path + f.strip()
    if not os.path.exists(h_name):
        os.remove(rgb_path + f.strip())
        print(rgb_path + f.strip())
# ...and elevation tiles that have no RGB counterpart.  Both directory
# listings were taken above, before any deletion, so both passes see the
# original contents.
for f in h_names:
    rgb_name = rgb_path + f.strip()
    if not os.path.exists(rgb_name):
        os.remove(h_path + f.strip())
        print(h_path + f.strip())

RGB:大图膨胀滑窗预测流程(裁剪 → 推理 → 拼接)

import torch
from torch.autograd import Variable as V

import cv2
import os
import shutil
from PIL import Image
from PIL import ImageFile
import numpy as np
import matplotlib.pyplot as plt

from osgeo import gdal,ogr,osr

from time import time
from utils.utils_metrics import compute_mIoU
from utils.utils_metrics import compute_IoU

from networks.dinknet import DinkNet34
from framework import MyFrame
from loss import dice_bce_loss
from data import ImageFolder

from PyQt5 import QtCore, QtGui
import sys
from PyQt5.QtWidgets import QMainWindow,QApplication,QWidget
from PyQt5.QtCore import QEventLoop, QTimer, QThread
BATCHSIZE_PER_CARD = 16  # inference batch capacity allocated per GPU

def saveList(pathName):
    """Append the stem (text before the first '.') of each file name in
    *pathName* as one line to ./dataset/gt.txt.

    The file is opened once in append mode; the original re-opened it for
    every name and its bare ``f.close`` (missing parentheses) never ran —
    the ``with`` block now guarantees the handle is closed.
    """
    with open("./dataset/gt.txt", "a") as f:
        for file_name in pathName:
            f.write(file_name.split(".")[0] + "\n")

def dirList(gt_dir,path_list):
    """For every entry of *path_list* that is a directory under *gt_dir*,
    record that directory's contents via saveList().

    NOTE(review): the original tested ``os.path.isdir`` only once, outside
    the loop, so it inspected just the last joined path (and raised
    NameError for an empty list); the check is now performed per entry.
    """
    for entry in path_list:
        path = os.path.join(gt_dir, entry)
        if os.path.isdir(path):
            saveList(os.listdir(path))

def DeleteShp(layer,count):
    """Drop every feature whose 'value' attribute equals 0 from *layer*.

    *count* is the feature count taken before any deletion; FIDs are
    looked up per feature so earlier deletions do not shift later ones.
    """
    for idx in range(count):
        feat = layer.GetFeature(idx)
        if feat.GetField('value') == 0:
            layer.DeleteFeature(int(feat.GetFID()))

def GridToShp(input_path,Outshp_path):
    """Polygonize band 1 of the raster at *input_path* into an ESRI
    Shapefile at *Outshp_path*, then drop all polygons whose 'value'
    field is 0 (background).

    The band is passed to FPolygonize as both data and mask, so 0/nodata
    handling follows gdal.FPolygonize semantics.
    """
    inraster = gdal.Open(input_path)
    im_data = inraster.GetRasterBand(1)    
    driver = ogr.GetDriverByName("ESRI Shapefile")
    # Overwrite any existing shapefile at the target path.
    if os.access(Outshp_path,os.F_OK):  
        driver.DeleteDataSource(Outshp_path)
    ds = driver.CreateDataSource(Outshp_path)  
    spatialref = osr.SpatialReference()
    # proj = osr.SpatialReference(wkt = inraster.GetProjection())
    # epsg = int(proj.GetAttrValue("AUTHORITY",1))  
    # spatialref.ImportFromEPSG(epsg) 
    # Inherit the raster's projection verbatim.
    spatialref.ImportFromWkt(inraster.GetProjection())  
    geomtype = ogr.wkbMultiPolygon  
  
    # Layer name is the output path minus its '.shp' extension.
    layer = ds.CreateLayer(Outshp_path[:-4],srs=spatialref,geom_type=geomtype) 
    layer.CreateField(ogr.FieldDefn('value',ogr.OFTReal))
    # Pixel values of each polygonized region land in the 'value' field.
    gdal.FPolygonize(im_data,im_data,layer,0,[],None)
    ds.SyncToDisk()
    ds.Destroy()
    
    # Reopen in update mode to strip background (value==0) polygons.
    ds = ogr.Open(Outshp_path,True)
    Layer = ds.GetLayer(0)
    Count = Layer.GetFeatureCount()
    DeleteShp(Layer,Count)
    ds.Destroy()
class TTAFrame():
    """Test-time-augmentation (TTA) inference wrapper.

    Runs the wrapped segmentation net on the 8 dihedral variants of an
    image (identity / 90-degree rotation, each with optional vertical and
    horizontal flips), undoes every transform on the outputs, and sums
    them.  The _1/_2/_4/_8 variants differ only in how the 8 views are
    batched into forward passes.
    """
    def __init__(self, net):
        # `net` is a network class (e.g. DinkNet34); instantiate it on GPU
        # and wrap in DataParallel across every visible device.
        self.net = net().cuda()
        self.net = torch.nn.DataParallel(self.net, device_ids=range(torch.cuda.device_count()))
        
    def test_one_img_from_path(self, path, evalmode = True):
        # Dispatch to a batching variant based on available capacity.
        # NOTE(review): with zero CUDA devices batchsize is 0 and this
        # returns None; callers assume at least one GPU is present.
        if evalmode:
            self.net.eval()
        batchsize = torch.cuda.device_count() * BATCHSIZE_PER_CARD
        if batchsize >= 8:
            return self.test_one_img_from_path_1(path)
        elif batchsize >= 4:
            return self.test_one_img_from_path_2(path)
        elif batchsize >= 2:
            return self.test_one_img_from_path_4(path)

    def test_one_img_from_path_8(self, path):
        # NOTE(review): byte-for-byte identical to test_one_img_from_path_4
        # below, and never reached via the dispatcher above.
        img = cv2.imread(path)#.transpose(2,0,1)[None]
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None],img90[None]])  # batch: [orig, rot90]
        img2 = np.array(img1)[:,::-1]    # + vertical flip
        img3 = np.array(img1)[:,:,::-1]  # + horizontal flip
        img4 = np.array(img2)[:,:,::-1]  # + both flips
        
        # HWC -> CHW for the network.
        img1 = img1.transpose(0,3,1,2)
        img2 = img2.transpose(0,3,1,2)
        img3 = img3.transpose(0,3,1,2)
        img4 = img4.transpose(0,3,1,2)
        
        # Scale uint8 [0,255] into [-1.6, 1.6] before inference.
        img1 = V(torch.Tensor(np.array(img1, np.float32)/255.0 * 3.2 -1.6).cuda())
        img2 = V(torch.Tensor(np.array(img2, np.float32)/255.0 * 3.2 -1.6).cuda())
        img3 = V(torch.Tensor(np.array(img3, np.float32)/255.0 * 3.2 -1.6).cuda())
        img4 = V(torch.Tensor(np.array(img4, np.float32)/255.0 * 3.2 -1.6).cuda())
        
        maska = self.net.forward(img1).squeeze().cpu().data.numpy()
        maskb = self.net.forward(img2).squeeze().cpu().data.numpy()
        maskc = self.net.forward(img3).squeeze().cpu().data.numpy()
        maskd = self.net.forward(img4).squeeze().cpu().data.numpy()
        
        # Undo the flips, then the rot90, and sum all 8 views.
        mask1 = maska + maskb[:,::-1] + maskc[:,:,::-1] + maskd[:,::-1,::-1]
        mask2 = mask1[0] + np.rot90(mask1[1])[::-1,::-1]
        
        return mask2

    def test_one_img_from_path_4(self, path):
        # 8 TTA views in 4 forward passes of 2 images each.
        img = cv2.imread(path)#.transpose(2,0,1)[None]
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None],img90[None]])  # batch: [orig, rot90]
        img2 = np.array(img1)[:,::-1]    # + vertical flip
        img3 = np.array(img1)[:,:,::-1]  # + horizontal flip
        img4 = np.array(img2)[:,:,::-1]  # + both flips
        
        # HWC -> CHW for the network.
        img1 = img1.transpose(0,3,1,2)
        img2 = img2.transpose(0,3,1,2)
        img3 = img3.transpose(0,3,1,2)
        img4 = img4.transpose(0,3,1,2)
        
        # Scale uint8 [0,255] into [-1.6, 1.6] before inference.
        img1 = V(torch.Tensor(np.array(img1, np.float32)/255.0 * 3.2 -1.6).cuda())
        img2 = V(torch.Tensor(np.array(img2, np.float32)/255.0 * 3.2 -1.6).cuda())
        img3 = V(torch.Tensor(np.array(img3, np.float32)/255.0 * 3.2 -1.6).cuda())
        img4 = V(torch.Tensor(np.array(img4, np.float32)/255.0 * 3.2 -1.6).cuda())
        
        maska = self.net.forward(img1).squeeze().cpu().data.numpy()
        maskb = self.net.forward(img2).squeeze().cpu().data.numpy()
        maskc = self.net.forward(img3).squeeze().cpu().data.numpy()
        maskd = self.net.forward(img4).squeeze().cpu().data.numpy()
        
        # Undo the flips, then the rot90, and sum all 8 views.
        mask1 = maska + maskb[:,::-1] + maskc[:,:,::-1] + maskd[:,::-1,::-1]
        mask2 = mask1[0] + np.rot90(mask1[1])[::-1,::-1]
        
        return mask2
    
    def test_one_img_from_path_2(self, path):
        # 8 TTA views in 2 forward passes of 4 images each.
        img = cv2.imread(path)#.transpose(2,0,1)[None]
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None],img90[None]])
        img2 = np.array(img1)[:,::-1]
        img3 = np.concatenate([img1,img2])      # [orig, rot90, vflips]
        img4 = np.array(img3)[:,:,::-1]         # horizontally flipped copies
        img5 = img3.transpose(0,3,1,2)
        img5 = np.array(img5, np.float32)/255.0 * 3.2 -1.6
        img5 = V(torch.Tensor(img5).cuda())
        img6 = img4.transpose(0,3,1,2)
        img6 = np.array(img6, np.float32)/255.0 * 3.2 -1.6
        img6 = V(torch.Tensor(img6).cuda())
        
        maska = self.net.forward(img5).squeeze().cpu().data.numpy()#.squeeze(1)
        maskb = self.net.forward(img6).squeeze().cpu().data.numpy()
        
        # Undo hflip, then vflip, then the rot90, summing as we go.
        mask1 = maska + maskb[:,:,::-1]
        mask2 = mask1[:2] + mask1[2:,::-1]
        mask3 = mask2[0] + np.rot90(mask2[1])[::-1,::-1]
        
        return mask3
    
    def test_one_img_from_path_1(self, path):
        # All 8 TTA views in a single forward pass.
        img = cv2.imread(path)#.transpose(2,0,1)[None]
        
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None],img90[None]])
        img2 = np.array(img1)[:,::-1]
        img3 = np.concatenate([img1,img2])
        img4 = np.array(img3)[:,:,::-1]
        img5 = np.concatenate([img3,img4]).transpose(0,3,1,2)
        img5 = np.array(img5, np.float32)/255.0 * 3.2 -1.6
        img5 = V(torch.Tensor(img5).cuda())
        
        mask = self.net.forward(img5).squeeze().cpu().data.numpy()#.squeeze(1)
        # Undo hflip, then vflip, then the rot90, summing as we go.
        mask1 = mask[:4] + mask[4:,:,::-1]
        mask2 = mask1[:2] + mask1[2:,::-1]
        mask3 = mask2[0] + np.rot90(mask2[1])[::-1,::-1]
        
        return mask3

    def load(self, path):
        """Load checkpoint weights; keys must match the DataParallel
        ('module.'-prefixed) state dict produced by __init__."""
    #   new_state_dict = OrderedDict()
    #  for key, value in torch.load(path).items():
    #     name = 'module.' + key
        #    new_state_dict[name] = value
        #model.load_state_dict(new_state_dict)
        #model = torch.load(path)
        #model.pop('module.finaldeconv1.weight')
        #model.pop('module.finalconv3.weight')
        #self.net.load_state_dict(model,strict=False)
        self.net.load_state_dict(torch.load(path))

#os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # 指定第一块GPU可用

# config.gpu_options.per_process_gpu_memory_fraction = 0.7  # 程序最多只能占用指定gpu50%的显存,服务器上注释掉这句

# Let Pillow read truncated files and arbitrarily large images.
ImageFile.LOAD_TRUNCATED_IMAGES = True

Image.MAX_IMAGE_PIXELS = None

slide_window = 512  # 大的滑动窗口 (tile size in px)
step_length = 256  # stride between windows -> 50% overlap

# 1.膨胀图像
print('开始膨胀预测!')
source = 'dataset/sat_test/'
sat_path ="./dataset/sat_test/"
test_path ="./dataset/test/"
# Recreate the working and output folders from scratch.
if os.path.exists(test_path):
        shutil.rmtree(test_path)  # 递归删除文件夹下的所有内容包扩文件夹本身
os.mkdir(test_path)
file_names = filter(lambda x: x.find('tif')!=-1, os.listdir(sat_path))
original='./Big_Image_Predict_Result/'
if os.path.exists(original):
        shutil.rmtree(original)  # 递归删除文件夹下的所有内容包扩文件夹本身
os.mkdir(original)


# Copy every source tif into test_path under the '<name>_sat.tif' convention.
for file in file_names:
    img = os.path.join(sat_path, file)
    fname, ext = os.path.splitext(img)
    base_name = os.path.basename(fname)
    change_name=test_path+base_name+'_sat.tif'
    if not os.path.isfile(change_name):
    #if not os.path.isfile(filePath+'\\'+base_name[0]+'_mask.png'):
            print('原始影像改名成功:"'+file+'"')
            shutil.copy(sat_path+file,change_name)

test_list = os.listdir(test_path) 
# # print(file_list)

# Per-image pipeline: pad ("dilate") the big image, slide-window crop it,
# predict each 512x512 tile, crop tiles back to their 256x256 centres and
# mosaic them into a full-size mask.
for file in test_list:
    Image_Path = test_path+file

    #print(Image_Path)
    #print(Mask_Path)

    image = Image.open(Image_Path)
    image_name = file[:-4]
    # NOTE(review): width/height are hard-coded and the array is cropped to
    # 6060x6060 below -- inputs are assumed to be at least 6060 px square.
    width = 6060  # 获取图像的宽
    height = 6060  # 获取图像的高

    # Padding needed so width/height become multiples of the step length.
    right_fill = step_length - (width % step_length)
    bottom_fill = step_length - (height % step_length)

    width_path_number = int((width + right_fill) / step_length)  # 横向切成的小图的数量
    # NOTE(review): uses right_fill instead of bottom_fill -- harmless only
    # while width == height.
    height_path_number = int((height + right_fill) / step_length)  # 纵向切成的小图的数量

    image = np.array(image)
    image=image[:6060,:6060,:]
    # image[:,:,3]=image[:,:,1]

    # Pad right/bottom to a step multiple, then add a half-step border all
    # round so edge windows still have context.
    image = cv2.copyMakeBorder(image, top=0, bottom=bottom_fill, left=0, right=right_fill,
                            borderType=cv2.BORDER_CONSTANT, value=0)

    image = cv2.copyMakeBorder(image, top=step_length // 2, bottom=step_length // 2, left=step_length // 2,
                            right=step_length // 2,
                            borderType=cv2.BORDER_CONSTANT, value=0)  # 填充1/2步长的外边框
    # cv2.namedWindow('swell',cv2.WINDOW_NORMAL)
    # cv2.imshow('swell',image) 
    # cv2.waitKey(0)
    print('图像膨胀步骤完成!')


    # 2.将膨胀后的大图按照滑窗裁剪
    crop_source = './dataset/'
    #tar=os.path.join('./dataset/',file[:-8]+'/'+'Image_Crop_Result/')
    tar=os.path.join('./dataset/',file[:-8])  # per-image scratch dir ('_sat.tif' stripped)
    #shutil.rmtree(r"C:\Users\Administrator\Desktop\DeepGlobe-Road-Extraction-link34\dataset\Image_Crop_Result")  # 递归删除文件夹下的所有内容包扩文件夹本身
    if os.path.exists(tar):
        shutil.rmtree(tar)  # 递归删除文件夹下的所有内容包扩文件夹本身
    os.mkdir(tar)
    target=os.path.join(tar,'Image_Crop_Result/')
    os.mkdir(target)
    image_crop_addr = target  # 图像裁剪后存储的文件夹
    image = Image.fromarray(image)  # 将图片格式从numpy转回PIL
    l = 0  # running tile index, encoded into each tile name for later sorting
    for j in range(height_path_number):
        for i in range(width_path_number):
            # 512x512 window advancing by 256 px -> neighbouring tiles overlap.
            box = (i * step_length, j * step_length, i * step_length + slide_window, j * step_length + slide_window)
            small_image = image.crop(box)
            small_image.save(
                image_crop_addr + image_name[:-4] + '({},{})@{:09d}_sat.tif'.format(j, i, l), quality=95)
            l = l + 1

    print('膨胀后大图滑窗裁剪步骤完成!')
    

    # 3、对上面裁剪得到的小图进行推理
    #targ=os.path.join(sat_path,os.path.pardir)
    print('开始预测!')
    test=os.path.join(tar,'Image_Predict_Result/')
    if os.path.exists(test):
        shutil.rmtree(test)
    os.mkdir(test)
    # path =target+ '*.tif'
    # print(path)
    # expanded_images_crop = glob.glob(path)
    # print(expanded_images_crop)

    # # model = load_model("unet_cancer_2021-11-16__01_27_12.h5")  # 加载模型
    # # 对小图一次进行单独预测,再将预测图保存为彩色索引图像
    # for k in expanded_images_crop:
    
    val = os.listdir(target)
    #solver = TTAFrame(LinkNet34)
    solver = TTAFrame(DinkNet34)
    solver.load('weights/log01_Dink34.th')
    tic = time()
    for i,name in enumerate(val):
        if i%10 == 0:
            print(str(i/10)+'     %.2f'%(time()-tic))
        mask = solver.test_one_img_from_path(target+name)
        # mask[mask>4.0] = 255
        # mask[mask<=4.0] = 0
        # Replicate the single-channel TTA score into 3 channels for saving.
        mask = np.concatenate([mask[:,:,None],mask[:,:,None],mask[:,:,None]],axis=2)
        cv2.imwrite(test+name[:-7]+'mask.png',mask.astype(np.uint8))
    
    print('预测步骤完成!')
    

    # 4.将膨胀过的图裁剪回原来的大小
    recover=os.path.join(tar,'Image_Recover/')
    if os.path.exists(recover):
        shutil.rmtree(recover)  # 递归删除文件夹下的所有内容包扩文件夹本身
    os.mkdir(recover)

    val = os.listdir(test)
    for i,expanded_image in enumerate(val):
        img = Image.open(test+expanded_image)
        img_name = os.path.basename(test+expanded_image)
        # Keep only the central 256x256 of each 512x512 prediction tile.
        box = (128, 128, 384, 384)
        original_image = img.crop(box)
        original_image.save(recover + img_name, quality=95)
    print('图像裁剪回原来大小步骤完成!')

    # 5、图片拼接
    IMAGES_PATH = recover  # 图片集地址
    IMAGES_FORMAT = ['.png']  # 图片格式
    IMAGE_SIZE =  256 # 每张小图片的大小
    original='./Big_Image_Predict_Result/'
    # if not os.path.exists(original):
    #     #shutil.rmtree(original)  # 递归删除文件夹下的所有内容包扩文件夹本身
    #     os.mkdir(original)

    # 获取图片集地址下的所有图片名称
    image_names = [name for name in os.listdir(IMAGES_PATH) for item in IMAGES_FORMAT if
                os.path.splitext(name)[1] == item]
    
    # Sort by the 9-digit tile index embedded in '...@{:09d}_mask.png'.
    image_names.sort(key=lambda x:int(x[-18:-9]))  # 这句不能少,os.listdir得到的文件没有顺序,必须进行排序
    
    IMAGE_ROW = int(height_path_number)  # 图片间隔,也就是合并成一张图后,一共有几行
    IMAGE_COLUMN = int(width_path_number)  # 图片间隔,也就是合并成一张图后,一共有几列
    # 简单的对于参数的设定和实际图片集的大小进行数量判断
    if len(image_names) != IMAGE_ROW * IMAGE_COLUMN:
        raise ValueError("合成图片的参数和要求的数量不能匹配!")

    to_image = Image.new('RGB', (IMAGE_COLUMN * IMAGE_SIZE, IMAGE_ROW * IMAGE_SIZE))  # 创建一个新图
    # 循环遍历,把每张图片按顺序粘贴到对应位置上
    for y in range(1, IMAGE_ROW + 1):
        for x in range(1, IMAGE_COLUMN + 1):
            #print(image_names[IMAGE_COLUMN * (y - 1) + x - 1])
            # NOTE(review): Image.ANTIALIAS is removed in Pillow 10+ (use
            # Image.LANCZOS there); tiles are already 256 px so the resize
            # is normally a no-op.
            from_image = Image.open(IMAGES_PATH + image_names[IMAGE_COLUMN * (y - 1) + x - 1]).resize(
                (IMAGE_SIZE, IMAGE_SIZE), Image.ANTIALIAS)
            to_image.paste(from_image, ((x - 1) * IMAGE_SIZE, (y - 1) * IMAGE_SIZE))

    # 拼接完的大图的右侧和下侧有多余填充上去的部分,应裁掉
    box2 = (0, 0, int(to_image.size[0] - right_fill / (width + right_fill) * to_image.size[0]),
            int(to_image.size[1] - bottom_fill / (height + bottom_fill) * to_image.size[1]))
    original_mask = to_image.crop(box2)

    original_mask.save(original + image_name[:-4] + "_mask.png", quality=95)  # 保存新图
    
    print('图像拼接步骤完成!')
    shutil.rmtree(tar)  # 递归删除文件夹下的所有内容包扩文件夹本身

print('********')
print("Finish膨胀预测!") 
print('********')

预测、IoU 评估与栅格转矢量(以下为第二份完整流程脚本)

import torch
from torch.autograd import Variable as V
 
import cv2
import os
import shutil
from PIL import Image
from PIL import ImageFile
import numpy as np
import matplotlib.pyplot as plt
 
from osgeo import gdal,ogr,osr
 
from time import time
from utils.utils_metrics import compute_mIoU
from utils.utils_metrics import compute_IoU
 
from networks.dinknet import DinkNet34
from framework import MyFrame
from loss import dice_bce_loss
from data import ImageFolder
 
from PyQt5 import QtCore, QtGui
import sys
from PyQt5.QtWidgets import QMainWindow,QApplication,QWidget
from PyQt5.QtCore import QEventLoop, QTimer, QThread
BATCHSIZE_PER_CARD = 16  # inference batch capacity allocated per GPU

def saveList(pathName):
    """Append the stem (text before the first '.') of each file name in
    *pathName* as one line to ./dataset/gt.txt.

    The file is opened once in append mode; the original re-opened it for
    every name and its bare ``f.close`` (missing parentheses) never ran —
    the ``with`` block now guarantees the handle is closed.
    """
    with open("./dataset/gt.txt", "a") as f:
        for file_name in pathName:
            f.write(file_name.split(".")[0] + "\n")
 
def dirList(gt_dir,path_list):
    """For every entry of *path_list* that is a directory under *gt_dir*,
    record that directory's contents via saveList().

    NOTE(review): the original tested ``os.path.isdir`` only once, outside
    the loop, so it inspected just the last joined path (and raised
    NameError for an empty list); the check is now performed per entry.
    """
    for entry in path_list:
        path = os.path.join(gt_dir, entry)
        if os.path.isdir(path):
            saveList(os.listdir(path))
 
def DeleteShp(layer,count):
    """Delete every feature with a zero 'value' attribute from *layer*.

    *count* is the feature count taken before any deletion; each feature's
    FID is fetched individually so deletions do not shift later lookups.
    """
    i = 0
    while i < count:
        feature = layer.GetFeature(i)
        if feature.GetField('value') == 0:
            layer.DeleteFeature(int(feature.GetFID()))
        i += 1
 
def GridToShp(input_path,Outshp_path):
    """Polygonize band 1 of the raster at *input_path* into an ESRI
    Shapefile at *Outshp_path*, then drop all polygons whose 'value'
    field is 0 (background).

    The band is passed to FPolygonize as both data and mask, so 0/nodata
    handling follows gdal.FPolygonize semantics.
    """
    inraster = gdal.Open(input_path)
    im_data = inraster.GetRasterBand(1)    
    driver = ogr.GetDriverByName("ESRI Shapefile")
    # Overwrite any existing shapefile at the target path.
    if os.access(Outshp_path,os.F_OK):  
        driver.DeleteDataSource(Outshp_path)
    ds = driver.CreateDataSource(Outshp_path)  
    spatialref = osr.SpatialReference()
    # proj = osr.SpatialReference(wkt = inraster.GetProjection())
    # epsg = int(proj.GetAttrValue("AUTHORITY",1))  
    # spatialref.ImportFromEPSG(epsg) 
    # Inherit the raster's projection verbatim.
    spatialref.ImportFromWkt(inraster.GetProjection())  
    geomtype = ogr.wkbMultiPolygon  
  
    # Layer name is the output path minus its '.shp' extension.
    layer = ds.CreateLayer(Outshp_path[:-4],srs=spatialref,geom_type=geomtype) 
    layer.CreateField(ogr.FieldDefn('value',ogr.OFTReal))
    # Pixel values of each polygonized region land in the 'value' field.
    gdal.FPolygonize(im_data,im_data,layer,0,[],None)
    ds.SyncToDisk()
    ds.Destroy()
    
    # Reopen in update mode to strip background (value==0) polygons.
    ds = ogr.Open(Outshp_path,True)
    Layer = ds.GetLayer(0)
    Count = Layer.GetFeatureCount()
    DeleteShp(Layer,Count)
    ds.Destroy()
class TTAFrame():
    """Test-time-augmentation (TTA) inference wrapper.

    Runs the wrapped segmentation net on the 8 dihedral variants of an
    image (identity / 90-degree rotation, each with optional vertical and
    horizontal flips), undoes every transform on the outputs, and sums
    them.  The _1/_2/_4/_8 variants differ only in how the 8 views are
    batched into forward passes.
    """
    def __init__(self, net):
        # `net` is a network class (e.g. DinkNet34); instantiate it on GPU
        # and wrap in DataParallel across every visible device.
        self.net = net().cuda()
        self.net = torch.nn.DataParallel(self.net, device_ids=range(torch.cuda.device_count()))
        
    def test_one_img_from_path(self, path, evalmode = True):
        # Dispatch to a batching variant based on available capacity.
        # NOTE(review): with zero CUDA devices batchsize is 0 and this
        # returns None; callers assume at least one GPU is present.
        if evalmode:
            self.net.eval()
        batchsize = torch.cuda.device_count() * BATCHSIZE_PER_CARD
        if batchsize >= 8:
            return self.test_one_img_from_path_1(path)
        elif batchsize >= 4:
            return self.test_one_img_from_path_2(path)
        elif batchsize >= 2:
            return self.test_one_img_from_path_4(path)

    def test_one_img_from_path_8(self, path):
        # NOTE(review): byte-for-byte identical to test_one_img_from_path_4
        # below, and never reached via the dispatcher above.
        img = cv2.imread(path)#.transpose(2,0,1)[None]
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None],img90[None]])  # batch: [orig, rot90]
        img2 = np.array(img1)[:,::-1]    # + vertical flip
        img3 = np.array(img1)[:,:,::-1]  # + horizontal flip
        img4 = np.array(img2)[:,:,::-1]  # + both flips
        
        # HWC -> CHW for the network.
        img1 = img1.transpose(0,3,1,2)
        img2 = img2.transpose(0,3,1,2)
        img3 = img3.transpose(0,3,1,2)
        img4 = img4.transpose(0,3,1,2)
        
        # Scale uint8 [0,255] into [-1.6, 1.6] before inference.
        img1 = V(torch.Tensor(np.array(img1, np.float32)/255.0 * 3.2 -1.6).cuda())
        img2 = V(torch.Tensor(np.array(img2, np.float32)/255.0 * 3.2 -1.6).cuda())
        img3 = V(torch.Tensor(np.array(img3, np.float32)/255.0 * 3.2 -1.6).cuda())
        img4 = V(torch.Tensor(np.array(img4, np.float32)/255.0 * 3.2 -1.6).cuda())
        
        maska = self.net.forward(img1).squeeze().cpu().data.numpy()
        maskb = self.net.forward(img2).squeeze().cpu().data.numpy()
        maskc = self.net.forward(img3).squeeze().cpu().data.numpy()
        maskd = self.net.forward(img4).squeeze().cpu().data.numpy()
        
        # Undo the flips, then the rot90, and sum all 8 views.
        mask1 = maska + maskb[:,::-1] + maskc[:,:,::-1] + maskd[:,::-1,::-1]
        mask2 = mask1[0] + np.rot90(mask1[1])[::-1,::-1]
        
        return mask2

    def test_one_img_from_path_4(self, path):
        # 8 TTA views in 4 forward passes of 2 images each.
        img = cv2.imread(path)#.transpose(2,0,1)[None]
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None],img90[None]])  # batch: [orig, rot90]
        img2 = np.array(img1)[:,::-1]    # + vertical flip
        img3 = np.array(img1)[:,:,::-1]  # + horizontal flip
        img4 = np.array(img2)[:,:,::-1]  # + both flips
        
        # HWC -> CHW for the network.
        img1 = img1.transpose(0,3,1,2)
        img2 = img2.transpose(0,3,1,2)
        img3 = img3.transpose(0,3,1,2)
        img4 = img4.transpose(0,3,1,2)
        
        # Scale uint8 [0,255] into [-1.6, 1.6] before inference.
        img1 = V(torch.Tensor(np.array(img1, np.float32)/255.0 * 3.2 -1.6).cuda())
        img2 = V(torch.Tensor(np.array(img2, np.float32)/255.0 * 3.2 -1.6).cuda())
        img3 = V(torch.Tensor(np.array(img3, np.float32)/255.0 * 3.2 -1.6).cuda())
        img4 = V(torch.Tensor(np.array(img4, np.float32)/255.0 * 3.2 -1.6).cuda())
        
        maska = self.net.forward(img1).squeeze().cpu().data.numpy()
        maskb = self.net.forward(img2).squeeze().cpu().data.numpy()
        maskc = self.net.forward(img3).squeeze().cpu().data.numpy()
        maskd = self.net.forward(img4).squeeze().cpu().data.numpy()
        
        # Undo the flips, then the rot90, and sum all 8 views.
        mask1 = maska + maskb[:,::-1] + maskc[:,:,::-1] + maskd[:,::-1,::-1]
        mask2 = mask1[0] + np.rot90(mask1[1])[::-1,::-1]
        
        return mask2
    
    def test_one_img_from_path_2(self, path):
        # 8 TTA views in 2 forward passes of 4 images each.
        img = cv2.imread(path)#.transpose(2,0,1)[None]
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None],img90[None]])
        img2 = np.array(img1)[:,::-1]
        img3 = np.concatenate([img1,img2])      # [orig, rot90, vflips]
        img4 = np.array(img3)[:,:,::-1]         # horizontally flipped copies
        img5 = img3.transpose(0,3,1,2)
        img5 = np.array(img5, np.float32)/255.0 * 3.2 -1.6
        img5 = V(torch.Tensor(img5).cuda())
        img6 = img4.transpose(0,3,1,2)
        img6 = np.array(img6, np.float32)/255.0 * 3.2 -1.6
        img6 = V(torch.Tensor(img6).cuda())
        
        maska = self.net.forward(img5).squeeze().cpu().data.numpy()#.squeeze(1)
        maskb = self.net.forward(img6).squeeze().cpu().data.numpy()
        
        # Undo hflip, then vflip, then the rot90, summing as we go.
        mask1 = maska + maskb[:,:,::-1]
        mask2 = mask1[:2] + mask1[2:,::-1]
        mask3 = mask2[0] + np.rot90(mask2[1])[::-1,::-1]
        
        return mask3
    
    def test_one_img_from_path_1(self, path):
        # All 8 TTA views in a single forward pass.
        img = cv2.imread(path)#.transpose(2,0,1)[None]
        
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None],img90[None]])
        img2 = np.array(img1)[:,::-1]
        img3 = np.concatenate([img1,img2])
        img4 = np.array(img3)[:,:,::-1]
        img5 = np.concatenate([img3,img4]).transpose(0,3,1,2)
        img5 = np.array(img5, np.float32)/255.0 * 3.2 -1.6
        img5 = V(torch.Tensor(img5).cuda())
        
        mask = self.net.forward(img5).squeeze().cpu().data.numpy()#.squeeze(1)
        # Undo hflip, then vflip, then the rot90, summing as we go.
        mask1 = mask[:4] + mask[4:,:,::-1]
        mask2 = mask1[:2] + mask1[2:,::-1]
        mask3 = mask2[0] + np.rot90(mask2[1])[::-1,::-1]
        
        return mask3

    def load(self, path):
        """Load checkpoint weights; keys must match the DataParallel
        ('module.'-prefixed) state dict produced by __init__."""
    #   new_state_dict = OrderedDict()
    #  for key, value in torch.load(path).items():
    #     name = 'module.' + key
        #    new_state_dict[name] = value
        #model.load_state_dict(new_state_dict)
        #model = torch.load(path)
        #model.pop('module.finaldeconv1.weight')
        #model.pop('module.finalconv3.weight')
        #self.net.load_state_dict(model,strict=False)
        self.net.load_state_dict(torch.load(path))
 
#os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # 指定第一块GPU可用

# config.gpu_options.per_process_gpu_memory_fraction = 0.7  # 程序最多只能占用指定gpu50%的显存,服务器上注释掉这句

# Let Pillow read truncated files and arbitrarily large images.
ImageFile.LOAD_TRUNCATED_IMAGES = True

Image.MAX_IMAGE_PIXELS = None

slide_window = 512  # 大的滑动窗口 (tile size in px)
step_length = 256  # stride between windows -> 50% overlap

# 1.膨胀图像
print('开始膨胀预测!')
source = 'dataset/sat_test/'
sat_path ="./dataset/sat_test/"
test_path ="./dataset/test/"
# Recreate the working and output folders from scratch.
if os.path.exists(test_path):
        shutil.rmtree(test_path)  # 递归删除文件夹下的所有内容包扩文件夹本身
os.mkdir(test_path)
file_names = filter(lambda x: x.find('tif')!=-1, os.listdir(sat_path))
original='./Big_Image_Predict_Result/'
if os.path.exists(original):
        shutil.rmtree(original)  # 递归删除文件夹下的所有内容包扩文件夹本身
os.mkdir(original)


# Copy every source tif into test_path under the '<name>_sat.tif' convention.
for file in file_names:
    img = os.path.join(sat_path, file)
    fname, ext = os.path.splitext(img)
    base_name = os.path.basename(fname)
    change_name=test_path+base_name+'_sat.tif'
    if not os.path.isfile(change_name):
    #if not os.path.isfile(filePath+'\\'+base_name[0]+'_mask.png'):
            print('原始影像改名成功:"'+file+'"')
            shutil.copy(sat_path+file,change_name)
 
test_list = os.listdir(test_path) 
# # print(file_list)

# Per-image pipeline: pad ("dilate") the big image, slide-window crop it,
# predict each 512x512 tile, crop tiles back to their 256x256 centres and
# mosaic them into a full-size binary mask.
for file in test_list:
    Image_Path = test_path+file

    #print(Image_Path)
    #print(Mask_Path)

    image = Image.open(Image_Path)
    image_name = file[:-4]
    # NOTE(review): width/height are hard-coded and the array is cropped to
    # 6060x6060 below -- inputs are assumed to be at least 6060 px square.
    width = 6060  # 获取图像的宽
    height = 6060  # 获取图像的高

    # Padding needed so width/height become multiples of the step length.
    right_fill = step_length - (width % step_length)
    bottom_fill = step_length - (height % step_length)

    width_path_number = int((width + right_fill) / step_length)  # 横向切成的小图的数量
    # NOTE(review): uses right_fill instead of bottom_fill -- harmless only
    # while width == height.
    height_path_number = int((height + right_fill) / step_length)  # 纵向切成的小图的数量

    image = np.array(image)
    image=image[:6060,:6060,:]
    # image[:,:,3]=image[:,:,1]

    # Pad right/bottom to a step multiple, then add a half-step border all
    # round so edge windows still have context.
    image = cv2.copyMakeBorder(image, top=0, bottom=bottom_fill, left=0, right=right_fill,
                            borderType=cv2.BORDER_CONSTANT, value=0)

    image = cv2.copyMakeBorder(image, top=step_length // 2, bottom=step_length // 2, left=step_length // 2,
                            right=step_length // 2,
                            borderType=cv2.BORDER_CONSTANT, value=0)  # 填充1/2步长的外边框
    # cv2.namedWindow('swell',cv2.WINDOW_NORMAL)
    # cv2.imshow('swell',image) 
    # cv2.waitKey(0)
    print('图像膨胀步骤完成!')


    # 2.将膨胀后的大图按照滑窗裁剪
    crop_source = './dataset/'
    #tar=os.path.join('./dataset/',file[:-8]+'/'+'Image_Crop_Result/')
    tar=os.path.join('./dataset/',file[:-8])  # per-image scratch dir ('_sat.tif' stripped)
    #shutil.rmtree(r"C:\Users\Administrator\Desktop\DeepGlobe-Road-Extraction-link34\dataset\Image_Crop_Result")  # 递归删除文件夹下的所有内容包扩文件夹本身
    if os.path.exists(tar):
        shutil.rmtree(tar)  # 递归删除文件夹下的所有内容包扩文件夹本身
    os.mkdir(tar)
    target=os.path.join(tar,'Image_Crop_Result/')
    os.mkdir(target)
    image_crop_addr = target  # 图像裁剪后存储的文件夹
    image = Image.fromarray(image)  # 将图片格式从numpy转回PIL
    l = 0  # running tile index, encoded into each tile name for later sorting
    for j in range(height_path_number):
        for i in range(width_path_number):
            # 512x512 window advancing by 256 px -> neighbouring tiles overlap.
            box = (i * step_length, j * step_length, i * step_length + slide_window, j * step_length + slide_window)
            small_image = image.crop(box)
            small_image.save(
                image_crop_addr + image_name[:-4] + '({},{})@{:09d}_sat.tif'.format(j, i, l), quality=95)
            l = l + 1

    print('膨胀后大图滑窗裁剪步骤完成!')
    

    # 3、对上面裁剪得到的小图进行推理
    #targ=os.path.join(sat_path,os.path.pardir)
    print('开始预测!')
    test=os.path.join(tar,'Image_Predict_Result/')
    if os.path.exists(test):
        shutil.rmtree(test)
    os.mkdir(test)
    # path =target+ '*.tif'
    # print(path)
    # expanded_images_crop = glob.glob(path)
    # print(expanded_images_crop)

    # # model = load_model("unet_cancer_2021-11-16__01_27_12.h5")  # 加载模型
    # # 对小图一次进行单独预测,再将预测图保存为彩色索引图像
    # for k in expanded_images_crop:
    
    val = os.listdir(target)
    #solver = TTAFrame(LinkNet34)
    solver = TTAFrame(DinkNet34)
    solver.load('weights/log01_Dink101_five_75.th')
    tic = time()
    for i,name in enumerate(val):
        if i%10 == 0:
            print(str(i/10)+'     %.2f'%(time()-tic))
        mask = solver.test_one_img_from_path(target+name)
        # Binarise the summed TTA scores at 4.0 -- presumably an average of
        # 0.5 over the 8 views; confirm against the network's output range.
        mask[mask>4.0] = 255
        mask[mask<=4.0] = 0
        # Replicate the single-channel mask into 3 channels for saving.
        mask = np.concatenate([mask[:,:,None],mask[:,:,None],mask[:,:,None]],axis=2)
        cv2.imwrite(test+name[:-7]+'mask.png',mask.astype(np.uint8))
    
    print('预测步骤完成!')
    

    # 4.将膨胀过的图裁剪回原来的大小
    recover=os.path.join(tar,'Image_Recover/')
    if os.path.exists(recover):
        shutil.rmtree(recover)  # 递归删除文件夹下的所有内容包扩文件夹本身
    os.mkdir(recover)

    val = os.listdir(test)
    for i,expanded_image in enumerate(val):
        img = Image.open(test+expanded_image)
        img_name = os.path.basename(test+expanded_image)
        # Keep only the central 256x256 of each 512x512 prediction tile.
        box = (128, 128, 384, 384)
        original_image = img.crop(box)
        original_image.save(recover + img_name, quality=95)
    print('图像裁剪回原来大小步骤完成!')

    # 5、图片拼接
    IMAGES_PATH = recover  # 图片集地址
    IMAGES_FORMAT = ['.png']  # 图片格式
    IMAGE_SIZE =  256 # 每张小图片的大小
    original='./Big_Image_Predict_Result/'
    # if not os.path.exists(original):
    #     #shutil.rmtree(original)  # 递归删除文件夹下的所有内容包扩文件夹本身
    #     os.mkdir(original)

    # 获取图片集地址下的所有图片名称
    image_names = [name for name in os.listdir(IMAGES_PATH) for item in IMAGES_FORMAT if
                os.path.splitext(name)[1] == item]
    
    # Sort by the 9-digit tile index embedded in '...@{:09d}_mask.png'.
    image_names.sort(key=lambda x:int(x[-18:-9]))  # 这句不能少,os.listdir得到的文件没有顺序,必须进行排序
    
    IMAGE_ROW = int(height_path_number)  # 图片间隔,也就是合并成一张图后,一共有几行
    IMAGE_COLUMN = int(width_path_number)  # 图片间隔,也就是合并成一张图后,一共有几列
    # 简单的对于参数的设定和实际图片集的大小进行数量判断
    if len(image_names) != IMAGE_ROW * IMAGE_COLUMN:
        raise ValueError("合成图片的参数和要求的数量不能匹配!")

    to_image = Image.new('RGB', (IMAGE_COLUMN * IMAGE_SIZE, IMAGE_ROW * IMAGE_SIZE))  # 创建一个新图
    # 循环遍历,把每张图片按顺序粘贴到对应位置上
    for y in range(1, IMAGE_ROW + 1):
        for x in range(1, IMAGE_COLUMN + 1):
            #print(image_names[IMAGE_COLUMN * (y - 1) + x - 1])
            # NOTE(review): Image.ANTIALIAS is removed in Pillow 10+ (use
            # Image.LANCZOS there); tiles are already 256 px so the resize
            # is normally a no-op.
            from_image = Image.open(IMAGES_PATH + image_names[IMAGE_COLUMN * (y - 1) + x - 1]).resize(
                (IMAGE_SIZE, IMAGE_SIZE), Image.ANTIALIAS)
            to_image.paste(from_image, ((x - 1) * IMAGE_SIZE, (y - 1) * IMAGE_SIZE))

    # 拼接完的大图的右侧和下侧有多余填充上去的部分,应裁掉
    box2 = (0, 0, int(to_image.size[0] - right_fill / (width + right_fill) * to_image.size[0]),
            int(to_image.size[1] - bottom_fill / (height + bottom_fill) * to_image.size[1]))
    original_mask = to_image.crop(box2)

    original_mask.save(original + image_name[:-4] + "_mask.png", quality=95)  # 保存新图
    
    print('图像拼接步骤完成!')
    shutil.rmtree(tar)  # 递归删除文件夹下的所有内容包扩文件夹本身

print('********')
print("Finish膨胀预测!") 
print('********')

'''iou'''
# miou_mode 0 or 2 triggers the metric computation below (1 would skip it).
miou_mode       = 2
num_classes     = 2
name_classes    = ["nontarget","target"]

data_path  = './dataset/'
mask_tar ="./dataset/mask_test/"
real_path ="./dataset/real/"
if os.path.exists(real_path):
        shutil.rmtree(real_path)  # 递归删除文件夹下的所有内容包扩文件夹本身
os.mkdir(real_path)
mask_path = './dataset/mask_tif2png_test/'

if os.path.exists(mask_path):
    shutil.rmtree(mask_path)  # 递归删除文件夹下的所有内容包扩文件夹本身
os.mkdir(mask_path)


# Copy every ground-truth mask over with a .png file name.
# NOTE(review): shutil.copy is a byte copy, not a real format conversion --
# the .png files still hold the original image data; confirm downstream
# readers accept that.
mask_list = os.listdir(mask_tar) 
for name in mask_list:
    # 获取图片文件全路径
    img_path = os.path.join(mask_tar, name)
    #获取文件名,不包含扩展名
    filename = os.path.splitext(name)[0]
    savefilename = filename+".png"
    #文件存储全路径
    savepath = os.path.join(mask_path, savefilename)  # unused; the copy below builds its own path
    shutil.copy(mask_tar+name,mask_path+savefilename)

print("完成所有标签图片格式转换!")

file_names = filter(lambda x: x.find('png')!=-1, os.listdir(mask_path))

# Rename the copied masks to the '<name>_mask.png' convention in real_path.
for file in file_names:
    img = os.path.join(mask_path, file)
    fname, ext = os.path.splitext(img)
    base_name = os.path.basename(fname)
    change_name=real_path+base_name+'_mask.png'
    if not os.path.isfile(change_name):
    #if not os.path.isfile(filePath+'\\'+base_name[0]+'_mask.png'):
            print('真实标签改名成功:"'+file+'"')
            shutil.copy(mask_path+file,change_name)
#sat_path =  r"C:\Users\Administrator\Desktop\DeepGlobe-Road-Extraction-link34\dataset\sat"  # 源文件夹
#mask_path = r"C:\Users\Administrator\Desktop\DeepGlobe-Road-Extraction-link34\dataset\mask"  # 目标文件夹
# Every trained weight checkpoint in ./weights/ is evaluated in sorted order.
weight_dir      =  "./weights/"
weight_list = sorted(os.listdir(weight_dir))

# Log file collecting the per-weight evaluation summaries.
mylog = open('./Big_Image_Predict_Result/count_low_pic.log','w')

for weight_name in weight_list:
    # Evaluate one trained weight file against the staged ground-truth masks.
    print("加载训练权重模型" + str(weight_name))

    # Truncate the sample-id list, then let dirList/saveList re-append every id.
    # (The original left this handle open for the whole loop iteration.)
    open("./dataset/gt.txt", 'w').close()
    gt_dir   = os.path.join(data_path, "real/")
    pred_dir = "./Big_Image_Predict_Result/"
    path_list = sorted(os.listdir(gt_dir))
    dirList(gt_dir, path_list)
    saveList(path_list)
    # Read the ids back with a context manager (original leaked the handle).
    with open(os.path.join(data_path, "gt.txt"), 'r') as gt_file:
        image_ids = gt_file.read().splitlines()

    # Per-weight output folders: samples are bucketed by per-image IoU.
    stem = weight_name[:-3]  # strip the 3-char extension (e.g. ".th") — TODO confirm
    target = os.path.join('./Big_Image_Predict_Result/', stem)
    lower_iou = os.path.join('./Big_Image_Predict_Result/', stem + '/' + 'lower_iou/')
    higher_iou = os.path.join('./Big_Image_Predict_Result/', stem + '/' + 'higher_iou/')
    # makedirs(exist_ok=True) replaces the exists()/mkdir() pairs atomically.
    os.makedirs(target, exist_ok=True)
    os.makedirs(lower_iou, exist_ok=True)
    os.makedirs(higher_iou, exist_ok=True)

    mylog.write(str(stem))

    if miou_mode == 0 or miou_mode == 2:
        print('计算测试miou')
        # Aggregate mIoU / mPA over the whole test set.
        test_mIou, test_mPA, test_miou, test_mpa = compute_mIoU(
            gt_dir, pred_dir, image_ids, num_classes, name_classes, weight_name)
        mylog.write('  test_mIoU:  ' + str(test_miou))
        print('  test_mIoU:  ' + str(test_miou))
        mylog.write('  test_mPA:  ' + str(test_mpa))
        print("测试miou计算完毕")

        print('计算测试样本单张iou')
        # Per-image IoU; returns the number of low-IoU pictures found.
        count = compute_IoU(gt_dir, pred_dir, image_ids, num_classes,
                            lower_iou, higher_iou, weight_name, 0)
        mylog.write('  low-iou test picture num:  ' + str(count))
        print('测试样本单张iou计算完毕')

    mylog.write('Finish!')
# Close the evaluation log (the handle was never closed in the original,
# risking lost buffered lines) and drop the temporary tif→png staging folder.
mylog.close()
if os.path.exists(mask_path):
    shutil.rmtree(mask_path)  # recursively delete the folder and its contents
print('********')
print('Finish计算iou!')
print('********')
            
# (leftover from a GUI version: def transButtonclick(self))
# --- Geo-referencing + shapefile export configuration ---
sat_path = "./dataset/sat_test/"            # source tifs carrying coordinates
original = './Big_Image_Predict_Result/'    # predicted masks

addcor_original = './dataset/Add_Coordinate/'  # geo-referenced mask output
shp = './Final_Result_SHP/'                    # final shapefile output
# Make sure both output folders exist before the conversion loops run.
for out_dir in (addcor_original, shp):
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
# Copy the geo-referencing (geotransform + projection) of each source tif onto
# its predicted mask, writing a 3-band GeoTIFF into addcor_original.
# Fixes over the original: the unused local `bands_num` is removed, the PIL
# image handle is closed via `with`, and the triplicated per-band copy is a loop.
original_names = filter(lambda x: x.find('mask') != -1, os.listdir(original))
for f in original_names:
    # f is "<name>_mask.png"; strip the 9-char "_mask.png" suffix to find the tif.
    CorimgPath = sat_path + f[:-9] + '.tif'
    if not os.path.exists(CorimgPath):
        continue  # no geo-referenced source for this mask
    path = original + f.strip()
    print(str(path))

    in_ds = gdal.Open(CorimgPath)   # geo-referenced source image
    in_ds2 = gdal.Open(path)        # predicted mask (pixel data only)

    # PIL is only used to query the mask's pixel dimensions.
    with Image.open(path) as png_image:
        block_xsize, block_ysize = png_image.size

    gtif_driver = gdal.GetDriverByName("GTiff")
    filename = addcor_original + f[:-9] + '_mask.tif'  # output file name

    # Output keeps the mask's data type; 3 bands, as in the original code.
    in_band1 = in_ds2.GetRasterBand(1)
    out_ds = gtif_driver.Create(filename, block_xsize, block_ysize, 3, in_band1.DataType)
    out_ds.SetGeoTransform(in_ds.GetGeoTransform())  # copy origin / pixel size
    out_ds.SetProjection(in_ds.GetProjection())      # copy SRS (projection info)

    # Copy the first three bands of the mask unchanged (GDAL bands are 1-based).
    for band_idx in (1, 2, 3):
        band_data = in_ds2.GetRasterBand(band_idx).ReadAsArray(0, 0, block_xsize, block_ysize)
        out_ds.GetRasterBand(band_idx).WriteArray(band_data)

    print(str(filename))
    del out_ds  # releasing the dataset flushes it to disk
# Convert every geo-referenced mask GeoTIFF into an ESRI shapefile.
for img in os.listdir(addcor_original):
    input_path = addcor_original + img.strip()
    # BUG FIX: the original built the path as "img[:-9] +  +'.shp'", which
    # applies unary '+' to a str and raises TypeError at runtime.
    Outshp_path = shp + img[:-9] + '.shp'  # strip the "_mask.tif" suffix
    print(str(Outshp_path))
    GridToShp(input_path, Outshp_path)

print('********')
print('Finish匹配地理坐标后转矢量!')
print('********')

平均结果

 
import os
from PIL import Image
import cv2
import numpy as np
 

# Fuse the RGB-branch and elevation-branch predictions by per-pixel averaging,
# then binarize: any pixel whose average exceeds 4.0 becomes foreground (255).
rgb_path = '/mnt/sdb1/fenghaixia/fourtestys/Big_Image_Predict_Result/'
h_path = '/mnt/sdb1/fenghaixia/fourtesth/Big_Image_Predict_Result/'
savepath = '/mnt/sdb1/fenghaixia/fourtesth/aver_img/'
for f in os.listdir(rgb_path):
    rgb_name = rgb_path + f.strip()
    h_name = h_path + f.strip()
    rgb_im = cv2.imread(rgb_name)
    h_im = cv2.imread(h_name)
    # cv2.imread returns None for missing/unreadable files; skip unmatched pairs
    # instead of failing on the array arithmetic below.
    if rgb_im is None or h_im is None:
        print('missing pair, skipped: ' + f)
        continue
    # BUG FIX: adding two uint8 arrays wraps around at 256 (e.g. 250+10 -> 4),
    # corrupting the average; cast to float before summing.
    aver_im = (rgb_im.astype(np.float32) + h_im.astype(np.float32)) / 2.0
    aver_im = np.where(aver_im > 4.0, 255, 0).astype(np.uint8)
    out_name = f.split('(')[0] + '_mask.png'
    cv2.imwrite(savepath + out_name, aver_im)
    # BUG FIX: the original printed f.split('_')[0:2] + '_mask.png',
    # which is list + str and raises TypeError on the first iteration.
    print(out_name)
print('finish')

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值