**
说明:
**
1、本文是基于网页:https://blog.csdn.net/u014311125/article/details/93746867
的基础上进行代码优化的,原理未变,只加快了运算速度。
2、当我用这段代码跑64位深的图片的时候,根本计算不出来,经过代码分析主要原因为里面的几个for循环,当图像太大(千万甚至上亿的运算量)的时候根本无法计算。
3、本人花了一天时间优化了代码,运用pandas数组运算,得到的结果是一样,但是效率提升了n多倍。
**
测试效果及以后优化方向
**
1、用博主提供的图片进行测试,测试效果与博主的转换结果效果是完全等同;
2、试着用一个64位深的图片放进去转换速度也很快,但是颜色变了,里面原理可能需要再修改一下。
3、下一步可以按此思路进行更深度的图片转换,下面废话不多说,抛上我的代码。
#!usr/bin/env python
# -*- coding: utf-8 -*-
"""
将16位遥感图像压缩至8位,并保持色彩一致
"""
import gdal
import os
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
def read_tiff(input_file):
    """
    Read a raster image with GDAL.

    :param input_file: path to the input raster
    :return: (band data as a (bands, rows, cols) float array, affine
             geotransform, projection WKT, row count, column count, band count)
    """
    dataset = gdal.Open(input_file)
    rows = dataset.RasterYSize
    cols = dataset.RasterXSize
    geo = dataset.GetGeoTransform()
    proj = dataset.GetProjection()
    couts = dataset.RasterCount
    array_data = np.zeros((couts, rows, cols))
    for i in range(couts):
        # GDAL band indices are 1-based.
        band = dataset.GetRasterBand(i + 1)
        array_data[i, :, :] = band.ReadAsArray()
    # Return the actual band count (the original hard-coded 3, which broke
    # images with a different number of bands and contradicted the docstring).
    return array_data, geo, proj, rows, cols, couts
def compress(origin_16, output_8):
    """
    Linearly stretch a 16-bit image to 8-bit, band by band.

    For each band, the 2% and 98% points of the cumulative histogram are used
    as the stretch limits, values are clipped to that range, and the result is
    rescaled to 0-255.

    :param origin_16: path to the 16-bit input image
    :param output_8: path of the 8-bit GeoTIFF to write
    """
    array_data, geo, proj, rows, cols, couts = read_tiff(origin_16)
    compress_data = np.zeros((couts, rows, cols))
    for i in range(couts):
        band = array_data[i, :, :]
        band_min = np.min(band)
        band_max = np.max(band)
        cutmin, cutmax = cumulativehistogram(band, rows, cols, band_min, band_max)
        compress_scale = (cutmax - cutmin) / 255
        # Guard against a flat band (cutmax == cutmin): avoid division by zero;
        # the clipped data is then all zeros anyway.
        if compress_scale == 0:
            compress_scale = 1
        # Vectorized clamp-and-rescale (replaces the original O(rows*cols)
        # Python double loop, kept here only as history in the blog text).
        clipped = np.clip(band, cutmin, cutmax)
        compress_data[i, :, :] = (clipped - cutmin) / compress_scale
    write_tiff(output_8, compress_data, rows, cols, couts, geo, proj)
def write_tiff(output_file, array_data, rows, cols, counts, geo, proj):
    """
    Write band data out as an 8-bit GeoTIFF.

    :param output_file: path of the GeoTIFF to create
    :param array_data: (counts, rows, cols) array of band data
    :param rows: number of rows
    :param cols: number of columns
    :param counts: number of bands to write
    :param geo: affine geotransform to attach
    :param proj: projection WKT to attach
    """
    driver = gdal.GetDriverByName("GTiff")
    dataset = driver.Create(output_file, cols, rows, counts, gdal.GDT_Byte)
    dataset.SetGeoTransform(geo)
    dataset.SetProjection(proj)
    for i in range(counts):
        # GDAL band indices are 1-based. Writing bands in a different order
        # (e.g. counts-1-i) would swap the output channel order.
        band = dataset.GetRasterBand(i + 1)
        band.WriteArray(array_data[i, :, :])
    # Flush to disk and release the dataset so the file is complete on return.
    dataset.FlushCache()
    dataset = None
def cumulativehistogram(array_data, rows, cols, band_min, band_max):
    """
    Find the 2% / 98% cut values of a band's cumulative histogram.

    :param array_data: 2-D band data (values assumed to lie in
                       [band_min, band_max])
    :param rows: number of rows in the band
    :param cols: number of columns in the band
    :param band_min: minimum pixel value of the band
    :param band_max: maximum pixel value of the band
    :return: (cutmin, cutmax) — the gray values at which the cumulative
             count crosses 2% and 98% of the pixel total
    """
    gray_level = int(band_max - band_min + 1)
    # Histogram of integer gray levels shifted to start at 0.
    # np.bincount replaces the pandas value_counts + Python loop, and —
    # crucially — absent gray levels count as 0. The previous np.ones
    # initialization gave every empty bin a phantom count of 1, inflating
    # the cumulative histogram and shifting the cut points.
    shifted = (array_data - band_min).astype(np.int64).ravel()
    gray_array = np.bincount(shifted, minlength=gray_level).astype(np.float64)
    counts = rows * cols
    count_percent2 = counts * 0.02
    count_percent98 = counts * 0.98
    # Running total of pixel counts up to each gray level.
    cumulative = np.cumsum(gray_array)
    cutmin = 0
    cutmax = 0
    for i in range(1, gray_level):
        # A cut point is where the cumulative count crosses the threshold.
        if cumulative[i] >= count_percent2 and cumulative[i - 1] <= count_percent2:
            cutmin = i + band_min
        if cumulative[i] >= count_percent98 and cumulative[i - 1] <= count_percent98:
            cutmax = i + band_min
    return cutmin, cutmax
if __name__ == '__main__':
    # Example multi-spectral GeoTIFF input kept for reference:
    # origin_16 = "./datas/mulObject/GF1_PMS1_E128.9_N35.0_20190127_L1A0003796627-MSS1-recitfy.tiff"
    output_8 = "./datas/test16to8/out/gj2.tif"
    origin_16 = "./datas/test16to8/in/20190630112555233.png"
    compress(origin_16, output_8)