本地训练模型上传GEE平台
袋装决策树算法上传GEE平台
因为GEE平台本身不提供袋装决策树算法,所以只能曲线救国。在本地训练好模型再上传到云平台调用,但这个过程需要使用GEE Python本地版。下面就介绍下实现方法。
主要流程:
1.袋装决策树的本地训练
参考是这篇博文:
【BaggingClassifier分类器的使用方法】: link
训练样本数据点需要由GEE导出为CSV格式,参考的是这篇博文:
【GEE与本地随机森林回归模型结合使用】: link
2.GEE Python API-GeeMap的环境设置
环境设置
参考博文: link
3.模型上传和调用
使用 geemap 的 ml 模块上传本地训练好的模型
参考博文:link
调用
参考博文:link
涉及 Python语言和JS之间的转化
参考博文:link
最后,上代码
import ee
import geemap
import pandas as pd
from geemap import ml
from sklearn import ensemble
# Connect to Earth Engine. geemap.ee_initialize() wraps ee.Initialize() and
# handles credential discovery; both calls are kept from the original, but the
# duplicated `import ee` / `import geemap` lines and the second
# `Map = geemap.Map()` have been removed (they re-did work already done above).
ee.Initialize()
geemap.ee_initialize()

# Interactive map widget for visualising inputs and results (Jupyter).
Map = geemap.Map()

# Study area: Inner Mongolia boundary, stored as a GEE asset.
neimeng = ee.FeatureCollection('users/shenzy0921/NeiMengGu')
# print("roiArea(km2)", neimeng.geometry().area().divide(1000000))
def data_preprocessing(studyArea, startYear, endYear):
    """Build a cloud-masked, resampled Sentinel-2 SR collection over an area.

    Filters COPERNICUS/S2_SR to [Jan 1 startYear .. Dec 31 endYear] inclusive,
    drops scenes with >= 20% cloud cover, masks cloud/cirrus pixels via the
    QA60 bitmask, rescales reflectance to [0, 1], keeps ten spectral bands,
    and resamples bicubically.

    Args:
        studyArea: ee.Geometry or ee.FeatureCollection bounding the search.
        startYear: first calendar year to include (int).
        endYear: last calendar year to include (int).

    Returns:
        ee.ImageCollection sorted by acquisition time (system:time_start).
    """
    def maskS2clouds(image):
        """Mask cloud and cirrus pixels via QA60, then rescale reflectance."""
        qa = image.select('QA60')
        # Bits 10 and 11 are clouds and cirrus, respectively.
        cloudBitMask = 1 << 10
        cirrusBitMask = 1 << 11
        # Both flags must be zero, indicating clear conditions.
        mask = qa.bitwiseAnd(cloudBitMask).eq(0) \
            .And(qa.bitwiseAnd(cirrusBitMask).eq(0))
        # S2 SR reflectance is scaled by 10000; convert to [0, 1].
        return image.updateMask(mask).divide(10000)

    def selectNir(image):
        """Keep only the spectral bands used downstream."""
        return image.select('B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B11', 'B8A')

    def spatial_interpolate(image):
        """Set bicubic resampling for smoother reprojection/compositing."""
        return image.resample('bicubic')

    startDate = ee.Date.fromYMD(startYear, 1, 1)
    # BUG FIX: filterDate's end date is exclusive, so ending at Dec 31 silently
    # dropped the final day of endYear; advance one day to make it inclusive.
    endDate = ee.Date.fromYMD(endYear, 12, 31).advance(1, 'day')

    # BUG FIX: the original also applied
    # ee.Filter.calendarRange(0, 365, 'day_of_year'), intended as a no-op
    # whole-year filter; day_of_year runs 1-366, so it excluded Dec 31 in
    # leap years. The date range above already covers the full years.
    collection = ee.ImageCollection('COPERNICUS/S2_SR') \
        .filterBounds(studyArea) \
        .filterDate(startDate, endDate) \
        .filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20)) \
        .map(maskS2clouds) \
        .map(selectNir) \
        .map(spatial_interpolate) \
        .sort('system:time_start')
    return collection
#
def calculateIndices(image):
    """Append vegetation and red-edge indices as extra bands to *image*.

    Adds bands: NDVI, EVI, SAVI, NDRE, NDRE2, MSR, MTCI, REP.

    Args:
        image: ee.Image with Sentinel-2 bands B1-B8 available.

    Returns:
        ee.Image: the input image with the index bands appended.
    """
    NDVI = image.normalizedDifference(['B8', 'B4']).rename('NDVI')
    EVI = image.expression('2.5*((NIR-Red)/(NIR+6*Red-7.5*Blue+1))', {
        'NIR': image.select('B8'),
        'Red': image.select('B4'),
        'Blue': image.select('B2')
    }).float().rename('EVI')
    SAVI = image.expression('1.5*((NIR-Red)/(NIR+Red+0.5))', {
        'NIR': image.select('B8'),
        'Red': image.select('B4'),
    }).float().rename('SAVI')
    NDRE = image.normalizedDifference(['B6', 'B5']).rename('NDRE')
    NDRE2 = image.expression('1.5*((RedEdge2-RedEdge1)/(RedEdge2+RedEdge1-2*Aerosols))', {
        'RedEdge2': image.select('B6'),
        'RedEdge1': image.select('B5'),
        'Aerosols': image.select('B1'),
    }).float().rename('NDRE2')
    # NOTE(review): this is a normalized difference of B6 and B1, not the
    # classic MSR (modified simple ratio) formula — confirm the intended index.
    MSR = image.normalizedDifference(['B6', 'B1']).rename('MSR')
    MTCI = image.expression('((RedEdge2-RedEdge1)/(RedEdge1-Red))', {
        'RedEdge2': image.select('B6'),
        'RedEdge1': image.select('B5'),
        'Red': image.select('B4'),
    }).float().rename('MTCI')
    REP = image.expression('705+35*((0.5*(Red-RedEdge3)-RedEdge1)/(RedEdge2-RedEdge1))', {
        'RedEdge3': image.select('B7'),
        'RedEdge2': image.select('B6'),
        'RedEdge1': image.select('B5'),
        'Red': image.select('B4'),
    }).float().rename('REP')
    # BUG FIX: the original chained image.add(NDVI), which performs pixel-wise
    # ADDITION of NDVI values to every band instead of appending a band, and it
    # ended with .addBands(CI) where CI is undefined (NameError at runtime).
    # Use addBands throughout and drop the CI band.
    # TODO: restore a CI band once its formula is known.
    return image.addBands(NDVI).addBands(EVI).addBands(SAVI).addBands(NDRE) \
        .addBands(NDRE2).addBands(MSR).addBands(MTCI).addBands(REP)
def computeImageCollectionStatistics(image):
    """Reduce an ImageCollection to per-pixel temporal statistics.

    For every input band, produces min, max, median, mean, stdDev, the
    inter-quartile range, and the 25th/75th percentiles. reduce() names the
    output bands '<band>_<reducer>' (e.g. 'B11_p25').

    Args:
        image: ee.ImageCollection to reduce over time.

    Returns:
        ee.Image containing all statistic bands concatenated.
    """
    # Locals renamed: the original shadowed the builtins `min` and `max`.
    minimum = image.reduce(ee.Reducer.min())
    maximum = image.reduce(ee.Reducer.max())
    median = image.reduce(ee.Reducer.median())
    mean = image.reduce(ee.Reducer.mean())
    stdv = image.reduce(ee.Reducer.stdDev())
    p25 = image.reduce(ee.Reducer.percentile([25]))
    p75 = image.reduce(ee.Reducer.percentile([75]))
    # NOTE(review): subtract keeps the left operand's band names, so the IQR
    # bands are named like the p75 bands; when concatenated below, the true
    # p75 bands then receive a '_1' suffix (e.g. 'B11_p75_1') — confirm
    # downstream feature_names expect this.
    iqr = p75.subtract(p25)
    # ee.Image.cat replaces the original ee.Image().select().addBands([...])
    # construction; band order (and thus duplicate-name suffixing) is unchanged.
    return ee.Image.cat([minimum, maximum, median, mean, stdv, iqr, p25, p75])
# Spectral bands plus derived indices to summarise over the year.
# NOTE(review): 'NDVI' is produced by calculateIndices but absent from this
# list — confirm whether its exclusion is intentional.
bands1=['B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B11', 'B8A',
'EVI', 'SAVI', 'NDRE', 'NDRE2', 'MSR', 'MTCI', 'REP']
# Accumulator list (unused in this chunk — presumably intended for
# multi-ROI runs; verify against the rest of the file).
SpectralTemporalData_roi = []
# 2022 Sentinel-2 collection over Inner Mongolia with indices appended per image.
composite_data_roi=data_preprocessing(neimeng,2022,2022)
composite_data_roi=composite_data_roi.filterBounds(neimeng).map(calculateIndices)
# Per-pixel temporal statistics of the selected bands, clipped to the study area.
temp = computeImageCollectionStatistics(composite_data_roi.select(bands1)) \
.clip(neimeng)
imgAllBands = ee.ImageCollection(temp).mosaic()
# Feature (band) names the uploaded classifier expects; these must match the
# column names used when the model was trained locally.
# NOTE(review): only B11 statistics are listed, and both 'B11_p75' and
# 'B11_p75_1' appear — duplicate band names get a '_1' suffix after
# concatenation, so 'B11_p75' here is actually the IQR band; confirm against
# the training data export.
feature_names = ['B11_max', 'B11_mean', 'B11_median', 'B11_min', 'B11_p25', 'B11_p75','B11_p75_1','B11_stdDev'
]
# read the exported tree feature collection
rf_fc = ee.FeatureCollection('users/BaggedDT')
# convert it to a classifier, very similar to the `ml.trees_to_classifier` function
another_classifier = ml.fc_to_classifier(rf_fc)
# classify the image again but with the classifier from the persisted trees
classified = imgAllBands.select(feature_names).classify(another_classifier)
def getBoundingBox(geometry):
    """Return the axis-aligned bounding box of *geometry* as client-side floats.

    Args:
        geometry: ee.Geometry whose bounds are wanted.

    Returns:
        list: four [lon, lat] corners in the order
        [[xMin, yMin], [xMin, yMax], [xMax, yMax], [xMax, yMin]].

    The original issued four separate getInfo() round trips (one per min/max
    via unbound ee.ComputedObject.getInfo calls); a single getInfo() on the
    bounds ring fetches the same information in one request.
    """
    # bounds() yields a rectangle; coordinates() is a list containing one
    # closed linear ring of [x, y] points, fetched to the client at once.
    ring = geometry.bounds().coordinates().getInfo()[0]
    xs = [pt[0] for pt in ring]
    ys = [pt[1] for pt in ring]
    xMin, xMax = min(xs), max(xs)
    yMin, yMax = min(ys), max(ys)
    boundingBox = [[xMin, yMin], [xMin, yMax], [xMax, yMax], [xMax, yMin]]
    print(boundingBox)
    return boundingBox
# Export region: a lon/lat rectangle (WGS84) in eastern Inner Mongolia,
# listed counter-clockwise from the north-west corner.
geometry = ee.Geometry.Polygon(
[[[119.9834710282812, 43.987118178378346],
[119.9834710282812, 43.4411885827272],
[121.1645013017187, 43.4411885827272],
[121.1645013017187, 43.987118178378346]]])
def exportGeoTiff(image, filename, geometry):
    """Start a Google Drive export of *image* over the bounds of *geometry*.

    Args:
        image: ee.Image to export.
        filename: task description / output file name (str).
        geometry: ee.Geometry whose bounding box defines the export region.

    Returns:
        ee.batch.Task: the started task, so callers can poll its status.
        (The original returned None; callers that ignore the result are
        unaffected.)
    """
    boundingBox = getBoundingBox(geometry)
    # Export.image.toDrive is the supported entry point; the bare
    # ee.batch.Export.image(image, name, config) form used originally is a
    # deprecated legacy alias. The 'folder' option only applies to Drive
    # exports, confirming Drive was the intended destination.
    task = ee.batch.Export.image.toDrive(
        image=image,
        description=filename,
        folder='BaggedDT',
        scale=20,
        fileFormat='GeoTIFF',
        region=boundingBox,
    )
    task.start()
    return task
# Kick off the export; the task itself runs asynchronously on the GEE servers
# and its progress is visible in the GEE Tasks tab.
OutResult = exportGeoTiff(classified, 'BaggedDT-3', geometry)
# Make a palette: a list of hex strings.
# NOTE(review): palette is defined but never used in this chunk — presumably
# intended for a Map.addLayer visualisation of `classified`; confirm.
palette = ['CC0000', 'FFCC00', '336600', '00FF00', 'CCCCCC']
# Display the interactive map (Jupyter cell output).
Map