以下是使用TensorFlow实现的Unet遥感图像分类代码:
```python
import tensorflow as tf
import numpy as np
import os
import cv2
from sklearn.model_selection import train_test_split
# Seed both NumPy and TensorFlow RNGs so every run is reproducible.
np.random.seed(42)
tf.random.set_seed(42)
# Root directory of the dataset (placeholder path — point this at the
# directory containing the .tif images and their *_mask.tif masks).
data_path = "path/to/dataset"
# Build the U-Net segmentation network.
def Unet():
    """Build a U-Net model for binary segmentation of 256x256 RGB images.

    Returns:
        tf.keras.Model mapping (256, 256, 3) inputs to a (256, 256, 1)
        sigmoid probability map.
    """
    conv_kwargs = dict(activation='relu', padding='same',
                       kernel_initializer='he_normal')

    def double_conv(x, filters):
        # Two stacked 3x3 convolutions — the standard U-Net block.
        x = tf.keras.layers.Conv2D(filters, 3, **conv_kwargs)(x)
        return tf.keras.layers.Conv2D(filters, 3, **conv_kwargs)(x)

    def up_concat(x, skip, filters):
        # Upsample 2x, halve channels with a 2x2 conv, then concatenate
        # the encoder skip connection (skip first, matching the original).
        x = tf.keras.layers.UpSampling2D(size=(2, 2))(x)
        x = tf.keras.layers.Conv2D(filters, 2, **conv_kwargs)(x)
        return tf.keras.layers.concatenate([skip, x], axis=3)

    inputs = tf.keras.layers.Input(shape=(256, 256, 3))

    # Contracting (encoder) path.
    enc1 = double_conv(inputs, 64)
    down1 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(enc1)
    enc2 = double_conv(down1, 128)
    down2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(enc2)
    enc3 = double_conv(down2, 256)
    down3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(enc3)
    enc4 = tf.keras.layers.Dropout(0.5)(double_conv(down3, 512))
    down4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(enc4)

    # Bottleneck with dropout.
    bottom = tf.keras.layers.Dropout(0.5)(double_conv(down4, 1024))

    # Expanding (decoder) path with skip connections.
    dec6 = double_conv(up_concat(bottom, enc4, 512), 512)
    dec7 = double_conv(up_concat(dec6, enc3, 256), 256)
    dec8 = double_conv(up_concat(dec7, enc2, 128), 128)
    dec9 = double_conv(up_concat(dec8, enc1, 64), 64)
    dec9 = tf.keras.layers.Conv2D(2, 3, **conv_kwargs)(dec9)

    # Final 1x1 convolution collapses to a single-channel probability map.
    outputs = tf.keras.layers.Conv2D(1, 1, activation='sigmoid')(dec9)
    return tf.keras.Model(inputs=inputs, outputs=outputs)
# Load the dataset of remote-sensing images and their binary masks.
def load_data():
    """Load image/mask pairs from ``data_path`` and split train/test.

    Expects each image ``X.tif`` to have a sibling mask ``X_mask.tif``.

    Returns:
        (X_train, X_test, y_train, y_test) where the X arrays are float32
        images scaled to [0, 1] with shape (N, 256, 256, 3) and the y
        arrays are float32 binary masks with shape (N, 256, 256, 1) —
        matching the single-channel sigmoid output of ``Unet()``.
    """
    images = []
    masks = []
    for root, dirs, files in os.walk(data_path):
        for file in files:
            # Exclude the mask files themselves: the original
            # ``endswith(".tif")`` filter also matched "*_mask.tif" and
            # then tried to read a nonexistent "*_mask_mask.tif".
            if file.endswith(".tif") and not file.endswith("_mask.tif"):
                image = cv2.imread(os.path.join(root, file))
                mask = cv2.imread(
                    os.path.join(root, file.replace(".tif", "_mask.tif")),
                    cv2.IMREAD_GRAYSCALE)
                if image is None or mask is None:
                    # Unreadable image or missing mask — skip the pair
                    # instead of crashing on a None array.
                    continue
                image = cv2.resize(image, (256, 256))
                # Nearest-neighbor keeps mask labels binary; the default
                # bilinear interpolation would create fractional values.
                mask = cv2.resize(mask, (256, 256),
                                  interpolation=cv2.INTER_NEAREST)
                # Binarize AFTER resizing: np.where yields int64, a depth
                # cv2.resize does not support.
                mask = np.where(mask > 0, 1, 0).astype(np.float32)
                images.append(image)
                masks.append(mask)
    # Scale pixel values to [0, 1] for stable training.
    images = np.array(images, dtype=np.float32) / 255.0
    # Add a channel axis so targets match the model's (H, W, 1) sigmoid
    # output; the previous one-hot (H, W, 2) encoding mismatched it.
    masks = np.array(masks, dtype=np.float32)[..., np.newaxis]
    X_train, X_test, y_train, y_test = train_test_split(
        images, masks, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
# Train the U-Net model on the loaded dataset.
def train_model():
    """Train a ``Unet()`` model on the dataset at ``data_path``.

    Returns:
        The trained ``tf.keras.Model`` — previously the model was
        discarded, making it impossible to save or evaluate afterwards.
    """
    # Load and split the dataset.
    X_train, X_test, y_train, y_test = load_data()
    model = Unet()
    # Binary cross-entropy matches the single-channel sigmoid output.
    loss_fn = tf.keras.losses.BinaryCrossentropy()
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
    model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy'])
    model.fit(X_train, y_train, batch_size=32, epochs=50,
              validation_data=(X_test, y_test))
    return model
# Entry point: train the model when the file is run as a script.
if __name__ == '__main__':
    train_model()
```
在这个实现中,我们使用了TensorFlow中的keras API来构建Unet模型。load_data函数用于加载数据集,其中包括遥感图像和对应的掩码。训练模型使用了BinaryCrossentropy作为损失函数和Adam作为优化器,训练50个epochs。