**百行高光谱代码**

## 主函数
from matplotlib import pyplot as plt
import torch.optim as optim
from torch.utils.data import DataLoader
from modle.cnn import *#是把一个模块中所有函数都导入进来;
from data.dataset import Hyperspectral_DataSet
# Best validation accuracy seen so far (updated inside main()), the
# per-epoch metric histories used for plotting at the end of training,
# and the compute device (first CUDA GPU if available, else CPU).
best_acc=0
train_loss=[]
train_acc=[]
test_loss=[]
test_acc=[]
device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def main():
    """Train Hyperspectral_CNN on the PaviaU dataset and plot metrics.

    Side effects: updates the module-level metric lists and ``best_acc``,
    saves the best checkpoint under ./hyperspectral-checkpoints/, and
    writes the loss/accuracy curves to ./result.png.
    """
    global best_acc
    # Model creation and device placement.
    model = Hyperspectral_CNN(num_classes=10)
    model = model.to(device)
    # Loss, optimizer and training hyper-parameters.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.0001)
    BATCH_SIZE = 128
    EPOCH = 100
    # Dataset loading.
    full_db = Hyperspectral_DataSet('./hyperspectral-files/PaviaU.mat',
                                    './hyperspectral-files/PaviaU_gt.mat')
    # 80/20 train/validation split.  Compute the validation length as the
    # remainder: int(len*0.8) + int(len*0.2) can be one short of len(full_db)
    # (e.g. len=103 -> 82+20=102), which makes random_split raise.
    n_train = int(len(full_db) * 0.8)
    train_db, val_db = torch.utils.data.random_split(
        full_db, [n_train, len(full_db) - n_train])
    train_loader = DataLoader(train_db, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = DataLoader(val_db, batch_size=BATCH_SIZE)
    for epoch in range(EPOCH):
        # ---- training phase ----
        model.train()
        correct = 0                # number of correct predictions this epoch
        n_samples = 0              # renamed from 'all' (shadowed the builtin)
        total_loss = 0.0
        for batch_idx, (inputs, targets) in enumerate(train_loader):
            n_samples += inputs.shape[0]
            # Move the batch to the training device.
            inputs = inputs.to(device)
            targets = targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            # Predicted class = argmax over the class dimension.
            preds = torch.argmax(outputs, 1)
            # .item() converts CUDA tensors to plain Python numbers so the
            # metric lists can be plotted with matplotlib later.
            correct += (preds == targets).sum().item()
            total_loss += loss.item()
            # Standard backward pass: zero grads, backprop, step.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        train_loss.append(total_loss)
        train_acc.append(correct / n_samples)
        print('train_acc:', correct / n_samples)
        # ---- validation phase ----
        model.eval()
        correct = 0
        n_samples = 0
        val_total_loss = 0.0
        with torch.no_grad():      # no gradients needed for evaluation
            for inputs, targets in test_loader:
                n_samples += inputs.shape[0]
                inputs = inputs.to(device)
                targets = targets.to(device)
                outputs = model(inputs)
                # Accumulate over all batches; the original recorded only the
                # LAST batch's loss, inconsistent with train_loss (a sum).
                val_total_loss += criterion(outputs, targets).item()
                preds = torch.argmax(outputs, 1)
                correct += (preds == targets).sum().item()
        acc = correct / n_samples
        print('test_acc:', acc)
        test_loss.append(val_total_loss)
        test_acc.append(acc)
        # Track the best validation accuracy and checkpoint the model.
        if acc > best_acc:
            best_acc = acc
            torch.save(model.state_dict(),
                       './hyperspectral-checkpoints/best-{}.pth'.format(best_acc))
    # Plot loss (left) and accuracy (right) curves over epochs.
    fig, ax = plt.subplots(1, 2)
    ax[0].plot(range(EPOCH), train_loss, label='train_loss')
    ax[0].plot(range(EPOCH), test_loss, label='test_loss')
    ax[0].legend()
    ax[1].plot(range(EPOCH), train_acc, label='train_acc')
    ax[1].plot(range(EPOCH), test_acc, label='test_acc')
    ax[1].legend()
    plt.savefig('./result.png')
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
数据处理模块
import scipy.io as scio
class Hyperspectral_DataSet(Dataset):
    """PaviaU hyperspectral pixels as a flat per-pixel classification dataset.

    Each sample is one pixel's 103-band spectrum paired with its
    ground-truth class label.
    """

    def __init__(self, data_path, label_path):
        super().__init__()
        # Load the data cube as (N, 103) per-pixel spectra and flatten the
        # ground-truth map into a matching (N,) label vector.
        spectra = scio.loadmat(data_path)['paviaU'].reshape((-1, 103)).astype(int)
        gt = scio.loadmat(label_path)['paviaU_gt'].flatten().astype(int)
        # Shuffle samples once (same permutation for data and labels) so
        # downstream consumers see a random order.
        order = np.random.permutation(spectra.shape[0])
        self.data = spectra[order, :]
        self.labels = gt[order]

    def __len__(self):
        """Number of pixels in the dataset."""
        return self.data.shape[0]

    def __getitem__(self, item):
        """Return a (1, bands) float32 spectrum tensor and its long label."""
        spectrum = np.expand_dims(self.data[item], axis=0)
        return (torch.tensor(spectrum, dtype=torch.float32),
                torch.tensor(self.labels[item], dtype=torch.long))
CNN模块
import torch.nn as nn
import torch
import numpy as np
class Hyperspectral_CNN(nn.Module):
def __init__(self, num_classes):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv1d(in_channels=1, out_channels=16, kernel_size=4), nn.BatchNorm1d(16), nn.ReLU(),
nn.Conv1d(in_channels=16, out_channels=16, kernel_size=4), nn.BatchNorm1d(16), nn.ReLU(),
)
self.pool1 = nn.MaxPool1d(2)
self.conv2 = nn.Sequential(
nn.Conv1d(in_channels=16, out_channels=32, kernel_size=4), nn.BatchNorm1d(32), nn.ReLU(),
nn.Conv1d(in_channels=32, out_channels=32, kernel_size=4), nn.BatchNorm1d(32), nn.ReLU(),
)
self.pool2 = nn.MaxPool1d(2)
self.conv3 = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=64, kernel_size=4), nn.BatchNorm1d(64), nn.ReLU(),
nn.Conv1d(in_channels=64, out_channels=64, kernel_size=4), nn.BatchNorm1d(64), nn.ReLU(),
)
self.pool3 = nn.MaxPool1d(2)
self.conv4 = nn.Sequential(
nn.Conv1d(in_channels=64, out_channels=128, kernel_size=4), nn.BatchNorm1d(128), nn.ReLU(),
nn.Conv1d(in_channels=128, out_channels=128, kernel_size=4), nn.BatchNorm1d(128), nn.ReLU(),
)
self.fc1 = nn.Linear(128, 56)
self.fc2 = nn.Linear(56, num_classes)
def forward(self, x):
out = x
out = self.pool1(self.conv1(out))
out = self.pool2(self.conv2(out))
out = self.pool3(self.conv3(out))
out = self.conv4(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.fc2(out)
return out
def feature(self, x):
x = torch.tensor(np.reshape(x, (1, 1, len(x))), dtype=torch.float32)
out = self.pool1(self.conv1(x))
out = self.pool2(self.conv2(out))
out = self.pool3(self.conv3(out))
out = self.conv4(out)
return out.view(-1)