pytorch实现森林植被数据集分类(7分类)

导入模块及数据

from sklearn.datasets import fetch_covtype
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader

# Fetch the forest cover type dataset (downloads on first use, then cached locally).
fetch = fetch_covtype()
# Features and labels; classes arrive as 1..7, re-index to 0..6 so they can
# be used directly as CrossEntropyLoss class indices.
X = fetch.data
y = fetch.target - 1
# Hold out 30% of the samples as the test split (fixed seed for reproducibility).
x_tr, x_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=666)

随机森林建模

# Random-forest baseline for the 7-class problem: 300 trees, depth capped at 20.
randfc = RandomForestClassifier(n_estimators=300, max_depth=20)
randfc.fit(x_tr, y_tr)
y_pred = randfc.predict(x_te)
# score() already returns mean accuracy on (x_te, y_te) — reuse it for the
# printout instead of recomputing (y_pred == y_te).mean() a second time.
score = randfc.score(x_te, y_te)
# Print test accuracy rounded to 3 decimals.
print(round(score, 3))

在这里插入图片描述

设计神经网络层及训练

# Convert the numpy splits to tensors: float32 features, int64 (long) labels
# as required by nn.CrossEntropyLoss.
x_train = torch.from_numpy(x_tr).type(torch.float32)
x_test = torch.from_numpy(x_te).type(torch.float32)
y_train = torch.from_numpy(y_tr).type(torch.long)
y_test = torch.from_numpy(y_te).type(torch.long)
# Hyperparameters and compute device (GPU when available).
batch = 64
lr = 0.001
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Wrap the tensors in DataLoaders. FIX: shuffle the training set each epoch —
# the original left shuffle=False, so SGD saw the exact same batch order every
# epoch. Test-set order is irrelevant to the metrics, so it stays unshuffled.
train_td = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_td, batch_size=batch, shuffle=True)
test_td = TensorDataset(x_test, y_test)
test_dl = DataLoader(test_td, batch_size=batch)
# Fully-connected classifier: 54 input features -> 7 class logits, with a
# ReLU after every hidden layer and no activation on the output layer
# (CrossEntropyLoss expects raw logits).
_dims = [54, 108, 108, 64, 64, 35, 7]
_layers = []
for _fan_in, _fan_out in zip(_dims[:-1], _dims[1:]):
    _layers += [nn.Linear(_fan_in, _fan_out), nn.ReLU()]
del _layers[-1]  # drop the trailing ReLU so the final layer emits logits
model = nn.Sequential(*_layers)

model.to(device)  # move parameters to the GPU when one is available
# Plain SGD plus cross-entropy loss for the 7-way classification.
optimizer = optim.SGD(model.parameters(), lr=lr)
loss_func = nn.CrossEntropyLoss()

# Train for 600 epochs; after each epoch, evaluate mean loss and accuracy on
# the test set. NOTE: despite its name, loss_list records the per-epoch TEST
# ACCURACY (it feeds the final max() report); the name is kept for
# compatibility with the original script.
loss_list = []
for epoch in range(600):
    model.train()
    train_all_loss = 0.0
    for indexs, (datas, labels) in enumerate(train_dl):
        datas, labels = datas.to(device), labels.to(device)
        predict = model(datas)
        train_loss = loss_func(predict, labels)
        train_all_loss += train_loss.item()
        # Standard step: clear stale gradients, backprop, update weights.
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()

        if indexs % 1000 == 0:
            # .item() so the tensor is formatted as a plain float
            print("batch_loss:{:.3f}".format(train_loss.item()))

    # Evaluation pass: no gradients needed; eval() also keeps this correct
    # should dropout/batch-norm layers ever be added to the model.
    model.eval()
    with torch.no_grad():
        test_all_loss = 0.0
        accuracy = 0.0
        for index, (data, label) in enumerate(test_dl):
            data, label = data.to(device), label.to(device)
            out = model(data)
            test_loss = loss_func(out, label)
            test_all_loss += test_loss.item()
            pred = out.argmax(dim=1)  # predicted class per sample
            batch_accuracy = pred.eq(label).float().sum()
            if index % 1000 == 0:
                print("batch_accuracy:", batch_accuracy.item())
            accuracy += batch_accuracy.item()
        # BUG FIX: the original divided by the LAST BATCH INDEX (t1/t2 ==
        # num_batches - 1) instead of the batch count, overstating both mean
        # losses (and dividing by zero with a single batch). Use len() of the
        # loaders, which is the number of batches.
        train_all_loss = train_all_loss / len(train_dl)
        test_all_loss = test_all_loss / len(test_dl)
        accuracy = accuracy / len(x_test)
        loss_list.append(round(accuracy, 3))
        print("epoch:{} train_loss:{:.3f} test_loss:{:.3f} accuracy:{:.3f}".format(
            epoch, train_all_loss, test_all_loss, accuracy))

# Report the best test accuracy observed across all epochs.
print(max(loss_list))

在这里插入图片描述

  • 1
    点赞
  • 11
    收藏
    觉得还不错? 一键收藏
  • 2
    评论
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值