Deep Learning: Iris Classification in a Jupyter Notebook

Import the required packages

%matplotlib inline
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
torch.set_printoptions(edgeitems=2, linewidth=75)
# import the required packages

Load the data

path = r'D:\My_download\Python_data\iris_training.csv'
org_data = pd.read_csv(path)
org_data.head()
len(org_data)
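
If the iris_training.csv file isn't available locally, a roughly equivalent DataFrame can be built from the copy of the iris dataset bundled with scikit-learn. This is only a fallback sketch, and it assumes the same layout as the CSV above: four feature columns followed by an integer class label.

# Fallback sketch: build an equivalent DataFrame from scikit-learn's
# bundled iris dataset (four feature columns plus an integer label 0/1/2)
from sklearn.datasets import load_iris
import pandas as pd

iris = load_iris()
org_data = pd.DataFrame(iris.data, columns=iris.feature_names)
org_data['label'] = iris.target
org_data.head()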

Split the data into a training set and a validation set

train_data = org_data.sample(frac=0.8)
val_data = org_data[~org_data.index.isin(train_data.index)]
len(train_data), len(val_data)
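
Note that sample draws a different random 80% each time the cell runs, so the split changes between notebook restarts. If a reproducible split is wanted, a fixed seed can be passed; the value 42 below is an arbitrary choice.

# Optional: reproducible split (the seed value is arbitrary)
train_data = org_data.sample(frac=0.8, random_state=42)
val_data = org_data[~org_data.index.isin(train_data.index)]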

Separate the features X and labels Y in the training and validation sets

train_data_X = train_data.iloc[:, 0:4]
train_data_Y = train_data.iloc[:, 4:].squeeze()
val_data_X = val_data.iloc[:, 0:4]
val_data_Y = val_data.iloc[:, 4:].squeeze()

Standardize the data

# Standardization: fit the scaler on the training set only, then apply
# the same transform to the validation set, so no statistics from the
# validation data leak into training
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(train_data_X)
train_data_X = scaler.transform(train_data_X)
val_data_X = scaler.transform(val_data_X)
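
As an optional sanity check, the scaled training features should now have per-column mean close to 0 and standard deviation close to 1; the validation features will only be approximately so, since the scaler statistics come from the training set alone.

# Sanity check: per-column mean ~0 and std ~1 on the training features
print(train_data_X.mean(axis=0).round(3))
print(train_data_X.std(axis=0).round(3))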

Convert the data to tensors

# Convert to tensors: features as float32, labels as int64
# (CrossEntropyLoss expects integer class indices)
train_data_X = torch.from_numpy(np.array(train_data_X)).float()
val_data_X = torch.from_numpy(np.array(val_data_X)).float()
train_data_Y = torch.from_numpy(np.array(train_data_Y)).long()
val_data_Y = torch.from_numpy(np.array(val_data_Y)).long()
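
A quick shape and dtype check catches conversion mistakes early: the features should be float32 and the labels int64. The exact row counts depend on the random split, so the sizes in the comments are only examples.

# Expect float32 features and int64 labels; row counts follow the 80/20 split
print(train_data_X.shape, train_data_X.dtype)   # e.g. torch.Size([96, 4]) torch.float32
print(train_data_Y.shape, train_data_Y.dtype)   # e.g. torch.Size([96]) torch.int64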

Build the neural network model

# Define the neural network: two hidden layers with sigmoid activations.
# The output layer returns raw logits, because nn.CrossEntropyLoss
# applies log-softmax internally.
import torch.nn as nn

class NN(nn.Module):
    def __init__(self, in_dim, hidden_dim1, hidden_dim2, out_dim):
        super().__init__()
        self.layer1 = nn.Linear(in_dim, hidden_dim1)
        self.layer2 = nn.Linear(hidden_dim1, hidden_dim2)
        self.layer3 = nn.Linear(hidden_dim2, out_dim)

    def forward(self, x):
        x = torch.sigmoid(self.layer1(x))
        x = torch.sigmoid(self.layer2(x))
        return self.layer3(x)
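
Before training, a forward pass on a dummy batch is a cheap way to confirm the wiring: for a batch of four-feature inputs, the model should return one row of three raw logits per sample.

# Smoke test: a dummy batch of 5 samples with 4 features each
# should produce a (5, 3) tensor of raw logits
dummy = torch.randn(5, 4)
print(NN(4, 32, 16, 3)(dummy).shape)   # torch.Size([5, 3])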

Instantiate the model and the optimizer

model = NN(4, 32, 16, 3)
optimizer = optim.Adam(model.parameters(), lr=1e-5)
model

Define the training loop

def training_loop(n_epochs, optimizer, model, loss_fn, train_t_u, train_t_c, val_t_u, val_t_c):
    for epoch in range(1, n_epochs + 1):
        train_t_p = model(train_t_u)
        train_loss = loss_fn(train_t_p, train_t_c)
        with torch.no_grad():  # no gradients needed for the validation pass
            val_t_p = model(val_t_u)
            val_loss = loss_fn(val_t_p, val_t_c)
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()
        if epoch <= 3 or epoch % 50000 == 0:
            print(f'Epoch {epoch}, Training loss {train_loss.item():.4f}, Validation loss {val_loss.item():.4f}')
            # compute accuracy at the same reporting points
            train_accuracy = compute_accuracy(train_t_p, train_t_c)
            val_accuracy = compute_accuracy(val_t_p, val_t_c)
            print(f' Training accuracy: {train_accuracy:.2f}%, Validation accuracy: {val_accuracy:.2f}%')

    return train_t_p, val_t_p

Define the accuracy function

def compute_accuracy(predictions, targets):
    predicted_labels = torch.argmax(predictions, dim=1)
    correct = (predicted_labels == targets).sum().item()
    total = targets.size(0)
    accuracy = correct / total * 100

    return accuracy
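
A tiny hand-checkable example of compute_accuracy: with two of three argmax predictions matching the targets, the result should be about 66.67.

# Tiny check: rows are per-class scores, argmax gives classes [0, 2, 1],
# targets are [0, 2, 2], so 2 out of 3 are correct -> ~66.67
demo_pred = torch.tensor([[2.0, 0.1, 0.3],
                          [0.2, 0.4, 1.5],
                          [0.3, 1.2, 0.8]])
demo_target = torch.tensor([0, 2, 2])
print(compute_accuracy(demo_pred, demo_target))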

Start training

training_loop(
    n_epochs=500000,
    optimizer=optimizer,
    model=model,
    loss_fn=nn.CrossEntropyLoss(),
    train_t_u=train_data_X,
    train_t_c=train_data_Y,
    val_t_u=val_data_X,
    val_t_c=val_data_Y,
)
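
Once training finishes, the learned weights can be saved and the model used on new measurements. The file name and the sample values below are placeholders, and a new sample must be standardized with the same scaler fitted above before it is fed to the model.

# Sketch: save the weights and classify one new flower
# (file name and measurements are placeholders; reuse the training scaler)
torch.save(model.state_dict(), 'iris_nn.pt')

new_flower = [[5.1, 3.5, 1.4, 0.2]]                       # sepal/petal measurements
new_x = torch.from_numpy(scaler.transform(new_flower)).float()
with torch.no_grad():
    logits = model(new_x)
print(torch.argmax(logits, dim=1))                        # predicted class index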

Now go play with it!
