Implementing a single-layer neural network:

This implements a single-layer neural network (one hidden layer plus an output layer) in PyTorch, using sklearn.datasets.load_iris() as the dataset.

from sklearn.datasets import load_iris  # dataset
import torch                            # tensors and device handling
import torch.nn as nn                   # base Module class
import torch.nn.functional as F         # activation and loss functions
from torch.optim import Adam, SGD       # optimizers
  • Prepare the dataset and convert it to tensors
iris = load_iris()
x = iris["data"]
y = iris["target"]
x = torch.FloatTensor(x)
y = torch.LongTensor(y)
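
The post trains on all 150 samples. If you also want a held-out set to measure generalization, a split is easy to add; a minimal sketch using sklearn's train_test_split (my addition, not part of the original post):

from sklearn.model_selection import train_test_split

# Hypothetical extension: hold out 20% of the samples for evaluation.
x_train, x_test, y_train, y_test = train_test_split(
    iris["data"], iris["target"], test_size=0.2, random_state=0)
x_train = torch.FloatTensor(x_train)
x_test = torch.FloatTensor(x_test)
y_train = torch.LongTensor(y_train)
y_test = torch.LongTensor(y_test)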
  • Define the model
class Net(nn.Module):
    def __init__(self, input_feature, hidden_feature, output_feature):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_feature, hidden_feature)
        self.fc2 = nn.Linear(hidden_feature, output_feature)
    def forward(self, input):
        output = torch.sigmoid(self.fc1(input))  # F.sigmoid is deprecated; use torch.sigmoid
        output = self.fc2(output)
        return F.log_softmax(output, dim=1)
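
Side note: returning log_softmax from forward() and training with F.nll_loss (as the training function below does) is equivalent to returning raw logits and using F.cross_entropy. A quick check of that equivalence (illustrative only):

# log_softmax + nll_loss equals cross_entropy on raw logits
logits = torch.randn(5, 3)              # 5 samples, 3 classes
targets = torch.tensor([0, 2, 1, 1, 0])
loss_a = F.nll_loss(F.log_softmax(logits, dim=1), targets)
loss_b = F.cross_entropy(logits, targets)
print(torch.allclose(loss_a, loss_b))   # True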
  • Initialize the hyperparameters
input_feature = 4    # each iris sample has 4 features
hidden_feature = 5
output_feature = 3   # iris has 3 classes (the original post used 4, which also runs but wastes one output)
learn_rate = 0.5
epoch = 1000
  • Instantiate the model
net = Net(input_feature, hidden_feature, output_feature)
  • Move everything onto CUDA (optional)
# check whether CUDA is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
x = x.to(device)
y = y.to(device)
net = net.to(device)
  • Define the optimizer
optimizer = SGD(net.parameters(), lr=learn_rate)
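
The complete code at the end also shows Adam as a commented-out alternative. Note that a learning rate of 0.5 is far too high for Adam; if you swap it in, something around 0.01 is a more typical starting point (that value is my guess, not from the original post):

# Hypothetical alternative optimizer; Adam needs a much smaller lr than SGD here.
optimizer = Adam(net.parameters(), lr=0.01)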
  • The training function train()
def train(epoch):
    for i in range(epoch):
        optimizer.zero_grad()          # clear gradients from the previous step
        output = net(x)                # forward pass
        loss = F.nll_loss(output, y)   # NLL loss on the log-probabilities
        loss.backward()                # backpropagate
        optimizer.step()               # update the weights
        if i % 50 == 0:                # the results below are logged every 50 epochs
            print(i, "--> loss:", loss.item())
  • Run
train(epoch)
  • Results
0 --> loss: 0.059587012976408005
50 --> loss: 0.059238117188215256
100 --> loss: 0.058904100209474564
150 --> loss: 0.05858393386006355
200 --> loss: 0.05827658995985985
250 --> loss: 0.057981111109256744
300 --> loss: 0.05769683048129082
350 --> loss: 0.05742289870977402
400 --> loss: 0.05715873837471008
450 --> loss: 0.05690363422036171
500 --> loss: 0.0566570907831192
550 --> loss: 0.056418608874082565
600 --> loss: 0.056187763810157776
650 --> loss: 0.0559641532599926
700 --> loss: 0.05574728175997734
750 --> loss: 0.055536847561597824
800 --> loss: 0.0553324818611145
850 --> loss: 0.05513394623994827
900 --> loss: 0.05494094267487526
950 --> loss: 0.0547531396150589
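
The loss alone doesn't say how well the classes are separated. A quick training-set accuracy check can be run after train(epoch); a minimal sketch (my addition, not in the original post):

# Hypothetical evaluation step: training-set accuracy after training.
with torch.no_grad():                # no gradients needed for evaluation
    pred = net(x).argmax(dim=1)      # predicted class per sample
    acc = (pred == y).float().mean().item()
print("training accuracy:", acc)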

Complete code

from sklearn.datasets import load_iris
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD
# check whether CUDA is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# prepare the dataset
iris = load_iris()
x = iris["data"]
y = iris["target"]
x = torch.FloatTensor(x)
y = torch.LongTensor(y)
# define the model
class Net(nn.Module):
    def __init__(self, input_feature, hidden_feature, output_feature):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_feature, hidden_feature)
        self.fc2 = nn.Linear(hidden_feature, output_feature)
    def forward(self, input):
        output = torch.sigmoid(self.fc1(input))
        output = self.fc2(output)
        return F.log_softmax(output, dim=1)
# initialize the hyperparameters
input_feature = 4
hidden_feature = 5
output_feature = 3  # iris has 3 classes
learn_rate = 0.5
epoch = 1000
# instantiate the model
net = Net(input_feature, hidden_feature, output_feature)
x = x.to(device)
y = y.to(device)
net = net.to(device)
# optimizer = Adam(net.parameters(), lr=learn_rate)
optimizer = SGD(net.parameters(), lr=learn_rate)
def train(epoch):
    for i in range(epoch):
        optimizer.zero_grad()
        output = net(x)
        loss = F.nll_loss(output, y)
        loss.backward()
        optimizer.step()
        if i % 50 == 0:
            print(i, "--> loss:", loss.item())
train(epoch)
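
Finally, the trained network can classify a new measurement. A minimal inference sketch (the sample values are illustrative, not from the original post):

# Hypothetical inference example: classify one iris measurement.
sample = torch.FloatTensor([[5.1, 3.5, 1.4, 0.2]]).to(device)
with torch.no_grad():
    pred_class = net(sample).argmax(dim=1).item()
print("predicted class:", iris["target_names"][pred_class])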