单层神经网络的实现,数据集为sklearn.datasets.load_iris()
from sklearn.datasets import load_iris  # dataset

import torch
import torch.nn as nn                   # Module base class
import torch.nn.functional as F         # activation and loss functions
from torch.optim import Adam, SGD       # optimizers
- 准备数据集,并保存为tensor格式
# Load the iris dataset and convert it to tensors.
iris = load_iris()
x = iris["data"]    # feature matrix (presumably shape (n_samples, 4))
y = iris["target"]  # integer class labels
# torch.tensor with an explicit dtype is preferred over the legacy
# FloatTensor/LongTensor constructors; dtypes are identical (float32 / int64).
x = torch.tensor(x, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.long)
- 定义模型
class net(nn.Module):
    """One-hidden-layer classifier: Linear -> sigmoid -> Linear -> log_softmax.

    Args:
        input_feature: number of input features per sample.
        hidden_feature: width of the hidden layer.
        output_feature: number of classes (log-probabilities returned).
    """

    def __init__(self, input_feature, hidden_feature, output_feature):
        super(net, self).__init__()
        self.fc1 = nn.Linear(input_feature, hidden_feature)
        self.fc2 = nn.Linear(hidden_feature, output_feature)

    def forward(self, input):
        # torch.sigmoid replaces the deprecated F.sigmoid.
        hidden = torch.sigmoid(self.fc1(input))
        logits = self.fc2(hidden)
        # Log-probabilities per class; pair with F.nll_loss during training.
        return F.log_softmax(logits, dim=1)
- 初始化参数
# Hyper-parameters.
input_feature = 4   # iris samples have 4 features
hidden_feature = 5  # width of the single hidden layer
# Iris has 3 target classes (0, 1, 2). The original value of 4 left one
# output unit that could never correspond to a real class.
output_feature = 3
learn_rate = 0.5
epoch = 1000
- 初始化模型
# Instantiate the model.
# NOTE(review): this rebinds the name `net` from the class to an instance
# (shadowing); all later code relies on `net` being the instance.
net =net(input_feature,hidden_feature,output_feature)
- 放到cuda上运行(可省略)
# Pick the GPU when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Move the data and the model onto the chosen device.
x, y = x.to(device), y.to(device)
net = net.to(device)
- 定义优化器
# Plain SGD over all model parameters (learning rate passed explicitly).
optimizer = SGD(net.parameters(), lr=learn_rate)
- 训练函数train()
def train(epoch):
    """Run full-batch gradient descent for `epoch` iterations.

    Uses module-level globals: net, optimizer, x, y.
    Prints the NLL loss every 10 iterations.
    """
    # NOTE(review): the sample output in this document shows the loss every
    # 50 iterations, not every 10 — confirm the intended print interval.
    for i in range(epoch):
        optimizer.zero_grad()          # clear gradients from the previous step
        output = net(x)                # forward pass over the whole dataset
        loss = F.nll_loss(output, y)   # pairs with the net's log_softmax output
        loss.backward()
        optimizer.step()
        if i % 10 == 0:
            # .item() prints a plain float instead of a tensor repr,
            # matching the sample output shown below.
            print(i, "--> loss:", loss.item())
- 运行
# Kick off training for the configured number of iterations.
train(epoch)
- 结果
0 --> loss: 0.059587012976408005
50 --> loss: 0.059238117188215256
100 --> loss: 0.058904100209474564
150 --> loss: 0.05858393386006355
200 --> loss: 0.05827658995985985
250 --> loss: 0.057981111109256744
300 --> loss: 0.05769683048129082
350 --> loss: 0.05742289870977402
400 --> loss: 0.05715873837471008
450 --> loss: 0.05690363422036171
500 --> loss: 0.0566570907831192
550 --> loss: 0.056418608874082565
600 --> loss: 0.056187763810157776
650 --> loss: 0.0559641532599926
700 --> loss: 0.05574728175997734
750 --> loss: 0.055536847561597824
800 --> loss: 0.0553324818611145
850 --> loss: 0.05513394623994827
900 --> loss: 0.05494094267487526
950 --> loss: 0.0547531396150589
完整代码
from sklearn.datasets import load_iris

import torch  # required for torch.device / tensor construction below
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD

# Check whether CUDA is available; fall back to the CPU otherwise.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Prepare the dataset and convert it to tensors.
iris = load_iris()
x = iris["data"]    # feature matrix (presumably shape (n_samples, 4))
y = iris["target"]  # integer class labels
# torch.tensor with an explicit dtype is preferred over the legacy
# FloatTensor/LongTensor constructors; dtypes are identical (float32 / int64).
x = torch.tensor(x, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.long)
#定义模型
class net(nn.Module):
    """One-hidden-layer classifier: Linear -> sigmoid -> Linear -> log_softmax.

    Args:
        input_feature: number of input features per sample.
        hidden_feature: width of the hidden layer.
        output_feature: number of classes (log-probabilities returned).
    """

    def __init__(self, input_feature, hidden_feature, output_feature):
        super(net, self).__init__()
        self.fc1 = nn.Linear(input_feature, hidden_feature)
        self.fc2 = nn.Linear(hidden_feature, output_feature)

    def forward(self, input):
        # torch.sigmoid replaces the deprecated F.sigmoid.
        hidden = torch.sigmoid(self.fc1(input))
        logits = self.fc2(hidden)
        # Log-probabilities per class; pair with F.nll_loss during training.
        return F.log_softmax(logits, dim=1)
#初始化参数
# Hyper-parameters.
input_feature = 4   # iris samples have 4 features
hidden_feature = 5  # width of the single hidden layer
# Iris has 3 target classes (0, 1, 2). The original value of 4 left one
# output unit that could never correspond to a real class.
output_feature = 3
learn_rate = 0.5
batch_size = 100    # NOTE(review): unused — training below is full-batch
epoch = 1000
#初始化模型
# Instantiate the model (this rebinds the name `net` to the instance),
# then move data and model onto the selected device.
net = net(input_feature, hidden_feature, output_feature)
x, y = x.to(device), y.to(device)
net = net.to(device)
# Alternative optimizer: optimizer = Adam(net.parameters(), learn_rate)
optimizer = SGD(net.parameters(), lr=learn_rate)
def train(epoch):
    """Run full-batch gradient descent for `epoch` iterations.

    Uses module-level globals: net, optimizer, x, y.
    Prints the NLL loss every 10 iterations.
    """
    # NOTE(review): the sample output in this document shows the loss every
    # 50 iterations, not every 10 — confirm the intended print interval.
    for i in range(epoch):
        optimizer.zero_grad()          # clear gradients from the previous step
        output = net(x)                # forward pass over the whole dataset
        loss = F.nll_loss(output, y)   # pairs with the net's log_softmax output
        loss.backward()
        optimizer.step()
        if i % 10 == 0:
            # .item() prints a plain float instead of a tensor repr,
            # matching the sample output shown above.
            print(i, "--> loss:", loss.item())
# Kick off training for the configured number of iterations.
train(epoch)