Comparing the gradient descent speed of sigmoid and ReLU with PyTorch
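Before running the experiment, it helps to recall why ReLU is expected to train faster: the derivative of sigmoid is sigmoid(x) * (1 - sigmoid(x)), which never exceeds 0.25 and vanishes far from the origin, while the derivative of ReLU is exactly 1 for any positive input, so gradients passed back through a sigmoid hidden layer are shrunk at every step. The short snippet below is not part of the original experiment, just a quick autograd check of this claim:

import torch

# sigmoid gradients are at most 0.25 and shrink toward 0 away from the origin
x = torch.linspace(-5.0, 5.0, steps=11, requires_grad=True)
torch.sigmoid(x).sum().backward()
print(x.grad)

# relu gradients are exactly 0 or 1, so they are never scaled down
x = torch.linspace(-5.0, 5.0, steps=11, requires_grad=True)
torch.relu(x).sum().backward()
print(x.grad)

The full comparison script: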

import torch
import torch.nn as nn
import torch.optim
import matplotlib.pyplot as plt


# Two clusters: points around (1.5, 1.5) are labelled 1, points around (4.5, 4.5) are labelled 0
X = torch.tensor([[1, 2], [2, 2], [2, 1], [1, 1], [4, 4], [4, 5], [5, 4], [5, 5]], dtype=torch.float)
y1 = torch.tensor([1, 1, 1, 1, 0, 0, 0, 0], dtype=torch.long)
y2 = torch.tensor([1, 1, 1, 1, 0, 0, 0, 0], dtype=torch.long)


class Perceptron_sigmoid(nn.Module):
    def __init__(self, in_features, hidden_features, out_features):
        super().__init__()
        self.layer1 = nn.Linear(in_features, hidden_features)
        self.layer2 = nn.Linear(hidden_features, out_features)

    def forward(self, x):
        x = self.layer1(x)
        x = torch.sigmoid(x)   # sigmoid hidden activation
        return self.layer2(x)

class Perceptron_relu(nn.Module):
    def __init__(self, in_features, hidden_features, out_features):
        super().__init__()
        self.layer1 = nn.Linear(in_features, hidden_features)
        self.layer2 = nn.Linear(hidden_features, out_features)

    def forward(self, x):
        x = self.layer1(x)
        x = torch.relu(x)      # ReLU hidden activation
        return self.layer2(x)


perceptron_s = Perceptron_sigmoid(2, 4, 2)
perceptron_r = Perceptron_relu(2, 4, 2)

optimizer1 = torch.optim.SGD(perceptron_s.parameters(), lr=0.1)
optimizer2 = torch.optim.SGD(perceptron_r.parameters(), lr=0.1)
c1 = torch.nn.CrossEntropyLoss()
c2 = torch.nn.CrossEntropyLoss()


loss_r = []
loss_s = []

for step in range(5000):
    optimizer1.zero_grad()
    optimizer2.zero_grad()
    r1 = perceptron_s(X)    # Variable is deprecated; plain tensors work with autograd directly
    r2 = perceptron_r(X)
    loss1 = c1(r1, y1)
    loss2 = c2(r2, y2)
    loss1.backward()
    loss2.backward()
    optimizer1.step()
    optimizer2.step()
    loss_s.append(loss1.item())   # store plain floats; appending the tensors would keep every graph alive
    loss_r.append(loss2.item())
    if step % 500 == 0:
        print(step, loss1.item(), loss2.item())

plt.plot(loss_s, label='sigmoid')
plt.plot(loss_r, label='relu')
plt.legend(loc=0)
plt.show()
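
To read the gap off numerically as well as from the plot, the final loss of each network can also be printed (a small addition, not in the original script):

print(f'final sigmoid loss: {loss_s[-1]:.6f}')
print(f'final relu loss:    {loss_r[-1]:.6f}')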