Summary

# Linear regression
import torch
# Each row of X is [1, x1, x2]; the leading 1 multiplies the bias term
X = torch.tensor([[1, 0, 0], [1, 1, 0], [1, 0, 1], [1, 1, 1]], dtype=torch.float32)
w = torch.tensor([-0.2, 0.15, 0.15], dtype=torch.float32)  # b, w1, w2
z = torch.tensor([-0.2, -0.05, -0.05, 0.1], dtype=torch.float32)  # expected output
def LinearR(X, w):
    zhat = torch.mv(X, w)  # matrix-vector product: zhat = Xw
    return zhat
zhat = LinearR(X, w)
print(zhat)
print(torch.allclose(zhat, z))  # True
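
As a quick cross-check (added here, not in the original), torch.mv(X, w) and the @ operator compute the same matrix-vector product:

import torch
X = torch.tensor([[1, 0, 0], [1, 1, 0], [1, 0, 1], [1, 1, 1]], dtype=torch.float32)
w = torch.tensor([-0.2, 0.15, 0.15], dtype=torch.float32)
zhat_mv = torch.mv(X, w)  # matrix-vector product
zhat_at = X @ w           # same computation via the @ operator
print(torch.allclose(zhat_mv, zhat_at))  # True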
import torch
# No constant column this time: nn.Linear manages the bias internally
X = torch.tensor([[0, 0], [1, 0], [0, 1], [1, 1]], dtype=torch.float32)
torch.random.manual_seed(420)  # fix the random initialization of weight and bias
output = torch.nn.Linear(2, 1)  # 2 input features, 1 output
zhat = output(X)
print(zhat)
print(output.weight)
print("*" * 50)
print(output.bias)
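
A minimal sketch (an addition for illustration) verifying that nn.Linear computes zhat = X @ W.T + b from the printed weight and bias:

import torch
torch.random.manual_seed(420)
output = torch.nn.Linear(2, 1)
X = torch.tensor([[0, 0], [1, 0], [0, 1], [1, 1]], dtype=torch.float32)
zhat = output(X)
manual = X @ output.weight.T + output.bias  # zhat = X @ W.T + b
print(torch.allclose(zhat, manual))  # True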
# Logistic regression
# Sigmoid function, AND gate
import torch
X = torch.tensor([[1, 0, 0], [1, 1, 0], [1, 0, 1], [1, 1, 1]], dtype=torch.float32)
andgate = torch.tensor([[0], [0], [0], [1]], dtype=torch.float32)  # AND truth table
w = torch.tensor([-0.2, 0.15, 0.15], dtype=torch.float32)  # b, w1, w2

def LogisticR(X, w):
    zhat = torch.mv(X, w)
    sigma = torch.sigmoid(zhat)
    # sigma = 1 / (1 + torch.exp(-zhat))  # equivalent manual definition
    andhat = torch.tensor([int(x) for x in sigma >= 0.5], dtype=torch.float32)  # int(True)=1, int(False)=0
    return sigma, andhat  # return the prediction andhat, not the global andgate
sigma, andhat = LogisticR(X, w)
print(sigma)
print(andgate)
print(andhat)
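
The same threshold can be written without a Python loop (a sketch, reusing the weights above): casting the boolean comparison to float replaces the list comprehension, and the result matches the AND truth table:

import torch
X = torch.tensor([[1, 0, 0], [1, 1, 0], [1, 0, 1], [1, 1, 1]], dtype=torch.float32)
w = torch.tensor([-0.2, 0.15, 0.15], dtype=torch.float32)
andgate = torch.tensor([0, 0, 0, 1], dtype=torch.float32)
sigma = torch.sigmoid(torch.mv(X, w))
andhat = (sigma >= 0.5).float()  # vectorized threshold, no Python loop
print(torch.allclose(andhat, andgate))  # True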
# Step function, AND gate
import torch
X = torch.tensor([[1, 0, 0], [1, 1, 0], [1, 0, 1], [1, 1, 1]], dtype=torch.float32)
andgate = torch.tensor([[0], [0], [0], [1]], dtype=torch.float32)
w = torch.tensor([-0.2, 0.15, 0.15], dtype=torch.float32)  # b, w1, w2

def LinearRwithsign(X, w):
    zhat = torch.mv(X, w)
    # Step function: 1 when zhat >= 0, else 0
    andhat = torch.tensor([int(x) for x in zhat >= 0], dtype=torch.float32)  # int(True)=1, int(False)=0
    return zhat, andhat  # return the prediction andhat, not the global andgate
zhat, andhat = LinearRwithsign(X, w)
print(andgate)
print(andhat)
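
PyTorch also has a built-in step function; here is a sketch with torch.heaviside (my substitution, not the original author's code):

import torch
X = torch.tensor([[1, 0, 0], [1, 1, 0], [1, 0, 1], [1, 1, 1]], dtype=torch.float32)
w = torch.tensor([-0.2, 0.15, 0.15], dtype=torch.float32)
zhat = torch.mv(X, w)
# The second argument of heaviside is the value used where zhat == 0,
# so 1.0 reproduces the zhat >= 0 rule above
andhat = torch.heaviside(zhat, torch.tensor(1.0))
print(andhat)  # tensor([0., 0., 0., 1.])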
import torch
X = torch.tensor([[0, 0], [1, 0], [0, 1], [1, 1]], dtype=torch.float32)
torch.random.manual_seed(420)
dense = torch.nn.Linear(2, 1)
zhat = dense(X)
sigma = torch.sigmoid(zhat)  # F.torch.sigmoid only works by accident; call torch.sigmoid directly
y = [int(x) for x in sigma >= 0.5]
print(y)
# Sigmoid AND gate
import torch
X = torch.tensor([[0, 0], [1, 0], [0, 1], [1, 1]], dtype=torch.float32)
# andgate = torch.tensor([[0], [0], [0], [1]])

torch.random.manual_seed(200)  # a different seed gives different (still untrained) weights
dense = torch.nn.Linear(2, 1)
zhat = dense(X)
sigma = torch.sigmoid(zhat)
y = [int(x) for x in sigma >= 0.5]
print(y)
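
Since these weights are random and untrained, whether y matches the AND gate depends entirely on the seed; a sketch of that check across the two seeds used above:

import torch
andgate = [0, 0, 0, 1]
X = torch.tensor([[0, 0], [1, 0], [0, 1], [1, 1]], dtype=torch.float32)
for seed in (420, 200):
    torch.random.manual_seed(seed)
    dense = torch.nn.Linear(2, 1)
    sigma = torch.sigmoid(dense(X))
    y = [int(x) for x in sigma >= 0.5]
    print(seed, y, y == andgate)  # untrained weights rarely reproduce the gate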
# Softmax function
import torch
from torch.nn import functional as F
X = torch.tensor([[0, 0], [1, 0], [0, 1], [1, 1]], dtype=torch.float32)
torch.random.manual_seed(420)
dense = torch.nn.Linear(2, 3)  # 3 output classes
zhat = dense(X)
print(zhat)
sigma = F.softmax(zhat, dim=1)  # normalize over the class dimension; each row sums to 1
print(sigma)
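
A sanity check (added for illustration): F.softmax matches the hand-written exp/sum formula, and every row sums to 1:

import torch
from torch.nn import functional as F
torch.random.manual_seed(420)
dense = torch.nn.Linear(2, 3)
X = torch.tensor([[0, 0], [1, 0], [0, 1], [1, 1]], dtype=torch.float32)
zhat = dense(X)
sigma = F.softmax(zhat, dim=1)
manual = torch.exp(zhat) / torch.exp(zhat).sum(dim=1, keepdim=True)  # softmax by hand
print(torch.allclose(sigma, manual))  # True
print(sigma.sum(dim=1))  # each row sums to 1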