2022.3.1 Research Notes
《pytorch深度学习入门》 (Introduction to Deep Learning with PyTorch), 曾芃壹
3.4 Logistic Regression
3. Maximum Likelihood and Cross-Entropy
Example: tossing a coin of irregular shape (its probability of heads is unknown).
Likelihood: the product of the probabilities of every observed sample outcome.
Maximizing the likelihood P(y|X, w) is the same as minimizing the loss function L(w), because the two are related by a -log: L(w) = -log P(y|X, w).
A loss function derived this way from maximum likelihood is called cross-entropy.
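To make the -log relation concrete, a short derivation sketch for binary labels y_i ∈ {0, 1} (the symbols p_i and N are my notation, not the book's). Let p_i be the model's predicted probability that sample i comes up heads; for N independent tosses,

P(y \mid X, w) = \prod_{i=1}^{N} p_i^{y_i} (1 - p_i)^{1 - y_i}

Taking -log turns the product into a sum:

L(w) = -\log P(y \mid X, w) = -\sum_{i=1}^{N} \left[ y_i \log p_i + (1 - y_i) \log (1 - p_i) \right]

which is exactly the binary cross-entropy.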
Cross-entropy is a common loss function for classification problems; PyTorch ships it built in as nn.CrossEntropyLoss().
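A minimal usage sketch of nn.CrossEntropyLoss (the logit and target values below are made up for illustration): it expects raw, unnormalized scores of shape (N, C) plus class-index targets of shape (N,), and applies log-softmax internally.

import torch
from torch import nn

criterion = nn.CrossEntropyLoss()
logits = torch.tensor([[2.0, -1.0], [0.5, 1.5]])  # made-up scores for 2 samples, 2 classes
targets = torch.tensor([0, 1])  # correct class index for each sample
print(criterion(logits, targets).item())  # scalar loss; smaller when scores favor the correct class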
4. Logistic Regression Example
import torch
import matplotlib.pyplot as plt
from torch import nn, optim
cluster = torch.ones(500, 2)
data0 = torch.normal(4 * cluster, 2)   # 500 points centered at (4, 4)
data1 = torch.normal(-4 * cluster, 2)  # 500 points centered at (-4, -4)
label0 = torch.zeros(500)  # 500 labels of class 0
label1 = torch.ones(500)   # 500 labels of class 1
x = torch.cat((data0, data1)).type(torch.FloatTensor)  # merge data0 and data1
y = torch.cat((label0, label1)).type(torch.LongTensor)  # merge label0 and label1
plt.scatter(x.numpy()[:, 0], x.numpy()[:, 1], c=y.numpy(), s=10, lw=0, cmap='RdYlGn')  # class 0 plotted in red, class 1 in green
plt.show()
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.linear = nn.Linear(2, 2)  # 2 input features, 2 class scores

    def forward(self, x):
        # linear layer followed by the sigmoid activation
        # (note: nn.CrossEntropyLoss applies log-softmax itself, so the
        # sigmoid here follows the book rather than being required)
        x = self.linear(x)
        x = torch.sigmoid(x)
        return x
CUDA = torch.cuda.is_available()
if CUDA:
    net = Net().cuda()
    inputs = x.cuda()
    target = y.cuda()
else:
    net = Net()
    inputs = x
    target = y
optimizer = optim.SGD(net.parameters(), lr=0.02)
criterion = nn.CrossEntropyLoss()
def draw(output):
    if CUDA:
        output = output.cpu()  # move back to CPU before converting to NumPy
    plt.cla()
    output = torch.max(output, 1)[1]  # predicted class = index of the larger score
    pred_y = output.data.numpy().squeeze()
    target_y = y.numpy()
    plt.scatter(x.numpy()[:, 0], x.numpy()[:, 1], c=y.numpy(), s=10, lw=0, cmap='RdYlGn')
    accuracy = sum(pred_y == target_y) / 1000.0  # 1000 samples in total
    plt.text(1.5, -4, 'Accuracy=%s' % accuracy, fontdict={'size': 20, 'color': 'red'})
    plt.pause(0.1)
def train(model, criterion, optimizer, epochs):  # training loop
    for epoch in range(epochs):
        output = model(inputs)  # forward pass: feed inputs through the network
        loss = criterion(output, target)  # compute the loss
        optimizer.zero_grad()  # clear the accumulated gradients
        loss.backward()  # backpropagate to compute gradients
        optimizer.step()  # update the weights
        if epoch % 40 == 0:
            draw(output)  # redraw the scatter plot every 40 epochs
train(net, criterion, optimizer, 1000)
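Once training finishes, the net can classify new points; a small inference sketch (the test coordinates are made up, chosen near the two cluster centers):

with torch.no_grad():
    test = torch.tensor([[4.0, 4.0], [-4.0, -4.0]])
    if CUDA:
        test = test.cuda()
    pred = torch.max(net(test), 1)[1]
    print(pred)  # expected tensor([0, 1]): class 0 near (4, 4), class 1 near (-4, -4)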