[PyTorch] Implementing softmax regression from scratch

This article walks through building and training a simple softmax regression model on the Fashion-MNIST dataset, covering the key steps: data loading, model definition, the loss function, and performance evaluation.
import torch
from IPython import display
from d2l import torch as d2l

# Load the Fashion-MNIST training and test sets in minibatches of 256
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
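Fashion-MNIST consists of 60,000 training images and 10,000 test images: 28×28 grayscale images spread over 10 clothing categories, which is where the 784 input features and 10 output classes below come from.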
# Each 28x28 image is flattened into a 784-dimensional input vector;
# there are 10 output classes
num_inputs = 784
num_outputs = 10

# Initialize the weights with small Gaussian noise and the biases with zeros
w = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)
b = torch.zeros(num_outputs, requires_grad=True)
# Review how sum behaves along each axis while keeping the dimension
X = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
X.sum(0, keepdim=True), X.sum(1, keepdim=True)
def softmax(X):
    X_exp = torch.exp(X)                    # exponentiate every element
    partition = X_exp.sum(1, keepdim=True)  # row-wise normalization constant
    return X_exp / partition                # broadcasting divides each row by its sum
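In math terms, the function computes, for each row of X,

$$\mathrm{softmax}(\mathbf{X})_{ij} = \frac{\exp(X_{ij})}{\sum_k \exp(X_{ik})}$$

so every entry is positive and each row sums to 1. (Note that this naive version can overflow for large logits; the usual fix is to subtract the row-wise maximum before exponentiating.)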
# Sanity check on random inputs
X = torch.normal(0, 1, (2, 5))
X_prob = softmax(X)
X_prob, X_prob.sum(1)
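As expected, X_prob contains only values in (0, 1), and each of its rows sums to 1 (up to floating-point error).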
# Implement the softmax regression model
def net(X):
    return softmax(torch.matmul(X.reshape((-1, w.shape[0])), w) + b)
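The reshape flattens each 28×28 input image into a row vector of length w.shape[0] = 784, so the matrix product with w yields one 10-dimensional logit vector per image.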
# Create sample data y_hat containing the predicted probabilities of 2 samples
# over 3 classes, and use y as the index into y_hat

y = torch.tensor([0, 2])
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y_hat[[0, 1], y]
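This fancy indexing picks, for each sample, the probability assigned to its true class: y_hat[0, 0] = 0.1 and y_hat[1, 2] = 0.5, i.e. tensor([0.1000, 0.5000]).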
# Define the cross-entropy loss function
def cross_entropy(y_hat, y):
    return -torch.log(y_hat[range(len(y_hat)), y])

cross_entropy(y_hat, y)
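For the toy data above this returns tensor([2.3026, 0.6931]), i.e. -log(0.1) and -log(0.5): the loss is large when the model assigns low probability to the true class.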
# Compare the predicted classes with the ground-truth y
def accuracy(y_hat, y):
    """Compute the number of correct predictions."""
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)  # index of the largest element in each row
    cmp = y_hat.type(y.dtype) == y
    return float(cmp.type(y.dtype).sum())

accuracy(y_hat, y) / len(y)
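Here the result is 0.5: argmax picks class 2 for both samples, so only the second prediction (true label 2) is correct.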
# Evaluate the accuracy of any model net
def evaluate_accuracy(net, data_iter):
    """Compute the accuracy of a model on the given dataset."""
    if isinstance(net, torch.nn.Module):
        net.eval()  # set the model to evaluation mode
    metric = Accumulator(2)  # number of correct predictions, number of predictions
    for X, y in data_iter:
        metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]

# An Accumulator instance keeps 2 running sums: the number of correct
# predictions and the total number of predictions
class Accumulator:
    """Accumulate sums over n variables."""
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
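A quick sanity check of how Accumulator behaves (a minimal illustrative snippet, not from the original post):

metric = Accumulator(2)
metric.add(1, 2)   # running sums become [1.0, 2.0]
metric.add(3, 4)   # running sums become [4.0, 6.0]
metric[0], metric[1]  # (4.0, 6.0)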
evaluate_accuracy(net, test_iter)
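With randomly initialized weights, the model guesses essentially at random among 10 classes, so this accuracy should come out near 0.1.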

def train_epoch_ch3(net, train_iter, loss, updater):
    """Train the model for one epoch."""
    if isinstance(net, torch.nn.Module):
        net.train()  # set the model to training mode
    # Sum of training loss, sum of correct predictions, number of examples
    metric = Accumulator(3)
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            # Using a built-in PyTorch optimizer
            updater.zero_grad()
            l.mean().backward()
            updater.step()
        else:
            # Using our from-scratch updater
            l.sum().backward()
            updater(X.shape[0])
        metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    # Return the average training loss and training accuracy
    return metric[0] / metric[2], metric[1] / metric[2]
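The two branches exist because loss conventions differ: a built-in PyTorch optimizer is typically paired with a loss averaged over the batch (hence l.mean().backward()), while our from-scratch updater receives the summed per-example gradients and divides by the batch size itself.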

# The training function
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                            legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics

# Minibatch SGD updater for the from-scratch parameters
lr = 0.1

def updater(batch_size):
    return d2l.sgd([w, b], lr, batch_size)

num_epochs = 10
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, updater)
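d2l.sgd performs minibatch stochastic gradient descent; for reference, it is equivalent to the following sketch (based on the d2l book's definition):

def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent."""
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size  # gradient step, scaled by batch size
            param.grad.zero_()                     # reset the gradient for the next step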