# Softmax regression

# The softmax operation takes a vector and maps it to a probability distribution.
# Softmax regression suits classification problems: its output is the probability
# distribution over classes produced by the softmax operation.
# Cross-entropy is a good measure of the difference between two probability
# distributions: it measures the number of bits needed to encode the data given the model.

import torch
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l

# ToTensor converts PIL images to 32-bit float tensors and divides by 255,
# so all pixel values lie in [0, 1].
# (Fix: the original assigned `trans` twice in a row; the duplicate is removed.)
trans = transforms.ToTensor()
mnist_train = torchvision.datasets.FashionMNIST(
    root="../data", train=True, transform=trans, download=False)
mnist_test = torchvision.datasets.FashionMNIST(
    root="../data", train=False, transform=trans, download=False)

print(len(mnist_train), len(mnist_test))  # 60000 train / 10000 test examples
print(mnist_train[0][0].shape)  # one image tensor: (1, 28, 28)

# Converts numeric label indices to their text class names.
def get_fashion_mnist_labels(labels):  #@save
    """Return the Fashion-MNIST text label for each numeric index in `labels`."""
    names = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
             'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    return [names[int(idx)] for idx in labels]

# Read the training set in mini-batches.
batch_size = 256

# NOTE(review): the original comment claimed 4 worker processes, but the
# function returns 0, i.e. data is loaded in the main process (safer on
# Windows / in notebooks).
def get_dataloader_workers():
    return 0

train_iter = data.DataLoader(mnist_train,batch_size=batch_size,shuffle=True,
                             num_workers=get_dataloader_workers())
timer = d2l.Timer()
# Peek at one batch to check the tensor shape: (batch, channel, height, width).
for x, y in train_iter:
    print(x.shape)
    break

# torchvision.transforms also provides the Resize transform, which rescales an
# input image to a different pixel size (not used below).
import torch
from IPython import display
from  d2l import torch as d2l
from torchvision import transforms
import torchvision
from torch.utils import data

batch_size =256

# NOTE(review): this repeats the dataset/DataLoader setup from earlier in the
# file (the tutorial restarts here for the scratch implementation); the
# duplication is kept as-is.
trans = transforms.ToTensor()
mnist_test = torchvision.datasets.FashionMNIST(
    root="../data", train=False, transform=trans, download=False)
mnist_train = torchvision.datasets.FashionMNIST(
    root="../data", train=True, transform=trans, download=False)

# 0 workers: load data in the main process.
def get_dataloader_workers():
    return 0

test_iter = data.DataLoader(mnist_test,batch_size=batch_size,shuffle=True,
                             num_workers=get_dataloader_workers())
train_iter = data.DataLoader(mnist_train,batch_size=batch_size,shuffle=True,
                             num_workers=get_dataloader_workers())


# Each 28x28 Fashion-MNIST image flattens to a 784-long input vector;
# there are 10 output classes.
num_inputs = 784
num_outputs = 10

# Weights drawn from N(0, 0.01^2), bias zeros; both track gradients for SGD.
w = torch.normal(0,0.01,size = (num_inputs,num_outputs) ,requires_grad= True)
b = torch.zeros(num_outputs,requires_grad=True)

# sum(0, keepdim=True) sums down the rows (result keeps a row axis);
# softmax below uses sum(1, keepdim=True) to sum across each row.
# x = torch.tensor([[1.0,2.0,3.0],[4.0,5.0,6.0]])
# x = x.sum(0,keepdim = True)
# print(x)

def softmax(X):
    """Row-wise softmax of a 2-D tensor.

    Fix: subtract each row's max before exponentiating so exp() cannot
    overflow for large logits; the result is mathematically unchanged
    because softmax is shift-invariant.
    """
    X_shifted = X - X.max(dim=1, keepdim=True).values
    X_exp = torch.exp(X_shifted)
    partition = X_exp.sum(1, keepdim=True)
    return X_exp / partition  # broadcasting divides each row by its sum

# Sanity check: every softmax output row is non-negative and sums to 1.
x = torch.normal(0,1,(2,5))
x_prob = softmax(x)
print(x_prob,x_prob.sum(1))

# Define the model: a single linear layer followed by softmax.
def net(x):
    """Flatten each image to a 784-vector, apply the linear map, then softmax."""
    flattened = x.reshape((-1, w.shape[0]))
    logits = torch.matmul(flattened, w) + b
    return softmax(logits)

# Define the loss function.
def cross_entropy(y_hat, y):
    """Per-example cross-entropy: -log of the probability y_hat assigns to the true class y."""
    picked = y_hat[range(len(y_hat)), y]
    return -torch.log(picked)


# Number of correct predictions in a batch.
def accuracy(y_hat, y):
    """Return how many predictions in y_hat match the labels y."""
    preds = y_hat
    if len(preds.shape) > 1 and preds.shape[1] > 1:
        # Collapse per-class scores to the predicted class index per row.
        preds = preds.argmax(axis=1)
    hits = preds.type(y.dtype) == y
    return float(hits.type(y.dtype).sum())

# Example: argmax predictions are [2, 2]; only the second matches y,
# so 1 correct out of 2 prints 0.5.
y = torch.tensor([0, 2])
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
print(accuracy(y_hat,y)/len(y))


class Accumulator:
    """Maintain running sums over a fixed number of metrics."""

    def __init__(self, n):
        # One float cell per tracked metric, all starting at zero.
        self.data = [0.0 for _ in range(n)]

    def add(self, *args):
        # Element-wise add the new values onto the stored totals.
        updated = []
        for total, inc in zip(self.data, args):
            updated.append(total + float(inc))
        self.data = updated

    def reset(self):
        # Zero every cell while keeping the metric count.
        self.data = [0.0 for _ in self.data]

    def __getitem__(self, idx):
        return self.data[idx]

# Compute the accuracy of an arbitrary model `net` on a given dataset.
def evaluate_accuracy(net , data_iter):
    """Return the fraction of examples in data_iter that net classifies correctly."""
    if isinstance(net, torch.nn.Module):
        net.eval()  # switch to evaluation mode
    metric = Accumulator(2)  # (number correct, number seen)
    with torch.no_grad():  # gradients are not needed for evaluation
        for features, labels in data_iter:
            metric.add(accuracy(net(features), labels), labels.numel())
    return metric[0] / metric[1]

# print(evaluate_accuracy(net,test_iter))

def train_epoch_ch3(net,train_iter,loss,updater):
    '''Train the model for one epoch; return (mean loss, accuracy).

    net: the model (torch.nn.Module or a plain callable)
    train_iter: iterable of (features, labels) mini-batches
    loss: per-example loss function (no reduction)
    updater: a torch optimizer, or a callable taking the batch size
    '''
    if isinstance(net,torch.nn.Module):
        net.train()
        # switch the network to training mode
    # Running totals: training loss sum, correct-prediction count, sample count.
    metric = Accumulator(3)
    for x ,y in train_iter:
        y_hat = net(x)
        l = loss(y_hat,y)
        if isinstance(updater,torch.optim.Optimizer):
            # Built-in PyTorch optimizer: loss is per-example, so
            # back-propagate the mean before stepping.
            updater.zero_grad()
            l.mean().backward()
            updater.step()
        else:
            # Custom updater (e.g. d2l.sgd): back-propagate the sum and let
            # the updater divide by the batch size.
            l.sum().backward()
            updater(x.shape[0])
        metric.add(float(l.sum()),accuracy(y_hat,y),y.numel())
    return metric[0] / metric[2], metric[1] / metric[2]

#定义一个动画中绘制数据的实用程序类Animator
class Animator:  #@save
    """Incrementally draw one or more data lines during training (notebook animation)."""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        # Incrementally plot multiple lines.
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]  # normalize to a list even for a single axes
        # Capture axis configuration in a lambda so it can be re-applied
        # after each cla() in add().
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        # Append one data point per line, then redraw the whole figure.
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n  # broadcast a scalar x across all lines
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()  # clear and replot the full history each call
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)  # notebook-style in-place animation

#实现一个训练函数,会在train_iter访问到的数据训练一个模型net
#在每个迭代周期结束后,利用test_iter访问到的测试数据对模型进行评估
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):  #@save
    """Train a model (defined in Chapter 3): run num_epochs epochs,
    evaluating test accuracy after each and animating the three curves."""
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    # Sanity checks that training converged to reasonable values.
    # NOTE(review): assert is stripped under `python -O`; raise an exception
    # instead if these checks must always run.
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc

# Learning rate for minibatch SGD.
lr = 0.1

def updater(batch_size):
    # Thin wrapper so train_epoch_ch3 can call a plain function;
    # d2l.sgd updates w and b in place, scaling the step by batch_size.
    return d2l.sgd([w, b], lr, batch_size)

num_epochs = 10
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, updater)

#预测
def predict_ch3(net, test_iter, n=6):  #@save
    """Predict labels (defined in Chapter 3): show the first n test images
    titled with their true and predicted class names."""
    for X, y in test_iter:
        break  # only the first batch is needed
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))
    titles = [true +'\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(
        X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])

predict_ch3(net, test_iter)
import torch
from IPython import display
from  d2l import torch as d2l
from torchvision import transforms
import torchvision
from torch.utils import data
import matplotlib.pyplot as plt
from torch import nn

batch_size =256

# NOTE(review): third repetition of the dataset/DataLoader setup — the
# tutorial restarts here for the concise (nn.Module) implementation.
trans = transforms.ToTensor()
mnist_test = torchvision.datasets.FashionMNIST(
    root="../data", train=False, transform=trans, download=False)
mnist_train = torchvision.datasets.FashionMNIST(
    root="../data", train=True, transform=trans, download=False)

# 0 workers: load data in the main process.
def get_dataloader_workers():
    return 0

test_iter = data.DataLoader(mnist_test,batch_size=batch_size,shuffle=True,
                             num_workers=get_dataloader_workers())
train_iter = data.DataLoader(mnist_train,batch_size=batch_size,shuffle=True,
                             num_workers=get_dataloader_workers())

# Put a flatten layer (nn.Flatten) before the linear layer so each
# (1, 28, 28) image is reshaped to a 784-vector of network input.
net = nn.Sequential(nn.Flatten(),nn.Linear(784,10))

# Parameter initialization hook for net.apply.
def init_weights(m):
    """Give every nn.Linear module weights drawn from N(0, 0.01^2)."""
    is_linear = type(m) == nn.Linear
    if is_linear:
        nn.init.normal_(m.weight, std=0.01)

net.apply(init_weights);

# CrossEntropyLoss combines log-softmax and negative log-likelihood;
# reduction='none' keeps per-example losses so train_epoch_ch3 can
# mean/sum them itself.
loss = nn.CrossEntropyLoss(reduction='none')

# Minibatch SGD over all model parameters.
trainer = torch.optim.SGD(net.parameters(), lr=0.1)

num_epochs = 10
d2l.train_ch3(net,train_iter,test_iter,loss,num_epochs,trainer)
plt.show()

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值