09 Multi-class Classification

Multi-class classification: computing softmax and the negative log-likelihood loss by hand, using PyTorch's CrossEntropyLoss, and training a fully connected classifier on MNIST.

import torch
import numpy as np
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
import os

os.environ['CUDA_LAUNCH_BLOCKING'] = '1'   # run CUDA kernels synchronously so errors point at the offending call

# A simple softmax + NLL example by hand
y = np.array([1, 0, 0])          # one-hot encoded target (true class is index 0)
z = np.array([0.2, 0.1, -0.1])   # raw scores (logits) for the 3 classes
# softmax: exponentiate and normalize so the outputs sum to 1
y_pred = np.exp(z) / np.exp(z).sum()
# Negative log-likelihood loss
loss = (- y * np.log(y_pred)).sum()
print(loss)
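
# Quick check of the numbers above: the softmax outputs form a probability
# distribution, and because y is one-hot with the 1 at index 0, the summed
# NLL reduces to -log of the predicted probability for class 0.
print(y_pred.sum())         # ~1.0
print(-np.log(y_pred[0]))   # same value as loss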

y = torch.LongTensor([0])               # the target must be a LongTensor of class indices
z = torch.tensor([[0.2, 0.1, -0.1]])    # raw logits, shape [batch, num_classes]
# CrossEntropyLoss = LogSoftmax + NLLLoss
criterion = torch.nn.CrossEntropyLoss()
loss = criterion(z, y)
print(loss)
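
# The decomposition can be checked directly: CrossEntropyLoss applies LogSoftmax
# to the logits and then NLLLoss, so feeding log-probabilities to NLLLoss by hand
# reproduces the same value (a minimal check, not needed for training).
log_probs = torch.nn.functional.log_softmax(z, dim=1)
print(torch.nn.NLLLoss()(log_probs, y))   # matches loss above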

# Training a classifier on the MNIST dataset
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))   # normalize with MNIST's mean and std so inputs are roughly zero-mean, unit-variance
])

# Download the data
train_dataset = datasets.MNIST(root="./datasets/mnist/", train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root="./datasets/mnist/", train=False, download=True, transform=transform)
# Load the data into batched DataLoaders
tra_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
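
# A quick look at one batch (illustrative only): images arrive with shape
# [batch_size, 1, 28, 28] and labels as integer class indices of shape [batch_size].
sample_images, sample_labels = next(iter(tra_dataloader))
print(sample_images.shape, sample_labels.shape)   # torch.Size([64, 1, 28, 28]) torch.Size([64])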

# Define the network
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.linear1 = torch.nn.Linear(784, 512)
        self.linear2 = torch.nn.Linear(512, 256)
        self.linear3 = torch.nn.Linear(256, 128)
        self.linear4 = torch.nn.Linear(128, 64)
        self.linear5 = torch.nn.Linear(64, 10)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        x = x.view(-1, 784)     # flatten each 1x28x28 image into a 784-dim vector
        x = self.relu(self.linear1(x))
        x = self.relu(self.linear2(x))
        x = self.relu(self.linear3(x))
        x = self.relu(self.linear4(x))
        x = self.linear5(x)   # CrossEntropyLoss = LogSoftmax + NLLLoss, so the last layer returns raw logits with no activation
        return x
model = Net()
# model = model.cuda()
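
# The network maps each image to 10 raw class scores (logits); a dummy forward
# pass with a placeholder tensor confirms the output shape (illustrative only).
dummy_input = torch.randn(2, 1, 28, 28)
print(model(dummy_input).shape)   # torch.Size([2, 10])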

criterion = torch.nn.CrossEntropyLoss()
# criterion = criterion.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)   # momentum adds "inertia" to the updates, helping them roll through saddle points
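
# For reference, PyTorch's SGD-with-momentum update (default dampening of 0) is:
#     v <- momentum * v + grad
#     w <- w - lr * v
# The velocity v accumulates past gradients, which is the "inertia" mentioned above.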

def train(epoch):
    running_loss = 0.0
    for i, (inputs, targets) in enumerate(tra_dataloader):
        # inputs = inputs.cuda()
        # targets = targets.cuda()
        optimizer.zero_grad()   # gradients accumulate by default, so clear them every iteration
        # forward
        y_pred = model(inputs)
        l = criterion(y_pred, targets)
        # backward
        l.backward()
        # update
        optimizer.step()

        running_loss += l.item()
        if i % 300 == 299:     # print the average loss every 300 iterations
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():      # no gradient tracking needed during evaluation
        for data in test_dataloader:
            images, labels = data
            # images, labels = images.cuda(), labels.cuda()
            outputs = model(images)
            _, predicted = torch.max(outputs, dim=1)   # index of the largest logit is the predicted class
            total += labels.size(0)    # running count of samples; labels has shape (N,)
            correct += (predicted == labels).sum().item()    # running count of correct predictions
    print("Accuracy on test set: %d %%" % (100 * correct / total))   # report test accuracy



if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test()