from __future__ import print_function
import torch as pt
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision as ptv
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
# MNIST datasets (expects the files already present under ./train and
# ./test — download=False will not fetch anything).
_to_tensor = ptv.transforms.ToTensor()
train_set = ptv.datasets.MNIST("./train", train=True, transform=_to_tensor, download=False)
test_set = ptv.datasets.MNIST("./test", train=False, transform=_to_tensor, download=False)
# Mini-batch loaders: batches of 100 samples, no shuffling.
train_dataset = pt.utils.data.DataLoader(train_set, batch_size=100)
test_dataset = pt.utils.data.DataLoader(test_set, batch_size=100)
class MLP(pt.nn.Module):
    """Three-layer fully connected classifier for 28x28 MNIST digits.

    forward() accepts a batch of images (any shape that flattens to
    (N, 784)) and returns raw logits of shape (N, 10).  Pair with
    CrossEntropyLoss, which applies log-softmax internally.
    """

    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = pt.nn.Linear(784, 512)
        self.fc2 = pt.nn.Linear(512, 128)
        self.fc3 = pt.nn.Linear(128, 10)

    def forward(self, din):
        # Flatten e.g. (N, 1, 28, 28) batches to (N, 784).
        din = din.view(-1, 28 * 28)
        dout = pt.nn.functional.relu(self.fc1(din))
        dout = pt.nn.functional.relu(self.fc2(dout))
        # BUG FIX: the original returned softmax(fc3(dout)) and fed that
        # into CrossEntropyLoss, which applies log-softmax itself — the
        # double softmax flattens gradients and stalls training (it also
        # called softmax without a dim argument).  Return logits instead;
        # argmax-based accuracy is unaffected.
        return self.fc3(dout)
# Model, optimizer and loss function.  Everything stays on the CPU:
# the original moved only the loss to CUDA (`.cuda()`) while the model
# and data stayed on CPU — inconsistent, and it crashes outright on a
# machine without a GPU.
model = MLP().cpu()
print(model)
optimizer = pt.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
lossfunc = pt.nn.CrossEntropyLoss()
# accuracy
def AccuarcyCompute(pred, label):
    """Return the fraction of rows in `pred` whose argmax matches `label`.

    pred  -- (N, C) torch tensor of class scores (logits or probabilities)
    label -- (N,) torch tensor of integer class indices
    """
    # .detach() replaces the deprecated .data access; both drop the
    # autograd graph so the tensor can be converted to numpy.
    pred = pred.cpu().detach().numpy()
    label = label.cpu().detach().numpy()
    correct = np.float32(np.argmax(pred, 1) == label)
    return np.mean(correct)
# test accuracy (stale example: the function expects torch tensors, so
# these raw numpy arrays would fail on the .cpu() call — convert first)
# print(AccuarcyCompute(
#     np.array([[1,10,6],[0,2,5]],dtype=np.float32),
#     np.array([[1,2,8],[1,2,5]],dtype=np.float32)))
# Training loop: 4 epochs over the full training set.
for epoch in range(4):
    for i, data in enumerate(train_dataset):
        optimizer.zero_grad()
        inputs, labels = data
        # torch.autograd.Variable is deprecated (no-op since PyTorch
        # 0.4): tensors from the DataLoader are used directly — they
        # already live on the CPU, matching the model.
        outputs = model(inputs)
        loss = lossfunc(outputs, labels)
        loss.backward()
        optimizer.step()
        # Report running training accuracy every 100 mini-batches.
        if i % 100 == 0:
            print(i, ":", AccuarcyCompute(outputs, labels))
# (removed: non-code web-page footer text — "insert code snippet here" /
# blog publication metadata — which is not valid Python and would break
# the script)