1. Imports
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
2. Training and test sets
The dataset we use is FashionMNIST, an image dataset, and we use it for a classification task.
A Dataset stores the samples together with their corresponding labels (a quick indexing example follows the download code below).
A DataLoader wraps an iterable around a Dataset.
# Download training data from open datasets.
training_data = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor(),
)

# Download test data from open datasets.
test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor(),
)
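As a quick illustration of the Dataset interface described above (a minimal sketch, not part of the original tutorial): a Dataset supports len() and integer indexing, and each item is an (image, label) pair.

# A Dataset item is a (tensor, label) pair; with ToTensor the image
# has shape [1, 28, 28] and the label is an integer class index
img, label = training_data[0]
print(img.shape, label)  # torch.Size([1, 28, 28]) and an int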
'''
The Dataset is passed to DataLoader as an argument. DataLoader wraps an
iterable around the dataset and supports automatic batching, shuffling, and
multi-process data loading (see the sketch after the shape check below).
'''
batch_size = 64
# Create data loaders.
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break
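The loaders above use the DataLoader defaults. As a minimal sketch of the shuffling and multi-process loading mentioned in the comment above (shuffle and num_workers are standard DataLoader arguments; the worker count and the variable name shuffled_loader are just for illustration):

# Sketch: reshuffle each epoch and load batches with two worker processes
shuffled_loader = DataLoader(training_data, batch_size=batch_size, shuffle=True, num_workers=2)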
3. Defining the model
Create a class that inherits from nn.Module to define the network structure.
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")
# Define model
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10)
        )

    def forward(self, x):
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits
model = NeuralNetwork().to(device)
print(model)
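As a quick sanity check before training (a minimal sketch, not a step in the original tutorial), one random tensor shaped like a FashionMNIST image can be pushed through the model to confirm it produces a 10-class logit vector:

# One random 28x28 "image"; nn.Flatten keeps the batch dimension,
# so the output should have shape [1, 10]
X = torch.rand(1, 28, 28, device=device)
logits = model(X)
print(logits.shape)  # torch.Size([1, 10])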
4. Optimizing the model parameters
Define the loss function and the optimizer:
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
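For intuition, a toy example with made-up numbers (not from the tutorial): nn.CrossEntropyLoss expects raw logits of shape [N, C] and integer class indices of shape [N], and applies log-softmax internally, which is why the model above has no softmax layer.

# Toy illustration (hypothetical values): the loss is small when the
# correct class has the largest logit, and larger otherwise
example_logits = torch.tensor([[3.0, 0.1, 0.2]])
print(loss_fn(example_logits, torch.tensor([0])).item())  # ~0.11
print(loss_fn(example_logits, torch.tensor([1])).item())  # ~3.0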
The training loop with backpropagation:
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
The evaluation code run during training:
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
Training runs over several epochs. In each epoch the model updates its parameters to make better predictions. As training progresses, accuracy should rise steadily and the loss should fall.
epochs = 5
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer)
    test(test_dataloader, model, loss_fn)
print("Done!")
5. Saving the model
A common way to save a model is to serialize its internal state dictionary, which contains the model parameters.
torch.save(model.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
6. Loading the model and making predictions
Loading a model involves re-creating the model structure and loading the state dictionary into it.
model = NeuralNetwork()
model.load_state_dict(torch.load("model.pth"))
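If the checkpoint was written on a GPU machine but is loaded on a CPU-only one, the saved tensors can be remapped with torch.load's map_location argument (a sketch of a standard parameter, not a step in the original tutorial):

# Sketch: force all saved tensors onto the CPU while loading
model.load_state_dict(torch.load("model.pth", map_location="cpu"))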
Once loaded, the model can be used to make predictions:
classes = [
    "T-shirt/top",
    "Trouser",
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle boot",
]
model.eval()
x, y = test_data[0][0], test_data[0][1]
with torch.no_grad():
    pred = model(x)
    predicted, actual = classes[pred[0].argmax(0)], classes[y]
    print(f'Predicted: "{predicted}", Actual: "{actual}"')
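Optionally (a small sketch beyond the tutorial), the raw logits can be turned into probabilities with softmax to inspect the model's confidence in its prediction:

# Softmax over the class dimension yields one probability per class
probs = nn.Softmax(dim=1)(pred)
print(f"Confidence: {probs[0].max().item():.3f}")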
7. References
https://pytorch.org/tutorials/beginner/basics/quickstart_tutorial.html