【PyTorch】Study Notes (Training Code Framework)

PyTorch Study Notes

  1. In PyTorch, a Tensor consists of two parts: data and grad. data holds the actual values, while grad stores the gradient computed during backpropagation, which is used to update the parameters.
  2. Gradients computed by loss.backward() accumulate across calls, so optimizer.zero_grad() must be called after each parameter update (or before the next backward pass) to clear the previous gradients (see the sketch after this list).
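A minimal sketch of both points, using a toy scalar parameter w: calling loss.backward() twice without zeroing doubles the stored gradient.

import torch

w = torch.tensor(2.0, requires_grad=True)

loss = w * 3
loss.backward()
print(w.data, w.grad)   # tensor(2.) tensor(3.)

loss = w * 3
loss.backward()
print(w.grad)           # tensor(6.) -- the second gradient accumulated onto the first

w.grad.zero_()          # what optimizer.zero_grad() does for every parameter
print(w.grad)           # tensor(0.)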

PyTorch training code is organized into four main modules:

  1. Prepare Data
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

batch_size = 64
transform = transforms.Compose([
			transforms.ToTensor(),
			# 0.1307 and 0.3081 are the mean and std of the MNIST training set
			transforms.Normalize((0.1307, ), (0.3081, ))
			])
train_dataset = datasets.MNIST(root='../dataset/mnist/',			
				train=True,
				download=True,
				transform=transform)
train_loader = DataLoader(train_dataset,
			shuffle=True,
			batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/',
				train=False,
				download=True,
				transform=transform)
test_loader = DataLoader(test_dataset,
			shuffle=False,
			batch_size=batch_size)
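A quick sanity check on the loaders above: each batch is an (inputs, targets) pair, and Normalize shifts the pixels to be roughly zero-mean.

images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([64, 1, 28, 28])
print(labels.shape)   # torch.Size([64])
print(images.mean())  # roughly 0 after Normalize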

  2. Design Model
class FCNet(torch.nn.Module):
	def __init__(self):
		super(FCNet, self).__init__()
		self.l1 = torch.nn.Linear(784, 512)
		self.l2 = torch.nn.Linear(512, 256)
		self.l3 = torch.nn.Linear(256, 128)
		self.l4 = torch.nn.Linear(128, 64)
		self.l5 = torch.nn.Linear(64, 10)
	def forward(self, x):
		# flatten data from (n, 1, 28, 28) to (n, 784)
		x = x.view(-1, 784)
		x = F.relu(self.l1(x))
		x = F.relu(self.l2(x))
		x = F.relu(self.l3(x))
		x = F.relu(self.l4(x))
		return self.l5(x)  # raw logits; no softmax here (CrossEntropyLoss handles it)
	
	
class CNNNet(torch.nn.Module):
	def __init__(self):
		super(CNNNet, self).__init__()
		self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
		self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
		self.pooling = torch.nn.MaxPool2d(2)
		self.fc = torch.nn.Linear(320, 10)
	def forward(self, x):
		batch_size = x.size(0)
		x = F.relu(self.pooling(self.conv1(x)))
		x = F.relu(self.pooling(self.conv2(x)))
		x = x.view(batch_size, -1)  # flatten (n, 20, 4, 4) to (n, 320)
		x = self.fc(x)
		return x
		
model = CNNNet()  # or FCNet(); both accept MNIST batches of shape (n, 1, 28, 28)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model.to(device)
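Why the CNN's final layer is Linear(320, 10): each 5x5 convolution shrinks the spatial size by 4 and each max-pooling halves it, so 28 -> 24 -> 12 -> 8 -> 4; with 20 output channels the flattened feature vector has 20 * 4 * 4 = 320 elements. A quick shape check with a dummy batch:

dummy = torch.zeros(1, 1, 28, 28).to(device)
print(model(dummy).shape)  # torch.Size([1, 10])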

  3. Construct Loss and Optimizer
criterion = torch.nn.CrossEntropyLoss()  # applies log-softmax internally, so the model outputs raw logits
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
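A small sketch of what criterion expects: raw logits of shape (N, C) and integer class indices of shape (N,); the log-softmax is applied inside the loss.

logits = torch.tensor([[2.0, 0.5, 0.1]])  # one sample, three classes
target = torch.tensor([0])                # ground-truth class index
print(criterion(logits, target))          # small loss: class 0 already has the largest logit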
  4. Train and Test
def train(epoch):
	running_loss = 0.0
	for batch_idx, data in enumerate(train_loader, 0):
		inputs, target = data
		
		inputs, target = inputs.to(device), target.to(device)
		optimizer.zero_grad()
		# forward + backward + update
		outputs = model(inputs)
		loss = criterion(outputs, target)
		loss.backward()
		optimizer.step()
		running_loss += loss.item()  # .item() extracts the Python float so the graph is not retained
		if batch_idx % 300 == 299:
			print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
			running_loss = 0.0
		
		
def test():
	correct = 0
	total = 0
	with torch.no_grad():  # disable gradient tracking during evaluation
		for data in test_loader:
			inputs, target = data
			inputs, target = inputs.to(device), target.to(device)
			outputs = model(inputs)
			_, predicted = torch.max(outputs, dim=1)  # index of the max logit per sample
			total += target.size(0)
			correct += (predicted == target).sum().item()
	print('Accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))
			
			
if __name__ == '__main__':
	for epoch in range(10):
		train(epoch)
		test()