B站刘二大人老师的《PyTorch深度学习实践》Lecture_08 重点回顾+代码复现
Lecture_08 加载数据集 Dataset and DataLoader
一、重点回顾
(一)三个术语:Epoch, Batch-Size, Iterations
Epoch:对所有的训练数据进行一次正向传播和反向传播
Batch-Size:每次正向和反向传播的训练样本数
Iteration:训练(即权重更新)的次数,每个epoch进行“训练样本数/Batch-Size”(不能整除时向上取整)次迭代
(二)DataLoader
(三)定义数据集与加载
1. 自己构建的数据集
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
# XXXDataset subclasses the abstract Dataset base class
class XXXDataset(Dataset):
    """Skeleton of a custom map-style dataset.

    Subclasses of Dataset must implement __getitem__ and __len__;
    __init__ is where the data is loaded or prepared.
    """

    def __init__(self):
        pass

    # magic method enabling index access: dataset[index]
    def __getitem__(self, index):
        pass

    # magic method returning the number of samples in the dataset
    def __len__(self):
        pass


dataset = XXXDataset()
train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True, num_workers=2)
# num_workers: number of worker subprocesses that load data in parallel
# (0 means data is loaded in the main process)
Dataset是一个抽象的类,不可实例化。DataLoader可以实例化。
应用实例:
class DiabetesDataset(Dataset):
    """Diabetes dataset loaded from a comma-separated numeric text file.

    Each row is one sample; the last column is the binary label
    (kept 2-D, shape (N, 1)), all preceding columns are features.
    """

    def __init__(self, filepath):
        raw = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        # features: every column but the last; label: last column kept 2-D
        self.x_data = torch.from_numpy(raw[:, :-1])
        self.y_data = torch.from_numpy(raw[:, [-1]])
        self.len = raw.shape[0]

    def __getitem__(self, index):
        # supports dataset[index] -> (features, label)
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        # number of rows loaded from the file
        return self.len
# build the dataset from the gzipped CSV (np.loadtxt opens .gz transparently)
dataset = DiabetesDataset('diabetes.csv.gz')
# mini-batches of 32, reshuffled every epoch, loaded by 2 worker subprocesses
train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True, num_workers=2)
2. 加载已有的数据集
以MNIST为例:
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
# download (if not cached) and load MNIST; ToTensor() converts PIL images
# to float tensors scaled into [0, 1]
train_dataset = datasets.MNIST(root='../dataset/mnist', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root='../dataset/mnist', train=False, transform=transforms.ToTensor(), download=True)
# shuffle training data each epoch; keep test order fixed for stable evaluation
train_loader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=False)
for batch_index, (inputs, target) in enumerate(train_loader):
    ...
(四)训练
# Training cycle (slide pseudo-code; training_epochs / total_batch are placeholders)
for epoch in range(training_epochs):
    # Loop over all batches
    for i in range(total_batch):
应用实例
for epoch in range(100):
    for i, data in enumerate(train_loader, 0):
        # data is the (inputs, labels) tuple the DataLoader collated for this batch
        inputs, labels = data
        y_pred = model(inputs)
        loss = criterion(y_pred, labels)
        print(epoch, i, loss.item())
        # standard step: clear gradients, backprop, update weights
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
# Equivalent loop that unpacks the batch directly in the for statement.
# Fix: the original read "from epoch in range(100):" — a typo for "for",
# which is a syntax error in Python.
for epoch in range(100):
    for i, (inputs, labels) in enumerate(train_loader, 0):
        y_pred = model(inputs)
        loss = criterion(y_pred, labels)
        print(epoch, i, loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
二、代码复现
import torch
from torch.utils.data import Dataset,DataLoader
import numpy as np
# 读取数据
class DiabetesDataset(Dataset):
    """In-memory dataset over a delimited numeric text file.

    Each row is one sample; the last column is the label, kept as an
    (N, 1) tensor so it matches the shape BCELoss expects, and all
    preceding columns are input features.

    Generalization: the field delimiter is now a parameter (default ','
    preserves the original behavior), so TSV and other delimited files
    also load without code changes.
    """

    def __init__(self, filepath, delimiter=','):
        # np.loadtxt transparently decompresses .gz files
        xy = np.loadtxt(filepath, delimiter=delimiter, dtype=np.float32)
        self.len = xy.shape[0]
        self.x_data = torch.from_numpy(xy[:, :-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):
        """Return the (features, label) pair at *index*."""
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        """Return the number of samples loaded from the file."""
        return self.len
# load the gzipped CSV (np.loadtxt opens .gz transparently)
dataset = DiabetesDataset('diabetes.csv.gz')
# batches of 32, reshuffled each epoch; num_workers omitted (see note at end of file)
train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True)
# 建立模型
class Model(torch.nn.Module):
    """Fully connected binary classifier: 8 -> 6 -> 4 -> 1.

    A sigmoid follows every linear layer; the final sigmoid squashes the
    output into (0, 1) so it can be fed directly to BCELoss.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        # Sigmoid holds no parameters, so one shared module suffices
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        # hidden activations O1 and O2, then the prediction y_hat
        o1 = self.sigmoid(self.linear1(x))
        o2 = self.sigmoid(self.linear2(o1))
        return self.sigmoid(self.linear3(o2))
model = Model()
# Build loss and optimizer: BCELoss expects probabilities in (0, 1),
# which the model's final sigmoid guarantees; plain SGD, lr = 0.01
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# 训练
# Training cycle: forward pass, loss, backward pass, weight update — per batch
for epoch in range(100):
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        y_pred = model(inputs)
        loss = criterion(y_pred, labels)
        print(epoch, batch_idx, loss.item())
        # clear stale gradients before backprop, then take one SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
加了 num_workers 多进程加载后报错:Windows 下 DataLoader 的 worker 以 spawn 方式启动,会重新导入主模块,因此训练代码必须放在 if __name__ == '__main__': 保护块内才能使用 num_workers;这里索性去掉了 num_workers。