一、课堂代码
import torch
from torch.utils.data import Dataset #抽象类,不能实例化,只能被其他的类继承
from torch.utils.data import DataLoader #帮助加载数据
import numpy as np
class DiabetesDataset(Dataset):
    """Diabetes dataset held entirely in memory (small enough to preload).

    Reads a comma-separated file whose last column is the label and whose
    remaining columns are the features.
    """

    def __init__(self, filepath):
        # Load the whole table once at construction time; keep features
        # and labels as separate tensors sharing the numpy buffer.
        raw = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = raw.shape[0]                       # number of samples N
        self.x_data = torch.from_numpy(raw[:, :-1])   # features, all but last column
        self.y_data = torch.from_numpy(raw[:, [-1]])  # labels, shape (N, 1)

    def __getitem__(self, index):
        # Enables dataset[index] -> (features, label) pair.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        # Enables len(dataset) -> number of samples.
        return self.len
# Build the dataset and a shuffling mini-batch loader over it.
dataset = DiabetesDataset('diabetes.csv.gz')
train_loader = DataLoader(dataset=dataset,
batch_size=32,
shuffle=True,
num_workers=0) # number of worker processes for batch loading; set 0 on Windows
class Model(torch.nn.Module):
    """Three-layer fully connected classifier: 8 -> 6 -> 4 -> 1.

    A sigmoid follows every linear layer; the final sigmoid squashes the
    output into (0, 1) for use with binary cross-entropy loss.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        # Apply each linear layer followed by the shared sigmoid activation.
        for layer in (self.linear1, self.linear2, self.linear3):
            x = self.sigmoid(layer(x))
        return x
# Instantiate the network, the loss, and the optimizer.
model = Model()
# Mean-reduced binary cross-entropy. `size_average=True` is the deprecated
# legacy spelling of this; `reduction='mean'` is the supported equivalent.
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
'''
windows和linux实现多进程的库不一样
windows用spawn来替代linux的fork来调用多进程,会导致报错RuntimeError
解决方法:把要迭代的loader的代码封装起来
'''
# Training loop. The original paste lost its indentation (a SyntaxError);
# structure restored here. enumerate(..., 0) starts the batch index at 0;
# each `data` is an (inputs, labels) tuple of tensors produced by the loader.
for epoch in range(100):
    for i, data in enumerate(train_loader, 0):
        # 1. prepare data (could also unpack inline: for i, (inputs, labels) in ...)
        inputs, labels = data
        # 2. forward
        y_pred = model(inputs)
        loss = criterion(y_pred, labels)
        print(epoch, i, loss.item())
        # 3. backward (clear stale gradients first)
        optimizer.zero_grad()
        loss.backward()
        # 4. update weights
        optimizer.step()
'''
总结:
1.准备数据集:dataset,dataloader
2.设计模型
3.构造损失函数和优化器
4.训练周期
'''
结果:
加载MNIST数据集:
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
# Download (if needed) and load MNIST as tensors; build one loader per split.
train_dataset = datasets.MNIST(root='./dataset/mnist',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = datasets.MNIST(root='./dataset/mnist',
train=False,
transform=transforms.ToTensor(),
download=True)
train_loader = DataLoader(dataset=train_dataset,
batch_size=32,
shuffle=True) # shuffle training data for randomness; the test set needs no shuffling
test_loader = DataLoader(dataset=test_dataset,
batch_size=32,
shuffle=False)
# Demonstration of iterating the loader batch-by-batch; the body is
# intentionally empty. (The original paste lost the indent on `pass`,
# which is a SyntaxError; restored here.)
for batch_idx, (inputs, target) in enumerate(train_loader):
    pass
这节课的课后作业我大概看了一下,比较复杂,还涉及到一些数据分析的内容,我还需要补补课,所以打算分开更~需要参考的话可以关注一下我后续发布的内容~