# Part 1: Linear model (一、线性模型)
import numpy as np
import matplotlib.pyplot as plt
x_data = [1.0, 2.0, 3.0]  # training inputs
y_data = [2.0, 4.0, 6.0]  # training targets (consistent with y = 2x)


def forward(x):
    """Linear model prediction x * w (w is the module-level weight)."""
    return w * x


def loss(x, y):
    """Squared error between the prediction for x and the target y."""
    return (forward(x) - y) ** 2
w_list = []    # candidate weights tried
mse_list = []  # mean squared error for each candidate weight

# Brute-force search: evaluate the MSE for w in [0.0, 4.0] with step 0.1.
for w in np.arange(0.0, 4.1, 0.1):
    print('w=', w)
    l_sum = 0
    for x_val, y_val in zip(x_data, y_data):
        y_pred_val = forward(x_val)
        loss_val = loss(x_val, y_val)
        l_sum += loss_val  # accumulate per-sample squared error
        print('\t', x_val, y_val, y_pred_val, loss_val)
    # Generalized: divide by the dataset size instead of the hard-coded 3
    # (identical result for this 3-sample dataset).
    print('MSE=', l_sum / len(x_data))
    w_list.append(w)
    mse_list.append(l_sum / len(x_data))

# Plot MSE as a function of the weight.
plt.plot(w_list, mse_list)
plt.ylabel('Loss')
plt.xlabel('w')
plt.show()
# Part 2: Gradient descent (二、梯度下降算法)
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = 1.0  # initial guess for the weight


def forward(x):
    """Linear model prediction x * w (w is the module-level weight)."""
    return x * w


def cost(xs, ys):
    """Mean squared error of the current w over the whole dataset."""
    total = sum((forward(x) - y) ** 2 for x, y in zip(xs, ys))
    return total / len(xs)


def gradient(xs, ys):
    """Gradient of the MSE cost w.r.t. w: mean of 2*x*(x*w - y)."""
    g = sum(2 * x * (x * w - y) for x, y in zip(xs, ys))
    return g / len(xs)
print('Predict (before training)', 4, forward(4))

cost_history = []   # cost per epoch (y-axis of the plot)
epoch_history = []  # epoch index (x-axis of the plot)

# Plain (batch) gradient descent: 100 epochs, fixed learning rate 0.01.
for epoch in range(100):
    cost_val = cost(x_data, y_data)
    cost_history.append(cost_val)
    grad_val = gradient(x_data, y_data)
    w -= 0.01 * grad_val  # step against the gradient
    print('Epoch', epoch, 'w=', w, 'cost', cost_val)
    epoch_history.append(epoch)

print('Predict (after training)', 4, forward(4))

# Plot cost versus epoch.
plt.plot(epoch_history, cost_history)
plt.ylabel('cost')
plt.xlabel('epoch')
plt.show()
# Part 3: Backpropagation (三、反向传播)
import torch
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

# With requires_grad=True, autograd records every operation on w so that
# a later backward() call fills in w.grad automatically.
w = torch.Tensor([1.0])  # the single learnable weight
w.requires_grad = True


def forward(x):
    """Predict y = x * w (each call adds nodes to the autograd graph)."""
    return x * w


def loss(x, y):
    """Squared-error node for one sample; backward() runs on its graph."""
    y_pred = forward(x)
    return (y_pred - y) ** 2
print('predict (before training)', 4, forward(4).item())

# Stochastic gradient descent: one sample at a time for 100 epochs.
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)   # forward pass: build the computational graph
        l.backward()     # backward pass: accumulate dl/dw into w.grad
        print('\tgrad:', x, y, w.grad.item())
        # Update through .data so the step itself is not tracked by autograd.
        w.data = w.data - 0.01 * w.grad.data
        w.grad.data.zero_()  # clear the gradient before the next sample
    print("progress", epoch, l.item())

# Fix: extract the scalar with .item() as in the pre-training print,
# instead of printing the raw tensor.
print('Predict (after training)', 4, forward(4).item())
# Part 4: Linear regression with PyTorch (四、用Pytorch实现线性回归)
import torch
# 准备数据集
# Dataset as column vectors: one sample per row, shape (3, 1).
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])
# 设计模型
class LinearModel(torch.nn.Module):
    """Single-feature linear regression y = w*x + b as an nn.Module."""

    def __init__(self):
        super(LinearModel, self).__init__()
        # Linear(1, 1): one input feature, one output feature.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        """Apply the affine layer to an (N, 1) batch."""
        return self.linear(x)
model = LinearModel()

# Construct loss and optimizer. reduction='sum' replaces the deprecated
# size_average=False argument (same summed-MSE behavior).
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Training loop: forward -> loss -> zero grads -> backward -> step.
for epoch in range(1000):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())  # .item() extracts the Python scalar
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Output weight and bias
print('w = ', model.linear.weight.item())
print('b = ', model.linear.bias.item())

# Test Model
x_test = torch.Tensor([[4.0]])
y_test = model(x_test)
print('y_pred = ', y_test.data)
# Part 5: Logistic regression (五、逻辑斯蒂回归)
import torchvision
import torch.nn.functional as F
class LogisticRegressionModel(torch.nn.Module):
    """Logistic regression: linear layer followed by a sigmoid."""

    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    # Fix: the parameter was misspelled `selfself`, so the body's use of
    # `self` raised NameError whenever the model was called.
    def forward(self, x):
        # torch.sigmoid replaces the deprecated F.sigmoid alias.
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred
import torch.nn.functional as F
import os
# NOTE(review): presumably a workaround for the duplicate-OpenMP-runtime
# (libiomp5) crash seen on some conda setups — confirm it is still needed.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Study hours -> pass/fail labels (see the 'Hours' / 'Probability of Pass'
# plot labels below).
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[0], [0], [1]])
class LogisticRegressionModel(torch.nn.Module):
    """Binary classifier: P(y=1 | x) = sigmoid(w*x + b)."""

    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # torch.sigmoid replaces the deprecated F.sigmoid alias.
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred
model = LogisticRegressionModel()

# Fix: binary cross-entropy is the appropriate loss for a sigmoid output
# with 0/1 targets (MSELoss was used here); reduction='sum' replaces the
# deprecated size_average=False argument.
criterion = torch.nn.BCELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Training loop: forward -> loss -> zero grads -> backward -> step.
for epoch in range(1000):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())  # .item() extracts the Python scalar
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# Visualize P(pass) over 0..10 study hours, with the 0.5 decision line.
x = np.linspace(0, 10, 200)
x_t = torch.Tensor(x).view((200, 1))  # reshape to a (200, 1) batch
y_t = model(x_t)
y = y_t.data.numpy()

plt.plot(x, y)
plt.plot([0, 10], [0.5, 0.5], c='r')  # decision threshold at 0.5
plt.xlabel('Hours')
plt.ylabel('Probability of Pass')
plt.grid()
plt.show()
# Part 6: Multi-dimensional feature input (六、处理多维特征的输入)
import numpy as np
# Fix: the diabetes file is comma-separated — the Dataset version of this
# loader in this file uses delimiter=',' — so delimiter=' ' would fail to
# parse the rows.
xy = np.loadtxt('diabetes.csv.gz', delimiter=',', dtype=np.float32)
x_data = torch.from_numpy(xy[:, :-1])   # all columns but the last: features
y_data = torch.from_numpy(xy[:, [-1]])  # last column (kept 2-D): label
# print(x_data)
class Model(torch.nn.Module):
    """Fully-connected net 8 -> 6 -> 4 -> 1 with sigmoid activations."""

    def __init__(self):
        super(Model, self).__init__()
        self.Linear1 = torch.nn.Linear(8, 6)
        self.Linear2 = torch.nn.Linear(6, 4)
        self.Linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        """Map an (N, 8) batch to an (N, 1) output in (0, 1)."""
        x = self.sigmoid(self.Linear1(x))
        x = self.sigmoid(self.Linear2(x))
        x = self.sigmoid(self.Linear3(x))
        # Fix: the original forward had no return statement, so the model
        # returned None and the loss computation would crash.
        return x
model = Model()

# Fix: BCE matches the sigmoid output with 0/1 labels (MSELoss was used);
# reduction='sum' replaces the deprecated size_average=False argument.
criterion = torch.nn.BCELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for epoch in range(1000):
    # Forward
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())  # .item() extracts the Python scalar
    # Backward
    optimizer.zero_grad()
    loss.backward()
    # Update
    optimizer.step()
# Part 7: Dataset and DataLoader (七、加载数据集)
import torch
import numpy
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
class DiabetesDataset(Dataset):
    """Diabetes CSV wrapped as a torch Dataset (features + binary label)."""

    def __init__(self, filepath):
        # Fix: actually load from the filepath argument (it was ignored in
        # favor of a hard-coded filename).
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]  # number of samples
        # Fix: store the tensors on self — they were plain locals, so
        # __getitem__ raised AttributeError on self.x_data / self.y_data.
        self.x_data = torch.from_numpy(xy[:, :-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):
        """Return the (features, label) pair at `index`."""
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        """Return the number of samples."""
        return self.len
dataset = DiabetesDataset('diabetes.csv.gz')
# Mini-batches of 32, reshuffled each epoch, loaded by 2 worker processes.
train_loader = DataLoader(
    dataset=dataset,
    batch_size=32,
    shuffle=True,
    num_workers=2,
)
class Model(torch.nn.Module):
    """Three-layer fully-connected net (8 -> 6 -> 4 -> 1), sigmoid throughout."""

    def __init__(self):
        super(Model, self).__init__()
        self.Linear1 = torch.nn.Linear(8, 6)
        self.Linear2 = torch.nn.Linear(6, 4)
        self.Linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        """Map an (N, 8) batch to an (N, 1) output in (0, 1)."""
        hidden = self.sigmoid(self.Linear1(x))
        hidden = self.sigmoid(self.Linear2(hidden))
        return self.sigmoid(self.Linear3(hidden))
model = Model()
# Fix: BCE pairs with the sigmoid output layer and 0/1 labels (MSELoss was
# used); reduction='sum' replaces the deprecated size_average=False.
criterion = torch.nn.BCELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# 100 epochs of mini-batch training driven by the DataLoader.
for epoch in range(100):
    for i, (inputs, labels) in enumerate(train_loader):
        # 1. Prepare data: already unpacked by the loop header.
        # 2. Forward
        y_pred = model(inputs)
        loss = criterion(y_pred, labels)
        print(epoch, i, loss.item())
        # 3. Backward
        optimizer.zero_grad()
        loss.backward()
        # 4. Update
        optimizer.step()
# Part 8: Multi-class classification (八、多分类问题)
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
batch_size = 64

# Normalize each pixel with the given mean/std (MNIST constants).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

train_dataset = datasets.MNIST(root='../dataset/mnist/',
                               train=True,
                               download=True,
                               transform=transform)
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)

test_dataset = datasets.MNIST(root='../dataset/mnist/',
                              train=False,
                              download=True,
                              transform=transform)
# Fix: the test loader was built from train_dataset, so "test" accuracy
# was actually measured on the training set.
test_loader = DataLoader(test_dataset,
                         shuffle=False,
                         batch_size=batch_size)
class Net(torch.nn.Module):
    """Five-layer MLP for MNIST: 784 -> 512 -> 256 -> 128 -> 64 -> 10."""

    def __init__(self):
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)

    def forward(self, x):
        """Flatten to (N, 784), run the ReLU stack, return raw logits."""
        out = x.view(-1, 784)
        for layer in (self.l1, self.l2, self.l3, self.l4):
            out = F.relu(layer(out))
        # No softmax here: CrossEntropyLoss expects unnormalized logits.
        return self.l5(out)
model = Net()
# reduction='sum' replaces the deprecated size_average=False argument
# (same summed-loss behavior).
criterion = torch.nn.CrossEntropyLoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
def train(epoch):
    """Run one training epoch; report the average loss every 300 batches."""
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d,%5d] loss:%.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0
def test():
    """Evaluate classification accuracy over test_loader and print it."""
    correct = 0
    total = 0
    # Fix: torch.no_grad must be instantiated — `with torch.no_grad:`
    # (the class object itself) raises at runtime.
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            # Predicted class = index of the max logit along dim 1.
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set:%d %%' % (100 * correct / total))
if __name__ == '__main__':
    # Train for 10 epochs, evaluating on the test set after each one.
    for epoch in range(10):
        train(epoch)
        test()