import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # insert a dimension of size 1 at position dim
y = x.pow(2) + 0.2 * torch.rand(x.size())

class Net(nn.Module):
    def __init__(self, n_features, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)  # hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output)  # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x

net = Net(1, 10, 1)
print(net)
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
loss_func = torch.nn.MSELoss()  # mean squared error
for t in range(100):  # 100 steps
    prediction = net(x)
    loss = loss_func(prediction, y)  # ground truth goes second
    optimizer.zero_grad()  # clear gradients
    loss.backward()  # backpropagation
    optimizer.step()  # apply gradients
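A minimal visualization sketch (my addition, assuming matplotlib is available) to see the fitted curve against the noisy data after the loop finishes:

import matplotlib.pyplot as plt

plt.scatter(x.data.numpy(), y.data.numpy())  # noisy training points
plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=2)  # fitted curve from the last step
plt.show()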
Classification
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.autograd import Variable

# fake data
n_data = torch.ones(100, 2)  # base shape of the data
x0 = torch.normal(2 * n_data, 1)  # class 0 x data (tensor), shape=(100,2)
y0 = torch.zeros(100)  # class 0 y data (tensor), shape=(100,)
x1 = torch.normal(-2 * n_data, 1)  # class 1 x data (tensor), shape=(100,2)
y1 = torch.ones(100)  # class 1 y data (tensor), shape=(100,)

# note: x and y must have exactly the forms below (torch.cat concatenates the data)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # FloatTensor = 32-bit floating point
y = torch.cat((y0, y1), 0).type(torch.LongTensor)  # LongTensor = 64-bit integer
x, y = Variable(x), Variable(y)

class Net(nn.Module):
    def __init__(self, n_features, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)  # hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output)  # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x

net = Net(2, 10, 2)
print(net)
optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()  # cross-entropy, for classification
for t in range(100):  # 100 steps
    out = net(x)
    loss = loss_func(out, y)  # ground truth goes second
    optimizer.zero_grad()  # clear gradients
    loss.backward()  # backpropagation
    optimizer.step()  # apply gradients
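A small sketch (my addition) of how accuracy could be checked after training: the network outputs raw logits, so the predicted class is the index of the largest value along dim 1:

prediction = torch.max(F.softmax(out, dim=1), 1)[1]  # index of the most probable class
pred_y = prediction.data.numpy()
target_y = y.data.numpy()
accuracy = (pred_y == target_y).sum() / float(target_y.size)
print('accuracy: %.2f' % accuracy)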
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.autograd import Variable

# fake data
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100,1)
y = x.pow(2) + 0.2 * torch.rand(x.size())  # noisy y data (tensor), shape=(100,1)

def save():  # train and save net1
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1))
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()
    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    torch.save(net1, 'net.pkl')  # save the whole network
    torch.save(net1.state_dict(), 'net_params.pkl')  # save only the parameters

def restore_net():
    net2 = torch.load('net.pkl')  # load the whole network in one step

def restore_params():
    net3 = torch.nn.Sequential(  # rebuild the identical architecture first
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1))
    net3.load_state_dict(torch.load('net_params.pkl'))  # then load the parameters into it
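A usage sketch (my addition): call the three functions in order. Loading a state_dict needs the identical architecture rebuilt first, while torch.load restores the whole module directly:

save()            # train net1, write net.pkl and net_params.pkl
restore_net()     # load the whole module from net.pkl
restore_params()  # rebuild the architecture, then load the state_dict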
Batch processing
import torch
import torch.utils.data as Data
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.autograd import Variable

BATCH_SIZE = 5
x = torch.linspace(1, 10, 10)
y = torch.linspace(10, 1, 10)

# first wrap the tensors in a Dataset that torch can recognize
torch_dataset = Data.TensorDataset(data_tensor=x, target_tensor=y)  # old keyword API; see the fix below
# put the dataset into a DataLoader
loader = Data.DataLoader(
    dataset=torch_dataset,  # torch TensorDataset format
    batch_size=BATCH_SIZE,  # mini batch size
    shuffle=True,  # whether to shuffle the data (shuffling is usually better)
    num_workers=2,  # number of worker processes for loading the data
)

for epoch in range(3):  # train over the whole dataset 3 times
    for step, (batch_x, batch_y) in enumerate(loader):  # each step the loader yields one mini batch
        # assume this is where you train...
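        # a sketch (my addition): print each mini batch to see what the loader yields
        print('Epoch:', epoch, '| Step:', step,
              '| batch x:', batch_x.numpy(), '| batch y:', batch_y.numpy())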
Batch-training error: got an unexpected keyword argument
In newer versions of torch, TensorDataset takes the tensors positionally; the data_tensor/target_tensor keywords above raise "got an unexpected keyword argument". Also, with num_workers > 0 the loader should be created under if __name__ == '__main__', because the worker processes re-import the script.
if __name__ == '__main__':
    x = torch.linspace(1, 10, 10)
    y = torch.linspace(10, 1, 10)
    # first wrap the tensors in a Dataset that torch can recognize
    torch_dataset = Data.TensorDataset(x, y)
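Continuing the fix (a sketch, my addition): the DataLoader with num_workers > 0 also belongs under the guard, and the training loop is unchanged:

    loader = Data.DataLoader(dataset=torch_dataset, batch_size=5,
                             shuffle=True, num_workers=2)
    for epoch in range(3):
        for step, (batch_x, batch_y) in enumerate(loader):
            pass  # train here as before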
Optimizers
import torch
import torch.utils.data as Data
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.autograd import Variable

class Net(nn.Module):
    def __init__(self, n_features, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)  # hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output)  # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x

if __name__ == '__main__':
    LR = 0.01
    BATCH_SIZE = 32
    EPOCH = 12

    # fake dataset
    x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
    y = x.pow(2) + 0.1 * torch.normal(torch.zeros(*x.size()))

    torch_dataset = Data.TensorDataset(x, y)  # positional form, see the keyword-error fix above
    loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE,
                             shuffle=True, num_workers=2)

    # one network per optimizer, so the comparison is fair
    net_SGD = Net(1, 20, 1)
    net_Momentum = Net(1, 20, 1)
    net_RMSprop = Net(1, 20, 1)
    net_Adam = Net(1, 20, 1)
    nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

    opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
    opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
    opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
    opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
    optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]

    loss_func = torch.nn.MSELoss()  # mean squared error
    losses_his = [[], [], [], []]  # record the loss history of each net

    for epoch in range(EPOCH):
        print(epoch)
        for step, (batch_x, batch_y) in enumerate(loader):
            b_x = Variable(batch_x)
            b_y = Variable(batch_y)
            for net, opt, l_his in zip(nets, optimizers, losses_his):
                output = net(b_x)  # get output for every net
                loss = loss_func(output, b_y)  # compute loss for every net
                opt.zero_grad()  # clear gradients for the next step
                loss.backward()  # backpropagation, compute gradients
                opt.step()  # apply gradients
                l_his.append(loss.data.numpy())  # loss recorder
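A plotting sketch (my addition) to compare the four recorded loss curves; the labels list is the only name introduced here:

    labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
    for i, l_his in enumerate(losses_his):
        plt.plot(l_his, label=labels[i])
    plt.legend(loc='best')
    plt.xlabel('Steps')
    plt.ylabel('Loss')
    plt.ylim((0, 0.2))
    plt.show()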
CNN
import torch
import torch.utils.data as Data
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torchvision
from torch.autograd import Variable

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,  # input channels (grayscale image)
                out_channels=16,  # number of filters
                kernel_size=5,  # size of the scanned region
                stride=1,  # filter step
                padding=2  # pad a border of zeros around the image
                # to keep the conv2d output the same width/height, padding=(kernel_size-1)/2 when stride=1
            ),  # convolution layer: filters that collect local information
            nn.ReLU(),  # activation
            nn.MaxPool2d(kernel_size=2),  # pooling layer: keep the strongest responses
        )  # output shape (16, 14, 14)
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),  # output shape (32, 14, 14)
            nn.ReLU(),
            nn.MaxPool2d(2),  # output shape (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)  # 10 digit classes

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # flatten to (batch_size, 32*7*7)
        output = self.out(x)
        return output

if __name__ == '__main__':
    EPOCH = 1  # how many passes over the whole dataset; just one here to save time
    BATCH_SIZE = 50
    LR = 0.001  # learning rate
    DOWNLOAD_MNIST = True  # set to False if you have already downloaded MNIST
    train_data = torchvision.datasets.MNIST(
        root='./mnist/',
        train=True,
        transform=torchvision.transforms.ToTensor(),
        download=DOWNLOAD_MNIST
    )
    train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
    test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)  # get the test set
    test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000] / 255.  # shape from (2000,28,28) to (2000,1,28,28), value in range (0,1)
    test_y = test_data.test_labels[:2000]
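The notes stop after the forward pass; a minimal training-loop sketch (my addition, following the pattern of the earlier sections) continues under the guard:

    cnn = CNN()
    print(cnn)
    optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
    loss_func = nn.CrossEntropyLoss()
    for epoch in range(EPOCH):
        for step, (b_x, b_y) in enumerate(train_loader):
            output = cnn(b_x)  # cnn output (logits)
            loss = loss_func(output, b_y)  # cross-entropy loss
            optimizer.zero_grad()  # clear gradients
            loss.backward()  # backpropagation
            optimizer.step()  # apply gradients
    # check accuracy on the held-out 2000 test images
    test_output = cnn(test_x)
    pred_y = torch.max(test_output, 1)[1].data.numpy()
    accuracy = (pred_y == test_y.numpy()).sum() / float(test_y.size(0))
    print('test accuracy: %.2f' % accuracy)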