Task01: Linear regression; Softmax and classification models; multilayer perceptrons
Task02: Text preprocessing; language models; recurrent neural network basics
I. Linear Regression
1. The optimization procedure consists of the following two steps (a minimal sketch follows this list):
- (i) Initialize the model parameters, typically with random initialization.
- (ii) Iterate over the data multiple times, updating each parameter by moving it in the negative gradient direction.
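A minimal sketch of these two steps, my own addition: the toy objective and the learning rate are illustrative, not from the original notes.
import torch

w = torch.randn(2, 1, requires_grad=True)  # (i) random initialization
lr = 0.1                                   # illustrative learning rate
for _ in range(100):                       # (ii) iterate, here on a toy objective
    loss = ((w - 1) ** 2).sum()            # minimum at w = 1
    loss.backward()
    with torch.no_grad():
        w -= lr * w.grad                   # move in the negative gradient direction
        w.grad.zero_()                     # clear accumulated gradients
print(w)                                   # approaches a tensor of ones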
2. Vectorized computation
# vectorized computation
'''
One way to add two vectors is to perform scalar addition element by element.
Another way is to add the two vectors directly as a single vector operation.
'''
import torch
import time
# initialize a and b
n = 1000
a = torch.ones(n)
b = torch.ones(n)
# example: define a Timer class to measure the time taken by scalar vs. vectorized addition
class Timer(object):
    def __init__(self):
        self.times = []
        self.start()

    def start(self):
        # start the timer
        self.start_time = time.time()

    def stop(self):
        # stop the timer and record the elapsed time in the list
        self.times.append(time.time() - self.start_time)
        return self.times[-1]

    def avg(self):
        # average of the recorded times
        return sum(self.times) / len(self.times)
# time elementwise scalar addition with a for loop
timer = Timer()
c = torch.zeros(n)
for i in range(n):
    c[i] = a[i] + b[i]
'%.5f sec' % timer.stop()  # result: '0.01104 sec'
# time direct vector addition
timer = Timer()
d = a + b
'%.5f sec' % timer.stop()  # much faster: the loop runs in optimized native code instead of Python
3. Linear regression implemented from scratch
# import packages and modules
%matplotlib inline
import torch
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import random
print(torch.__version__)
# generate the dataset
'''
Use a linear model to generate a dataset of 1000 samples, with the following
generating linear relationship:
price = w1 * area + w2 * age + b
'''
#set input feature number
num_inputs = 2
# set example number
num_examples = 1000
# set true weights and bias
true_w = [2, -3.4]
true_b = 4.2
features = torch.randn(num_examples, num_inputs, dtype = torch.float32)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float32)  # add Gaussian observation noise
# visualize the data
plt.scatter(features[:, 1].numpy(), labels.numpy(), 1);
# read the dataset in minibatches
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # shuffle so samples are read in random order
    for i in range(0, num_examples, batch_size):
        # the last batch may contain fewer than batch_size examples
        j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)])
        yield features.index_select(0, j), labels.index_select(0, j)
# test
batch_size = 10
for X, y in data_iter(batch_size, features, labels):
    print(X, '\n', y)
    break
# initialize model parameters
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)
# track gradients for the parameters
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
# define the model
def linreg(X, w, b):
    return torch.mm(X, w) + b  # b is broadcast over the batch dimension
# define the loss function: squared loss
def squared_loss(y_hat, y):
    # reshape y to match y_hat; the factor 1/2 makes the gradient simply y_hat - y
    return (y_hat - y.view(y_hat.size())) ** 2 / 2
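A quick sanity check, my own addition: thanks to the 1/2 factor, the gradient of this loss with respect to y_hat is simply y_hat - y, which autograd confirms (the check tensors are made up):
y_hat_check = torch.tensor([2.0, 3.0], requires_grad=True)
y_check = torch.tensor([1.0, 2.5])
squared_loss(y_hat_check, y_check).sum().backward()
print(y_hat_check.grad)  # tensor([1.0000, 0.5000]), i.e. y_hat - y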
# define the optimizer: minibatch stochastic gradient descent
def sgd(params, lr, batch_size):
    for param in params:
        # use .data to update the parameter outside of gradient tracking;
        # the loss below is summed over the batch, so dividing by batch_size averages the gradient
        param.data -= lr * param.grad / batch_size
# hyperparameters
lr = 0.03
num_epochs = 5
net = linreg
loss = squared_loss

# training
for epoch in range(num_epochs):  # in each epoch, every sample is used once
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()  # summed minibatch loss
        l.backward()                     # compute gradients on the minibatch
        sgd([w, b], lr, batch_size)      # update parameters
        # reset gradients, since backward() accumulates them
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
w, true_w, b, true_b  # compare learned parameters with the ground truth
4. Linear regression: the PyTorch version
import torch
from torch import nn
import numpy as np
torch.manual_seed(1)
print(torch.__version__)
torch.set_default_tensor_type('torch.FloatTensor')
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
import torch.utils.data as Data
batch_size = 10
# combine features and labels into a dataset
dataset = Data.TensorDataset(features, labels)
# put dataset into DataLoader
data_iter = Data.DataLoader(
    dataset=dataset,        # torch TensorDataset format
    batch_size=batch_size,  # minibatch size
    shuffle=True,           # whether to shuffle the data
    num_workers=2,          # load data with multiple worker processes
)
for X, y in data_iter:
    print(X, '\n', y)
    break
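A hypothetical fallback, not from the original notes: multi-process loading (num_workers > 0) can fail on Windows or inside some notebook environments, and loading in the main process is a safe alternative:
data_iter = Data.DataLoader(dataset=dataset, batch_size=batch_size,
                            shuffle=True, num_workers=0)  # single-process loading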
# define the model
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()  # call the parent class constructor
        self.linear = nn.Linear(n_feature, 1)  # prototype: `torch.nn.Linear(in_features, out_features, bias=True)`

    def forward(self, x):
        y = self.linear(x)
        return y
net = LinearNet(num_inputs)
print(net)
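A quick shape check, my own addition with a made-up minibatch of 4 samples: the network should map (batch, num_inputs) to (batch, 1).
X_check = torch.randn(4, num_inputs)  # hypothetical minibatch
print(net(X_check).shape)             # expected: torch.Size([4, 1])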
# method one
net = nn.Sequential(
    nn.Linear(num_inputs, 1)
    # other layers can be added here
)
# method two
net = nn.Sequential()
net.add_module('linear', nn.Linear(num_inputs, 1))
# net.add_module ......
# method three
from collections import OrderedDict
net = nn.Sequential(OrderedDict([
    ('linear', nn.Linear(num_inputs, 1))
    # ......
]))
print(net)
print(net[0])
from torch.nn import init
init.normal_(net[0].weight, mean=0.0, std=0.01)
init.constant_(net[0].bias, val=0.0) # or you can use `net[0].bias.data.fill_(0)` to modify it directly
for param in net.parameters():
    print(param)
# define the loss function
loss = nn.MSELoss()  # nn built-in squared (mean squared error) loss
# prototype: `torch.nn.MSELoss(size_average=None, reduce=None, reduction='mean')`
# define the optimizer
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.03)  # built-in stochastic gradient descent
print(optimizer)  # prototype: `torch.optim.SGD(params, lr=<required>, momentum=0, dampening=0, weight_decay=0, nesterov=False)`
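A side note with a hypothetical variant, not from the original notes: MSELoss defaults to reduction='mean', which averages over the batch, matching the scratch version where sgd divides the summed gradient by batch_size. With a summed loss the gradients grow by a factor of batch_size, so the learning rate should shrink accordingly:
loss_sum = nn.MSELoss(reduction='sum')                             # summed instead of averaged loss
optimizer_sum = optim.SGD(net.parameters(), lr=0.03 / batch_size)  # compensate for the sum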
# training
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()  # reset gradients, equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))
# result comparison
dense = net[0]
print(true_w, dense.weight.data)
print(true_b, dense.bias.data)
II. Softmax and Classification Models