pytorch实现
第3章 线性分类
3.1 基于Logistic回归的二分类任务
3.1.1 数据集构建
构建一个简单的分类任务,并构建训练集、验证集和测试集。
本任务的数据来自带噪音的两个弯月形状函数,每个弯月对应一个类别。我们采集1000条样本,每个样本包含2个特征。
import math
import copy
import torch
def make_moons(n_samples=1000, shuffle=True, noise=None):
    """
    Build the two-moons binary-classification dataset.

    Inputs:
        - n_samples: total number of samples, split across the two moons
        - shuffle: whether to randomly permute the samples
        - noise: std-dev of Gaussian noise added to the features (None = no noise)
    Outputs:
        - X: features, shape=[n_samples, 2]
        - y: labels (0.0 for the outer moon, 1.0 for the inner moon), shape=[n_samples]
    """
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out
    # Class 0: points (cos t, sin t) for t uniformly spaced over [0, pi]
    # via 'torch.linspace' / 'torch.cos' / 'torch.sin'
    outer_circ_x = torch.cos(torch.linspace(0, math.pi, n_samples_out))
    outer_circ_y = torch.sin(torch.linspace(0, math.pi, n_samples_out))
    # Class 1: the second moon, shifted and flipped relative to the first
    inner_circ_x = 1 - torch.cos(torch.linspace(0, math.pi, n_samples_in))
    inner_circ_y = 0.5 - torch.sin(torch.linspace(0, math.pi, n_samples_in))
    print('outer_circ_x.shape:', outer_circ_x.shape, 'outer_circ_y.shape:', outer_circ_y.shape)
    print('inner_circ_x.shape:', inner_circ_x.shape, 'inner_circ_y.shape:', inner_circ_y.shape)
    # Concatenate the two classes along dim 0 with 'torch.cat',
    # then stack feature-1 and feature-2 along dim 1 with 'torch.stack'
    X = torch.stack(
        [torch.cat([outer_circ_x, inner_circ_x]),
         torch.cat([outer_circ_y, inner_circ_y])],
        dim=1  # fix: torch.stack's parameter is `dim`, not numpy's `axis`
    )
    print('after concat shape:', torch.cat([outer_circ_x, inner_circ_x]).shape)
    print('X shape:', X.shape)
    # Labels: 'torch.zeros' for the first moon, 'torch.ones' for the second
    y = torch.cat(
        [torch.zeros([n_samples_out]), torch.ones([n_samples_in])]
    )
    print('y shape:', y.shape)
    # If shuffle is True, permute all samples
    if shuffle:
        # 'torch.randperm' yields a random permutation of [0, X.shape[0]) used as indices
        idx = torch.randperm(X.shape[0])
        X = X[idx]
        y = y[idx]
    # If noise is not None, perturb the features
    if noise is not None:
        # 'torch.normal' draws Gaussian noise added onto the raw features
        X += torch.normal(mean=0.0, std=noise, size=X.shape)
    return X, y
# Draw 1000 samples from the noisy two-moons distribution
n_samples = 1000
X, y = make_moons(n_samples=n_samples, shuffle=True, noise=0.5)

# Visualize the generated dataset; marker color encodes the class label
import matplotlib.pyplot as plt

plt.figure(figsize=(5, 5))
plt.scatter(x=X[:, 0].tolist(), y=X[:, 1].tolist(), marker='*', c=y.tolist())
plt.xlim(-3, 4)
plt.ylim(-3, 4)
plt.savefig('linear-dataset-vis.pdf')
plt.show()
随机采集1000个样本,并进行可视化。
# Draw 1000 samples from the noisy two-moons distribution
n_samples = 1000
X, y = make_moons(n_samples=n_samples, shuffle=True, noise=0.5)

# Visualize the generated dataset; marker color encodes the class label
import matplotlib.pyplot as plt

plt.figure(figsize=(5, 5))
plt.scatter(x=X[:, 0].tolist(), y=X[:, 1].tolist(), marker='*', c=y.tolist())
plt.xlim(-3, 4)
plt.ylim(-3, 4)
plt.savefig('linear-dataset-vis.pdf')
plt.show()
输出结果:
将1000条样本数据拆分成训练集、验证集和测试集,其中训练集640条、验证集160条、测试集200条。
num_train = 640  # training-set size
num_dev = 160    # validation-set size
num_test = 200   # test-set size
X_train, y_train = X[:num_train], y[:num_train]
X_dev, y_dev = X[num_train:num_train + num_dev], y[num_train:num_train + num_dev]
X_test, y_test = X[num_train + num_dev:], y[num_train + num_dev:]
# Reshape labels to column vectors [N, 1] to match the model's output shape
y_train = y_train.reshape([-1,1])
y_dev = y_dev.reshape([-1,1])
y_test = y_test.reshape([-1,1])
# Print the shapes of X_train and y_train
print("X_train shape: ", X_train.shape, "y_train shape: ", y_train.shape)
# Print the labels of the first 5 samples
print (y_train[:5])
输出结果:
3.1.2 模型构建
# The Logistic (sigmoid) function: 1 / (1 + e^-x)
def logistic(x):
    """Map each element of x into (0, 1) via the logistic sigmoid."""
    return torch.sigmoid(x)
# Sample inputs over [-10, 10] to draw the logistic curve
x = torch.linspace(-10, 10, 10000)

plt.figure()
plt.plot(x.tolist(), logistic(x).tolist(), color="#e4007f", label="Logistic Function")

# Axis styling: hide the top and right spines
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
# Keep ticks on the bottom/left axes
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
# Move the remaining spines so they cross at the origin (0, 0)
ax.spines['left'].set_position(('data', 0))
ax.spines['bottom'].set_position(('data', 0))

# Legend and output
plt.legend()
plt.savefig('linear-logistic.pdf')
plt.show()
输出结果:
Logistic回归算子:
# Fix: `from nndl import op.Op` is a SyntaxError, and the old `op = op.Op`
# rebinding broke every later `op.Op` reference in this file.
from nndl import op

class model_LR(op.Op):
    """Logistic regression operator: logistic(inputs @ w + b)."""

    def __init__(self, input_dim):
        super(model_LR, self).__init__()
        self.params = {}
        # Initialize the linear layer's weights to zeros
        self.params['w'] = torch.zeros([input_dim, 1])
        # self.params['w'] = torch.normal(mean=0, std=0.01, size=[input_dim, 1])
        # Initialize the bias to zero
        self.params['b'] = torch.zeros([1])

    def __call__(self, inputs):
        return self.forward(inputs)

    def forward(self, inputs):
        """
        Input:
            - inputs: shape=[N, D], N samples with D features
        Output:
            - outputs: predicted probabilities, shape=[N, 1]
        """
        # Linear score
        score = torch.matmul(inputs, self.params['w']) + self.params['b']
        # Logistic activation
        outputs = logistic(score)
        return outputs
op.Op代码:
class Op(object):
    """Operator base class: subclasses must implement forward and backward."""

    def __init__(self):
        pass

    def __call__(self, inputs):
        # Calling the operator object delegates to forward
        return self.forward(inputs)

    def forward(self, inputs):
        """Compute outputs from inputs; must be overridden by subclasses."""
        raise NotImplementedError

    def backward(self, inputs):
        """Compute gradients; must be overridden by subclasses."""
        raise NotImplementedError
测试样例:
# Fix the random seed so every run produces the same result
# (fix: the original called torch.seed(), which sets a *random* seed)
torch.manual_seed(0)
# Randomly generate 3 samples of length 4
# (fix: `torchrandn(shape=[3,4])` was a NameError; torch.randn takes a size list)
inputs = torch.randn([3, 4])
print('Input is:', inputs)
# Instantiate the model
model = model_LR(4)
outputs = model(inputs)
print('Output is:', outputs)
输出结果:
问题1:Logistic回归在不同的书籍中,有许多其他的称呼,具体有哪些?你认为哪个称呼最好?
在《机器学习》中称为对数几率回归,在《统计学习方法》中称为逻辑斯蒂回归,其实叫logistic回归,我认为还是遵循原本称呼最好,免除了翻译引起的误会和争议。
问题2:什么是激活函数?为什么要用激活函数?常见激活函数有哪些?
1.什么是激活函数?
所谓激活函数(Activation Function),就是在人工神经网络的神经元上运行的函数,负责将神经元的输入映射到输出端。
两个特性:
- 在人工神经网络上运行的函数,既然是一个函数,就能像普通的函数一样使用。
- 把非线性特性引入网络中,简单说就是激活函数有“掰弯”的能力。能够增强我们关注的特征,减弱不关注的特征。
2.为什么要用激活函数?
如果不用激活函数,每一层输出都是上层输入的线性函数,无论神经网络有多少层,输出都是输入的线性组合,这种情况就是最原始的感知机(Perceptron)。如果使用的话,激活函数给神经元引入了非线性因素,使得神经网络可以任意逼近任何非线性函数,这样神经网络就可以应用到众多的非线性模型中。
3.常见的激活函数有哪些?
一:ReLU函数
二:sigmoid函数
对于一个定义域在R中的输入,sigmoid函数将输入变换为区间(0, 1)上的输出
三:Tanh函数
与sigmoid函数类似, tanh(双曲正切)函数也能将其输入压缩转换到区间(-1, 1)上。
3.1.3 损失函数
交叉熵损失函数
# 实现交叉熵损失函数
# Binary cross-entropy loss for Logistic-regression outputs
class BinaryCrossEntropyLoss(op.Op):
    """Mean binary cross-entropy: -1/N * sum(y*log(p) + (1-y)*log(1-p))."""

    def __init__(self):
        self.predicts = None  # cached predictions, shape [N, 1]
        self.labels = None    # cached labels, shape [N, 1]
        self.num = None       # number of samples N

    def __call__(self, predicts, labels):
        return self.forward(predicts, labels)

    def forward(self, predicts, labels):
        """
        Inputs:
            - predicts: predicted probabilities in (0, 1), shape=[N, 1]
            - labels: ground-truth 0/1 labels, shape=[N, 1]
        Output:
            - loss: mean binary cross-entropy, shape=[1]
        """
        self.predicts = predicts
        self.labels = labels
        self.num = self.predicts.shape[0]
        loss = -1. / self.num * (
            torch.matmul(self.labels.t(), torch.log(self.predicts))
            + torch.matmul((1 - self.labels.t()), torch.log(1 - self.predicts)))
        # Fix: torch.squeeze's parameter is `dim`, not numpy's `axis`
        loss = torch.squeeze(loss, dim=1)
        return loss
测试样例:
# Generate 3 labels of value 1, shape [3, 1]
labels = torch.ones([3,1])
# Evaluate the empirical risk on the previous model outputs
bce_loss = BinaryCrossEntropyLoss()
print(bce_loss(outputs, labels))
输出结果:
tensor([0.6834])
3.1.4 模型优化
不同于线性回归中直接使用最小二乘法即可进行模型参数的求解,Logistic回归需要使用优化算法对模型参数进行有限次地迭代来获取更优的模型,从而尽可能地降低风险函数的值。
在机器学习任务中,最简单、常用的优化算法是梯度下降法。
使用梯度下降法进行模型优化,首先需要初始化参数W和 b,然后不断地计算它们的梯度,并沿梯度的反方向更新参数。
用程序实现:
# Fix: this class inherited `op`, the rebound alias from the broken
# `op = op.Op` line; inherit `op.Op` directly for consistency with the
# other operator classes in this file.
class model_SR(op.Op):
    """Softmax regression operator with hand-derived analytic gradients."""

    def __init__(self, input_dim, output_dim):
        super(model_SR, self).__init__()
        self.params = {}
        # Initialize the linear layer's weights to zeros
        self.params['W'] = torch.zeros([input_dim, output_dim])
        # self.params['W'] = torch.normal(mean=0, std=0.01, size=[input_dim, output_dim])
        # Initialize the bias to zeros
        self.params['b'] = torch.zeros([output_dim])
        # Parameter gradients filled in by backward()
        self.grads = {}
        self.X = None        # cached inputs for backward
        self.outputs = None  # cached softmax outputs for backward
        self.output_dim = output_dim

    def __call__(self, inputs):
        return self.forward(inputs)

    def forward(self, inputs):
        """
        Input:
            - inputs: shape=[N, D]
        Output:
            - outputs: class probabilities, shape=[N, C]
        """
        self.X = inputs
        # Linear scores
        score = torch.matmul(self.X, self.params['W']) + self.params['b']
        # Softmax over classes
        self.outputs = softmax(score)
        return self.outputs

    def backward(self, labels):
        """
        Input:
            - labels: ground-truth class indices, shape=[N]
        """
        N = labels.shape[0]
        # one_hot requires integer (int64) labels
        labels = torch.nn.functional.one_hot(labels.to(torch.int64), self.output_dim)
        self.grads['W'] = -1 / N * torch.matmul(self.X.t(), (labels - self.outputs))
        # Bias gradient is the column sum of (labels - outputs), scaled
        self.grads['b'] = -1 / N * torch.matmul(torch.ones([N]), (labels - self.outputs))
用程序实现:
from abc import abstractmethod

# Optimizer base class
class Optimizer(object):
    """Base class for optimizers: holds the learning rate and the model."""

    def __init__(self, init_lr, model):
        """
        Initialize the optimizer.
        """
        # Learning rate used in the update rule
        self.init_lr = init_lr
        # The model whose parameters this optimizer updates
        self.model = model

    @abstractmethod
    def step(self):
        """
        Perform one parameter update; subclasses must implement this.
        """
        pass


class SimpleBatchGD(Optimizer):
    """Plain full-batch gradient descent: p <- p - lr * grad(p)."""

    def __init__(self, init_lr, model):
        super(SimpleBatchGD, self).__init__(init_lr=init_lr, model=model)

    def step(self):
        # Update every parameter following equations (3.8) and (3.9)
        params = self.model.params
        if isinstance(params, dict):
            for name in params.keys():
                params[name] = params[name] - self.init_lr * self.model.grads[name]
3.1.5 评价指标
在分类任务中,通常使用准确率(Accuracy)作为评价指标。如果模型预测的类别与真实类别一致,则说明模型预测正确。准确率即正确预测的数量与总的预测数量的比值:
def accuracy(preds, labels):
    """
    Compute classification accuracy.

    Inputs:
        - preds: predictions; shape=[N, 1] probabilities for binary tasks,
          shape=[N, C] class probabilities for multi-class tasks
        - labels: ground-truth labels matching preds' convention
    Output:
        - scalar tensor, fraction of correct predictions
    """
    # preds.shape[1] == 1 means binary classification; > 1 means multi-class
    if preds.shape[1] == 1:
        # Binary: threshold the probability at 0.5 (class 1 if p >= 0.5).
        # Fix: the original never thresholded (only a leftover debug print and
        # a commented-out, incorrect torch.can_cast call), so raw probabilities
        # were compared against 0/1 labels.
        preds = (preds >= 0.5).to(torch.float32)
    else:
        # Multi-class: the predicted class is the arg max over the class dim
        preds = torch.argmax(preds, dim=1).int()
    return torch.mean((preds == labels).float())
# Hard 0/1 predictions, shape [4, 1]; two of four match the labels
preds = torch.as_tensor([[0.],[1.],[1.],[0.]])
labels = torch.as_tensor([[1.],[1.],[0.],[0.]])
print("accuracy is:", accuracy(preds, labels))
输出结果:
accuracy is: tensor(0.5000)
3.1.6 完善Runner类
基于RunnerV1,本章的RunnerV2类在训练过程中使用梯度下降法进行网络优化,模型训练过程中计算在训练集和验证集上的损失及评估指标并打印,训练过程中保存最优模型。
class RunnerV2(object):
    """
    Training driver: runs full-batch gradient-descent training, records the
    loss and evaluation metric on both the train and dev sets each epoch,
    and saves the parameters whenever the dev score improves.
    """

    def __init__(self, model, optimizer, metric, loss_fn):
        self.model = model
        self.optimizer = optimizer
        self.loss_fn = loss_fn
        self.metric = metric
        # Metric history recorded during training
        self.train_scores = []
        self.dev_scores = []
        # Loss history recorded during training
        self.train_loss = []
        self.dev_loss = []

    def train(self, train_set, dev_set, **kwargs):
        """
        Train on train_set for `num_epochs` full-batch epochs, evaluating on
        dev_set after every update and checkpointing the best dev score.
        """
        # Number of training epochs (defaults to 0 if not provided)
        num_epochs = kwargs.get("num_epochs", 0)
        # Logging frequency in epochs (defaults to 100)
        log_epochs = kwargs.get("log_epochs", 100)
        # Checkpoint path (defaults to "best_model.pdparams")
        save_path = kwargs.get("save_path", "best_model.pdparams")
        # Optional callback for printing per-layer gradients (defaults to None)
        print_grads = kwargs.get("print_grads", None)
        # Best dev score observed so far
        best_score = 0
        # Run num_epochs epochs of full-batch training
        for epoch in range(num_epochs):
            X, y = train_set
            # Forward pass
            logits = self.model(X)
            # Training loss (cross-entropy)
            trn_loss = self.loss_fn(logits, y).item()
            self.train_loss.append(trn_loss)
            # Training metric
            trn_score = self.metric(logits, y).item()
            self.train_scores.append(trn_score)
            # Compute parameter gradients (the model implements manual backward)
            self.model.backward(y)
            if print_grads is not None:
                # Print each layer's gradients
                print_grads(self.model)
            # Parameter update
            self.optimizer.step()
            dev_score, dev_loss = self.evaluate(dev_set)
            # Checkpoint whenever the dev score reaches a new best
            if dev_score > best_score:
                self.save_model(save_path)
                print(f"best accuracy performence has been updated: {best_score:.5f} --> {dev_score:.5f}")
                best_score = dev_score
            if epoch % log_epochs == 0:
                print(f"[Train] epoch: {epoch}, loss: {trn_loss}, score: {trn_score}")
                print(f"[Dev] epoch: {epoch}, loss: {dev_loss}, score: {dev_score}")

    def evaluate(self, data_set):
        """Return (score, loss) on data_set; also appends to the dev history."""
        X, y = data_set
        # Model outputs
        logits = self.model(X)
        # Loss
        loss = self.loss_fn(logits, y).item()
        self.dev_loss.append(loss)
        # Metric
        score = self.metric(logits, y).item()
        self.dev_scores.append(score)
        return score, loss

    def predict(self, X):
        return self.model(X)

    def save_model(self, save_path):
        # Only the parameter dict is persisted, not the whole model object
        torch.save(self.model.params, save_path)

    def load_model(self, model_path):
        self.model.params = torch.load(model_path)
3.1.7 模型训练
# Fix the random seed so every run produces the same result
# (fix: torch.seed() sets a *random* seed; manual_seed makes runs reproducible)
torch.manual_seed(0)
# Feature dimensionality
input_dim = 2
# Learning rate
lr = 0.001
# Instantiate the model
model = model_LR(input_dim=input_dim)
# Optimizer
optimizer = SimpleBatchGD(init_lr=lr, model=model)
# Loss function
loss_fn = BinaryCrossEntropyLoss()
# Evaluation metric
metric = accuracy
# Instantiate RunnerV2 with the training configuration
runner = RunnerV2(model, optimizer, metric, loss_fn)
runner.train([X_train, y_train], [X_dev, y_dev], num_epochs=500, log_epochs=50, save_path="best_model.pdparams")
输出结果:
best accuracy performence has been updated: 0.00000 --> 0.75625
[Train] epoch: 0, loss: 0.6931460499763489, score: 0.5171874761581421
[Dev] epoch: 0, loss: 0.6830810308456421, score: 0.7562500238418579
best accuracy performence has been updated: 0.75625 --> 0.76250
best accuracy performence has been updated: 0.76250 --> 0.76875
best accuracy performence has been updated: 0.76875 --> 0.77500
best accuracy performence has been updated: 0.77500 --> 0.78125
[Train] epoch: 50, loss: 0.5068781971931458, score: 0.7828124761581421
[Dev] epoch: 50, loss: 0.49958306550979614, score: 0.78125
[Train] epoch: 100, loss: 0.46629342436790466, score: 0.7953125238418579
[Dev] epoch: 100, loss: 0.46754398941993713, score: 0.78125
[Train] epoch: 150, loss: 0.4502924978733063, score: 0.800000011920929
[Dev] epoch: 150, loss: 0.45836305618286133, score: 0.78125
[Train] epoch: 200, loss: 0.4423935115337372, score: 0.8031250238418579
[Dev] epoch: 200, loss: 0.45567020773887634, score: 0.78125
best accuracy performence has been updated: 0.78125 --> 0.78750
best accuracy performence has been updated: 0.78750 --> 0.79375
[Train] epoch: 250, loss: 0.4379945397377014, score: 0.8031250238418579
[Dev] epoch: 250, loss: 0.45530444383621216, score: 0.793749988079071
best accuracy performence has been updated: 0.79375 --> 0.80000
[Train] epoch: 300, loss: 0.4353612959384918, score: 0.8046875
[Dev] epoch: 300, loss: 0.4558488428592682, score: 0.800000011920929
[Train] epoch: 350, loss: 0.4337103068828583, score: 0.807812511920929
[Dev] epoch: 350, loss: 0.4567358195781708, score: 0.800000011920929
[Train] epoch: 400, loss: 0.43264254927635193, score: 0.809374988079071
[Dev] epoch: 400, loss: 0.4577171504497528, score: 0.800000011920929
[Train] epoch: 450, loss: 0.43193674087524414, score: 0.809374988079071
[Dev] epoch: 450, loss: 0.4586794078350067, score: 0.800000011920929
Process finished with exit code 0
进行可视化:
# Visualize how train/dev metrics evolve over training
def plot(runner, fig_name):
    """Plot loss (left panel) and accuracy (right panel) curves from the runner."""
    plt.figure(figsize=(10, 5))

    epochs = list(range(len(runner.train_scores)))

    # Left panel: loss curves
    plt.subplot(1, 2, 1)
    plt.plot(epochs, runner.train_loss, color='#e4007f', label="Train loss")
    plt.plot(epochs, runner.dev_loss, color='#f19ec2', linestyle='--', label="Dev loss")
    plt.ylabel("loss", fontsize='large')
    plt.xlabel("epoch", fontsize='large')
    plt.legend(loc='upper right', fontsize='x-large')

    # Right panel: accuracy curves
    plt.subplot(1, 2, 2)
    plt.plot(epochs, runner.train_scores, color='#e4007f', label="Train accuracy")
    plt.plot(epochs, runner.dev_scores, color='#f19ec2', linestyle='--', label="Dev accuracy")
    plt.ylabel("score", fontsize='large')
    plt.xlabel("epoch", fontsize='large')
    plt.legend(loc='lower right', fontsize='x-large')

    plt.tight_layout()
    plt.savefig(fig_name)
    plt.show()

plot(runner, fig_name='linear-acc.pdf')
输出结果:
3.1.8 模型评价
# Evaluate the trained model on the held-out test set
score, loss = runner.evaluate([X_test, y_test])
print("[Test] score/loss: {:.4f}/{:.4f}".format(score, loss))
进行可视化:
def decision_boundary(w, b, x1):
    """Solve w1*x1 + w2*x2 + b = 0 for x2, i.e. the boundary line as x2 = f(x1)."""
    w1, w2 = w
    return -(w1 * x1 + b) / w2
plt.figure(figsize=(5,5))
# Plot the raw data points, colored by class
plt.scatter(X[:, 0].tolist(), X[:, 1].tolist(), marker='*', c=y.tolist())
w = model.params['w']
b = model.params['b']
# Evaluate the decision boundary x2 = f(x1) over the plotting range
x1 = torch.linspace(-2, 3, 1000)
x2 = decision_boundary(w, b, x1)
# Draw the boundary line
plt.plot(x1.tolist(), x2.tolist(), color="red")
plt.show()
输出结果:
[Test] score/loss: 0.3050/0.5848
3.2 基于Softmax回归的多分类任务
Logistic回归可以有效地解决二分类问题,但在分类任务中,还有一类多分类问题,即类别数C
大于2 的分类问题。Softmax回归就是Logistic回归在多分类问题上的推广。
使用Softmax回归模型对一个简单的数据集进行多分类实验。
3.2.1 数据集构建
我们首先构建一个简单的多分类任务,并构建训练集、验证集和测试集。
本任务的数据来自3个不同的簇,每个簇对应一个类别。我们采集1000条样本,每个样本包含2个特征。
import numpy as np
import torch
import matplotlib.pyplot as plt
def make_multiclass_classification(n_samples=100, n_features=2, n_classes=3, shuffle=True, noise=0.1):
    """
    Build a toy multi-class dataset of clusters around hypercube corners.

    Inputs:
        - n_samples: total number of samples
        - n_features: feature dimensionality
        - n_classes: number of classes (one cluster per class)
        - shuffle: whether to permute the samples
        - noise: fraction of samples whose label is randomly reassigned
    Outputs:
        - X: features, shape=[n_samples, n_features]
        - y: int32 labels, shape=[n_samples]
    """
    # Per-class sample counts; distribute the remainder one sample at a time
    n_samples_per_class = [int(n_samples / n_classes) for k in range(n_classes)]
    for i in range(n_samples - sum(n_samples_per_class)):
        n_samples_per_class[i % n_classes] += 1
    # Initialize features and labels to zeros
    X = torch.zeros([n_samples, n_features])
    y = torch.zeros(n_samples, dtype=torch.int32)
    # Pick n_classes distinct corners of the {0,1}^n_features hypercube as centers:
    # random integers in [0, 2^n_features), then their binary expansions
    centroids = torch.randperm(2 ** n_features)[:n_classes]
    centroids_bin = np.unpackbits(centroids.numpy().astype('uint8')).reshape((-1, 8))[:, -n_features:]
    centroids = torch.as_tensor(centroids_bin, dtype=torch.float32)
    # Scale/shift the centers to control how separated the clusters are
    centroids = 1.5 * centroids - 1
    # Draw standard-normal features for all samples
    X[:, :n_features] = torch.randn([n_samples, n_features])
    stop = 0
    # Move each class's features near its cluster center
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_class[k]
        # Assign the class label for this contiguous slice
        y[start:stop] = k % n_classes
        X_k = X[start:stop, :n_features]
        # Random linear transform in [-1, 1) controls within-class spread/shape
        A = 2 * torch.rand([n_features, n_features]) - 1
        X_k[...] = torch.matmul(X_k, A)
        X_k += centroid
        X[start:stop, :n_features] = X_k
    # Label noise: randomly relabel roughly a `noise` fraction of samples
    if noise > 0.0:
        # Boolean mask selecting which samples receive a random label
        noise_mask = torch.rand([n_samples]) < noise
        for i in range(len(noise_mask)):
            if noise_mask[i]:
                # Assign a uniformly random class label to the noisy sample
                y[i] = torch.randint(n_classes, size=[1], dtype=torch.int32)
    # If shuffle is True, permute all samples
    if shuffle:
        idx = torch.randperm(X.shape[0])
        X = X[idx]
        y = y[idx]
    return X, y
# Draw 1000 samples
n_samples = 1000
X, y = make_multiclass_classification(n_samples=n_samples, n_features=2, n_classes=3, noise=0.2)
# Visualize the generated dataset; color encodes the class
plt.figure(figsize=(5,5))
plt.scatter(x=X[:, 0].tolist(), y=X[:, 1].tolist(), marker='*', c=y.tolist())
plt.savefig('linear-dataset-vis2.pdf')
plt.show()
输出结果:
将实验数据拆分成训练集、验证集和测试集。其中训练集640条、验证集160条、测试集200条。
num_train = 640  # training-set size
num_dev = 160    # validation-set size
num_test = 200   # test-set size
X_train, y_train = X[:num_train], y[:num_train]
X_dev, y_dev = X[num_train:num_train + num_dev], y[num_train:num_train + num_dev]
X_test, y_test = X[num_train + num_dev:], y[num_train + num_dev:]
# Print the shapes of X_train and y_train
print("X_train shape: ", X_train.shape, "y_train shape: ", y_train.shape)
# NOTE(review): this print duplicates the one above — likely a copy-paste leftover
print("X_train shape: ", X_train.shape, "y_train shape: ", y_train.shape)
# Print the labels of the first 5 samples
print(y_train[:5])
输出结果:
完成了Multi1000数据集的构建
下面打印前5个数据的标签
print(y_train[:5])
前五个数据标签如下:
tensor([0, 2, 0, 0, 1], dtype=torch.int32)
3.2.2 模型构建
在Softmax回归中,对类别进行预测的方式是预测输入属于每个类别的条件概率。与Logistic回归不同的是,Softmax回归的输出值个数等于类别数 $C$,而每个类别的概率值则通过Softmax函数进行求解。
Softmax函数的代码实现如下:
# x为tensor
def softmax(X):
    """
    Row-wise softmax.

    Input:
        - X: shape=[N, C], N vectors of dimension C
    Output:
        - shape=[N, C], each row non-negative and summing to 1
    """
    # Subtract the per-row max for numerical stability (softmax is shift-invariant).
    # Fix: the original used torch.max(X), a single global scalar, although the
    # accompanying comment claimed a per-row [N, 1] maximum.
    x_max = torch.max(X, dim=1, keepdim=True)[0]  # [N, 1]
    x_exp = torch.exp(X - x_max)
    partition = torch.sum(x_exp, dim=1, keepdim=True)  # [N, 1]
    return x_exp / partition
# Inspect how softmax normalizes each row
X = torch.tensor([[0.1, 0.2, 0.3, 0.4],[1,2,3,4]])
predict = softmax(X)
print(predict)
输出结果:
tensor([0, 2, 0, 0, 1], dtype=torch.int32)
tensor([[0.2138, 0.2363, 0.2612, 0.2887],
[0.0321, 0.0871, 0.2369, 0.6439]])
3.2.2.2 Softmax回归算子
在Softmax回归中,类别标签 $y \in \{1, \dots, C\}$。给定一个样本 $\boldsymbol{x}$,使用Softmax回归预测其属于类别 $c$ 的条件概率为 $p(y=c \mid \boldsymbol{x}) = \mathrm{softmax}(\boldsymbol{w}_c^\top \boldsymbol{x} + b_c)$:
根据公式实现Softmax回归算子,代码实现如下:
class model_SR(op.Op):
    """Softmax regression: linear scores followed by a softmax over classes."""

    def __init__(self, input_dim, output_dim):
        super(model_SR, self).__init__()
        self.params = {
            # Weights initialized to zeros
            'W': torch.zeros([input_dim, output_dim]),
            # Bias initialized to zeros
            'b': torch.zeros([output_dim]),
        }
        self.outputs = None

    def __call__(self, inputs):
        return self.forward(inputs)

    def forward(self, inputs):
        """
        Input:
            - inputs: shape=[N, D], N samples with D features
        Output:
            - outputs: predicted class probabilities, shape=[N, C]
        """
        # Linear scores, then softmax normalization over the class dimension
        scores = torch.matmul(inputs, self.params['W']) + self.params['b']
        self.outputs = softmax(scores)
        return self.outputs
# Randomly generate 1 sample of length 4
inputs = torch.randn([1,4])
print('Input is:', inputs)
# Instantiate the model: input length 4, 3 output classes
model = model_SR(input_dim=4, output_dim=3)
outputs = model(inputs)
print('Output is:', outputs)
输出结果:
Input is: tensor([[ 1.2374, -0.4203, 1.5545, 0.2168]])
Output is: tensor([[0.3333, 0.3333, 0.3333]])
思考题:Logistic函数是激活函数。Softmax函数是激活函数么?谈谈你的看法。
是激活函数,Softmax函数可以将上一层的原始数据进行归一化,转化为一个(0,1)之间的数值,这些数值可以被当做概率分布,用来作为多分类的目标预测值。
3.2.3 损失函数
多类交叉熵损失函数的代码实现如下:
class MultiCrossEntropyLoss(op.Op):
    """Multi-class cross-entropy loss over predicted class probabilities."""

    def __init__(self):
        self.predicts = None  # cached predictions
        self.labels = None    # cached labels
        self.num = None       # number of samples N

    def __call__(self, predicts, labels):
        return self.forward(predicts, labels)

    def forward(self, predicts, labels):
        """
        Inputs:
            - predicts: predicted class probabilities, shape=[N, C]
              (NOTE(review): the original docstring said [N, 1], but the
              predicts[i][index] lookup below requires one entry per class)
            - labels: ground-truth class indices, shape=[N]
        Output:
            - loss value, shape=[1]
        """
        self.predicts = predicts
        self.labels = labels
        self.num = self.predicts.shape[0]
        loss = 0
        # Accumulate -log p(correct class) for each sample
        for i in range(0, self.num):
            index = self.labels[i]
            loss -= torch.log(self.predicts[i][index])
        return loss / self.num
# Quick check
# Assume the true label is class 0
labels = torch.tensor([0])
# Evaluate the risk on the previous model outputs
mce_loss = MultiCrossEntropyLoss()
print(mce_loss(outputs, labels))
输出结果:
tensor(1.0876)
3.2.4 模型优化
使用3.1.4.2中实现的梯度下降法进行参数更新
class model_SR(op.Op):
    """Softmax regression operator with hand-derived analytic gradients."""

    def __init__(self, input_dim, output_dim):
        super(model_SR, self).__init__()
        self.params = {}
        # Initialize the linear layer's weights to zeros
        self.params['W'] = torch.zeros([input_dim, output_dim])
        # self.params['W'] = torch.normal(mean=0, std=0.01, size=[input_dim, output_dim])
        # Initialize the bias to zeros
        self.params['b'] = torch.zeros([output_dim])
        # Parameter gradients filled in by backward()
        self.grads = {}
        self.X = None        # cached inputs for backward
        self.outputs = None  # cached softmax outputs for backward
        self.output_dim = output_dim

    def __call__(self, inputs):
        return self.forward(inputs)

    def forward(self, inputs):
        self.X = inputs
        # Linear scores
        score = torch.matmul(self.X, self.params['W']) + self.params['b']
        # Softmax over classes
        self.outputs = softmax(score)
        return self.outputs

    def backward(self, labels):
        """
        Input:
            - labels: ground-truth class indices, shape=[N]
        """
        # Compute the analytic gradients
        N = labels.shape[0]
        # Fix: one_hot requires int64 labels; the original cast to float32,
        # which makes torch.nn.functional.one_hot raise a RuntimeError
        labels = torch.nn.functional.one_hot(labels.to(torch.int64), self.output_dim)
        self.grads['W'] = -1 / N * torch.matmul(self.X.t(), (labels - self.outputs))
        # Bias gradient is the column sum of (labels - outputs), scaled
        self.grads['b'] = -1 / N * torch.matmul(torch.ones([N]), (labels - self.outputs))
3.2.5 模型训练
实例化RunnerV2类,并传入训练配置。使用训练集和验证集进行模型训练,共训练500个epoch。每隔50个epoch打印训练集上的指标。
# Fix the random seed so every run produces the same result
torch.manual_seed(0)
# Feature dimensionality
input_dim = 2
# Number of classes
output_dim = 3
# Learning rate
lr = 0.1
# Instantiate the model
model = model_SR(input_dim=input_dim, output_dim=output_dim)
# Optimizer
optimizer = SimpleBatchGD(init_lr=lr, model=model)
# Loss function
loss_fn = MultiCrossEntropyLoss()
# Evaluation metric
metric = accuracy
# Instantiate RunnerV2
runner = RunnerV2(model, optimizer, metric, loss_fn)
# Train the model.
# Fix: the original passed the misspelled kwarg `log_eopchs=50`, which
# kwargs.get silently ignored (logging fell back to every 100 epochs, as the
# recorded output shows), plus an unused `eval_epochs=1`.
runner.train([X_train, y_train], [X_dev, y_dev], num_epochs=500, log_epochs=50, save_path="best_model.pdparams")
输出结果:
best accuracy performence has been updated: 0.00000 --> 0.70625
[Train] epoch: 0, loss: 1.0986149311065674, score: 0.3218750059604645
[Dev] epoch: 0, loss: 1.0805636644363403, score: 0.706250011920929
best accuracy performence has been updated: 0.70625 --> 0.71250
best accuracy performence has been updated: 0.71250 --> 0.71875
best accuracy performence has been updated: 0.71875 --> 0.72500
best accuracy performence has been updated: 0.72500 --> 0.73125
best accuracy performence has been updated: 0.73125 --> 0.73750
best accuracy performence has been updated: 0.73750 --> 0.74375
best accuracy performence has been updated: 0.74375 --> 0.75000
best accuracy performence has been updated: 0.75000 --> 0.75625
best accuracy performence has been updated: 0.75625 --> 0.76875
best accuracy performence has been updated: 0.76875 --> 0.77500
best accuracy performence has been updated: 0.77500 --> 0.78750
[Train] epoch: 100, loss: 0.7155235409736633, score: 0.768750011920929
[Dev] epoch: 100, loss: 0.7977758049964905, score: 0.7875000238418579
best accuracy performence has been updated: 0.78750 --> 0.79375
best accuracy performence has been updated: 0.79375 --> 0.80000
[Train] epoch: 200, loss: 0.6921819448471069, score: 0.784375011920929
[Dev] epoch: 200, loss: 0.8020225763320923, score: 0.793749988079071
best accuracy performence has been updated: 0.80000 --> 0.80625
[Train] epoch: 300, loss: 0.6840381026268005, score: 0.7906249761581421
[Dev] epoch: 300, loss: 0.81141597032547, score: 0.8062499761581421
best accuracy performence has been updated: 0.80625 --> 0.81250
[Train] epoch: 400, loss: 0.6802140474319458, score: 0.807812511920929
[Dev] epoch: 400, loss: 0.819807231426239, score: 0.8062499761581421
Process finished with exit code 0
3.2.6 模型评价
使用测试集对训练完成后的最终模型进行评价,观察模型在测试集上的准确率。
score, loss = runner.evaluate([X_test, y_test])
print("[Test] score/loss: {:.4f}/{:.4f}".format(score, loss))
输出结果:
[Test] score/loss: 0.8400/0.7014
3.3 实践:基于Softmax回归完成鸢尾花分类任务
步骤:数据处理、模型构建、损失函数定义、优化器构建、模型训练、模型评价和模型预测等,
数据处理:根据网络接收的数据格式,完成相应的预处理操作,保证模型正常读取;
模型构建:定义Softmax回归模型类;
训练配置:训练相关的一些配置,如:优化算法、评价指标等;
组装Runner类:Runner用于管理模型训练和测试过程;
模型训练和测试:利用Runner进行模型训练、评价和测试。
(说明:使用深度学习进行实践时的操作流程基本一致,后文不再赘述。)
主要配置:
数据:Iris数据集;
模型:Softmax回归模型;
损失函数:交叉熵损失;
优化器:梯度下降法;
评价指标:准确率。
3.3.1 数据处理
对数据集中的缺失值或异常值等情况进行分析和处理,保证数据可以被模型正常读取。
- 缺失值检测
-
import pandas as pd # 开源数据分析和操作工具 # 利用pandas加载Iris的数据集 data = pd.read_csv("Iris.csv") # 预览前5行数据 data.head() print(data.isna().sum()) # 查找缺失值
输出结果:
Id 0
SepalLengthCm 0
SepalWidthCm 0
PetalLengthCm 0
PetalWidthCm 0
Species 0
dtype: int64
进程已结束,退出代码为 0
从结果来看没有缺失值
- 异常值处理
-
import matplotlib.pyplot as plt #可视化工具 # 箱线图查看异常值分布 def boxplot(features): feature_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width'] # 连续画几个图片 plt.figure(figsize=(5, 5), dpi=200) # 子图调整 plt.subplots_adjust(wspace=0.6) # 每个特征画一个箱线图 for i in range(4): plt.subplot(2, 2, i+1) # 画箱线图 plt.boxplot(features[:, i], showmeans=True, whiskerprops={"color":"#E20079", "linewidth":0.4, 'linestyle':"--"}, flierprops={"markersize":0.4}, meanprops={"markersize":1}) # 图名 plt.title(feature_names[i], fontdict={"size":5}, pad=2) # y方向刻度 plt.yticks(fontsize=4, rotation=90) plt.tick_params(pad=0.5) # x方向刻度 plt.xticks([]) plt.savefig('ml-vis.pdf') plt.show() boxplot(iris_features)
输出结果:
-
3.1.2 数据集读取
# 加载数据集
def load_data(shuffle=True):
    """
    Load the Iris dataset.
    Input:
        - shuffle: whether to shuffle the data, bool
    Output:
        - X: feature data, shape=[150, 4]
        - y: label data, shape=[150]
    """
    # Load the raw data.
    # NOTE(review): load_iris is presumably sklearn.datasets.load_iris — its
    # import is not visible in this file; confirm it exists at the call site.
    X = np.array(load_iris().data, dtype=np.float32)
    y = np.array(load_iris().target, dtype=np.int32)
    X = torch.tensor(X)
    y = torch.tensor(y)
    # Min-max normalize each feature column to [0, 1]
    X_min = torch.min(X, 0)[0]
    X_max = torch.max(X, 0)[0]
    X = (X-X_min) / (X_max-X_min)
    # If shuffle is True, randomly permute the samples
    if shuffle:
        idx = torch.randperm(X.shape[0])
        X = X[idx]
        y = y[idx]
    return X, y
# Fix the random seed
# (fix: torch.seed() sets a *random* seed; manual_seed makes runs reproducible)
torch.manual_seed(0)
num_train = 120  # training-set size
num_dev = 15     # validation-set size
num_test = 15    # test-set size
X, y = load_data(shuffle=True)
print("X shape: ", X.shape, "y shape: ", y.shape)
X_train, y_train = X[:num_train], y[:num_train]
X_dev, y_dev = X[num_train:num_train + num_dev], y[num_train:num_train + num_dev]
X_test, y_test = X[num_train + num_dev:], y[num_train + num_dev:]
输出结果:
X shape: torch.Size([150, 4]) y shape: torch.Size([150])
X_train shape: torch.Size([120, 4]) y_train shape: torch.Size([120])
tensor([1, 0, 2, 1, 1], dtype=torch.int32)
3.3.2 模型构建
使用Softmax回归模型进行鸢尾花分类实验,将模型的输入维度定义为4,输出维度定义为3。
# Input dimensionality (the 4 iris measurements)
input_dim = 4
# Number of classes
output_dim = 3
# Instantiate the model
model = model_SR(input_dim=input_dim, output_dim=output_dim)
输出结果:
X shape: torch.Size([150, 4]) y shape: torch.Size([150])
X_train shape: torch.Size([120, 4]) y_train shape: torch.Size([120])
tensor([1, 1, 1, 2, 2], dtype=torch.int32)
3.3.3 模型训练
# Learning rate
lr = 0.2
# Gradient-descent optimizer
optimizer = SimpleBatchGD(init_lr=lr, model=model)
# Cross-entropy loss
loss_fn = MultiCrossEntropyLoss()
# Accuracy metric
metric = accuracy
# Instantiate RunnerV2
runner = RunnerV2(model, optimizer, metric, loss_fn)
# Start training
runner.train([X_train, y_train], [X_dev, y_dev], num_epochs=200, log_epochs=10, save_path="best_model.pdparams")
输出结果:
best accuracy performence has been updated: 0.00000 --> 0.70625
[Train] epoch: 0, loss: 1.0986149311065674, score: 0.3218750059604645
[Dev] epoch: 0, loss: 1.0631749629974365, score: 0.706250011920929
best accuracy performence has been updated: 0.70625 --> 0.71250
best accuracy performence has been updated: 0.71250 --> 0.71875
best accuracy performence has been updated: 0.71875 --> 0.72500
best accuracy performence has been updated: 0.72500 --> 0.73125
best accuracy performence has been updated: 0.73125 --> 0.73750
best accuracy performence has been updated: 0.73750 --> 0.74375
best accuracy performence has been updated: 0.74375 --> 0.75625
best accuracy performence has been updated: 0.75625 --> 0.76875
best accuracy performence has been updated: 0.76875 --> 0.77500
best accuracy performence has been updated: 0.77500 --> 0.78750
best accuracy performence has been updated: 0.78750 --> 0.79375
best accuracy performence has been updated: 0.79375 --> 0.80000
[Train] epoch: 100, loss: 0.692050576210022, score: 0.784375011920929
[Dev] epoch: 100, loss: 0.8021999597549438, score: 0.793749988079071
best accuracy performence has been updated: 0.80000 --> 0.80625
best accuracy performence has been updated: 0.80625 --> 0.81250
[Train] epoch: 200, loss: 0.6801761388778687, score: 0.809374988079071
[Dev] epoch: 200, loss: 0.8199884295463562, score: 0.8062499761581421
[Train] epoch: 300, loss: 0.6770952939987183, score: 0.824999988079071
[Dev] epoch: 300, loss: 0.8324155807495117, score: 0.8125
[Train] epoch: 400, loss: 0.6760858297348022, score: 0.832812488079071
[Dev] epoch: 400, loss: 0.8403991460800171, score: 0.8125
[Test] score/loss: 0.8450/0.7047
Process finished with exit code 0
3.3.4 模型评价
# Load the best checkpoint saved during training
runner.load_model('best_model.pdparams')
# Evaluate on the test set
score, loss = runner.evaluate([X_test, y_test])
输出结果:
[Test] score/loss: 0.8550/0.7134
为了加深对机器学习模型的理解,请自己动手完成以下实验:
- 尝试调整学习率和训练轮数等超参数,观察是否能够得到更高的精度;(必须完成)
将学习率提高至1,训练次数500次
[Test] score/loss: 0.8450/0.7063
将学习率下降至0.01,训练次数100次
[Test] score/loss: 0.7800/0.9365
将学习率下降至0.001,训练次数1000次
[Test] score/loss: 0.7800/0.9366
实验心得
通过本次实验完善了runner类,学会了基于Logistic回归完成二分类任务,基于softmax回归完成多分类回归,同时应用了softmax完成了对Iris数据集的分类任务,关键是完整的体验了一把线性分类的全过程,感觉非常充实。