[深度之眼 cs231n, Session 7] Notes (8)

softmax.ipynb

Import and preprocess the data

Import packages

from __future__ import print_function
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt

%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # default figure size
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

%load_ext autoreload
%autoreload 2

Load and preprocess the data

def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500):
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'        
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    
    # Split the data into training, validation, test, and development sets
    mask = list(range(num_training, num_training + num_validation))
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = list(range(num_training))
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = list(range(num_test))
    X_test = X_test[mask]
    y_test = y_test[mask]
    mask = np.random.choice(num_training, num_dev, replace=False)
    X_dev = X_train[mask]
    y_dev = y_train[mask]
    
    # Flatten each image into a row vector
    X_train = np.reshape(X_train, (X_train.shape[0], -1))
    X_val = np.reshape(X_val, (X_val.shape[0], -1))
    X_test = np.reshape(X_test, (X_test.shape[0], -1))
    X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
    
    # Subtract the mean image
    mean_image = np.mean(X_train, axis = 0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image
    X_dev -= mean_image
    
    # Append the bias dimension x0 = 1
    X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
    X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
    X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
    X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])    
    return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev

# If the data has been loaded before, delete it first to avoid memory problems
try:
    del X_train, y_train
    del X_test, y_test
    print('Clear previously loaded data.')
except NameError:
    pass

# Call the function
X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
print('dev data shape: ', X_dev.shape)
print('dev labels shape: ', y_dev.shape)
# Train data shape:  (49000, 3073)
# Train labels shape:  (49000,)
# Validation data shape:  (1000, 3073)
# Validation labels shape:  (1000,)
# Test data shape:  (1000, 3073)
# Test labels shape:  (1000,)
# dev data shape:  (500, 3073)
# dev labels shape:  (500,)

Compute the loss and gradient

Complete softmax_loss_naive() in softmax.py first, then run the code below.

from cs231n.classifiers.softmax import softmax_loss_naive
import time

# Randomly initialize the weights and use them to compute the loss
W = np.random.randn(3073, 10) * 0.0001
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)

# Print the loss; it should be close to -log(0.1)
print('loss: %f' % loss)
print('sanity check: %f' % (-np.log(0.1)))
# loss: 2.330105
# sanity check: 2.302585

Q1: Why do we expect the loss to be close to -log(0.1)?
A: W is initialized to values very close to 0, so for any image the scores of all 10 classes are approximately 0.
In that case Li ≈ -log( exp(0) / ∑exp(0) ) = -log(1/10) = -log(0.1) ≈ 2.3.
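
A quick numerical check of this reasoning (a minimal sketch; the all-zero score vector is only for illustration):

import numpy as np

# With all 10 class scores equal to 0, each softmax probability is exactly 1/10,
# so the per-sample loss is -log(0.1)
scores = np.zeros(10)
probs = np.exp(scores) / np.sum(np.exp(scores))
print(-np.log(probs[0]))   # 2.302585..., the same as -np.log(0.1)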

Verify the analytic gradient with numerical gradients

from cs231n.gradient_check import grad_check_sparse

# Check the gradient without regularization
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)
f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)

# Check the gradient with regularization
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
# The relative error should be around 1e-8 this time. Since the softmax derivative is continuous, no error should exceed 1e-4.

Complete softmax_loss_vectorized() in softmax.py first, then run the code below.

# The vectorized computation should be faster than the naive one
tic = time.time()
loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('naive loss: %e computed in %fs' % (loss_naive, toc - tic))

from cs231n.classifiers.softmax import softmax_loss_vectorized
tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))

# The vectorized and naive gradients should be identical
grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized))
print('Gradient difference: %f' % grad_difference)

Hyperparameter selection

# With good hyperparameters you should reach about 0.35 accuracy on the validation set
from cs231n.classifiers import Softmax

results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 2e-7, 3e-7, 4e-7]
regularization_strengths = [2.5e4, 5e4, 7.5e4]

from copy import deepcopy
for lr in learning_rates:
    for reg in regularization_strengths:
        clf = Softmax()
        clf.train(X_train, y_train, lr, reg, num_iters=1500)
        y_train_pred = clf.predict(X_train)
        y_val_pred = clf.predict(X_val)
        # Compute the accuracies
        train_accuracy = np.mean(y_train_pred==y_train)
        val_accuracy = np.mean(y_val_pred==y_val)
        # Save the accuracies
        results[(lr,reg)] = (train_accuracy,val_accuracy)
        if val_accuracy>best_val:
            best_val = val_accuracy
            best_softmax = deepcopy(clf)
   
# Print the results
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
                lr, reg, train_accuracy, val_accuracy))    

print('best validation accuracy achieved during cross-validation: %f' % best_val)
# best validation accuracy achieved during cross-validation: 0.351000

Evaluate the final model on the test set

y_test_pred = best_softmax.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('softmax on raw pixels final test set accuracy: %f' % (test_accuracy, ))

Q2: Is it possible that adding one data sample leaves the SVM loss unchanged while the softmax loss changes?
A: Yes. For the SVM, if the sample's correct-class score is far higher than every other class score, all the hinge terms max(0, s_j - s_yi + 1) are zero, so the sample contributes no loss and the total SVM loss does not change.
For softmax, the per-sample loss -log( exp(s_yi) / ∑j exp(s_j) ) is always strictly greater than 0, because the correct-class probability can never reach exactly 1, so the added sample always changes the total loss.
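
A small numerical illustration of this (a sketch with made-up scores; the class index and values are only for demonstration):

import numpy as np

# Hypothetical scores for one new sample whose correct class (index 0) dominates
scores = np.array([10.0, -5.0, -8.0])
correct = 0

# SVM hinge loss: all margins s_j - s_correct + 1 are negative, so the loss is 0
margins = np.maximum(0, scores - scores[correct] + 1)
margins[correct] = 0            # the correct class is not counted
print(margins.sum())            # 0.0 -> total SVM loss is unchanged

# Softmax loss: the correct-class probability is < 1, so the loss is > 0
probs = np.exp(scores - np.max(scores))
probs /= probs.sum()
print(-np.log(probs[correct]))  # tiny but strictly positive -> total softmax loss changes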

Visualize the weights

# Visualize the learned weights
w = best_softmax.W[:-1,:] # drop the bias row
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
plt.figure(figsize=(10,4))

for i in range(10):
    plt.subplot(2, 5, i + 1)    
    # w[:, :, :, i] already has shape (32, 32, 3), so squeeze() changes nothing here
    # Rescale the weights into the 0-255 pixel range
    wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
    plt.imshow(wimg.astype('uint8'))
    plt.axis('off')
    # Show the class name
    plt.title(classes[i])

(Figure: the learned weight templates, one 32x32x3 image per CIFAR-10 class.)

softmax.py

Compute the loss and gradient with loops

Loss function. A constant c = max_j(s_j) is subtracted from the scores so that exp(score) does not overflow when the scores are large:

Li = -log( exp(s_yi - c) / ∑j exp(s_j - c) ) = log( ∑j exp(s_j - c) ) - (s_yi - c)

Gradient formula, where p_j = exp(s_j - c) / ∑k exp(s_k - c) is the softmax probability of class j:

dLi/dw_j = (p_j - 1[j = y_i]) · x_i
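
For reference, a brief sketch of where the gradient formula comes from (standard chain-rule steps, written in LaTeX; not part of the original notes):

L_i = -\log p_{y_i}, \qquad p_j = \frac{e^{s_j}}{\sum_k e^{s_k}}, \qquad s_j = w_j^\top x_i

\frac{\partial L_i}{\partial s_j} = p_j - \mathbf{1}[j = y_i]
\quad\Longrightarrow\quad
\frac{\partial L_i}{\partial w_j} = \left(p_j - \mathbf{1}[j = y_i]\right) x_i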

import numpy as np
from random import shuffle
def softmax_loss_naive(W, X, y, reg):
  # Initialization
  loss = 0.0
  dW = np.zeros_like(W)
  num_train = X.shape[0]
  num_classes = W.shape[1]
  
  for i in range(num_train):
    # Compute the scores
    scores = X[i].dot(W)
    # Subtract the max score so that exp(scores) lies in (0, 1], preventing overflow when the scores are large
    scores -= np.max(scores)
    exp_sum = np.sum(np.exp(scores))
    
    # Accumulate the loss
    loss += np.log(exp_sum) - scores[y[i]]
    
    # Accumulate the gradient
    dW[:, y[i]] += -X[i]
    for j in range(num_classes):
        dW[:,j] += (np.exp(scores[j]) / exp_sum) * X[i]
  
  # Average and add regularization
  loss /= num_train
  loss += reg * np.sum(W**2)
  dW /= num_train
  dW += reg * 2 * W
  
  return loss, dW

Compute the loss and gradient with vectorized operations

The vectorized method is similar to the one in the previous assignment. With P the N×C matrix of softmax probabilities and M the one-hot matrix of correct labels (M[i, y_i] = 1), the gradient is

dW = X^T (P - M)

def softmax_loss_vectorized(W, X, y, reg):
  # Initialization
  loss = 0.0
  dW = np.zeros_like(W)
  num_train = X.shape[0]
  num_classes = W.shape[1]
  
  # Compute the scores
  scores = X@W
  # Subtract the row-wise max so that exp(scores) lies in (0, 1], preventing overflow
  scores -= np.max(scores,1).reshape(-1,1)
  e_scores = np.exp(scores)
  
  # Only the correct-class scores enter the loss
  rows = range(num_train)
  correct_class_score = scores[rows,y]  
  loss = np.sum(np.log(np.sum(e_scores,1))-correct_class_score)
  
  # Compute the gradient
  mask = np.zeros(e_scores.shape)
  mask[rows,y] = -1  # the correct class subtracts an extra X[i]
  e_scores /= np.sum(e_scores,1).reshape(-1,1)
  e_scores += mask
  dW = X.T@e_scores  
  
  # Average and add regularization
  loss /= num_train
  loss += reg*np.sum(W**2)
  dW /= num_train
  dW += reg*2*W
  
  return loss, dW