Softmax exercise
In this exercise we will:
- implement a fully vectorized loss function for the Softmax classifier
- implement the fully vectorized expression for its analytic gradient
- check the implementation against the numerical gradient
- use a validation set to tune the learning rate and regularization strength
- optimize the loss function with SGD
- visualize the final learned weights W
Import the setup code as before and load the dataset:
from __future__ import print_function
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the linear classifier. These are the same steps as we used for the
    SVM, but condensed to a single function.
    """
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    # Subsample the data
    mask = list(range(num_training, num_training + num_validation))
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = list(range(num_training))
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = list(range(num_test))
    X_test = X_test[mask]
    y_test = y_test[mask]
    mask = np.random.choice(num_training, num_dev, replace=False)
    X_dev = X_train[mask]
    y_dev = y_train[mask]
    # Preprocessing: reshape the image data into rows
    X_train = np.reshape(X_train, (X_train.shape[0], -1))
    X_val = np.reshape(X_val, (X_val.shape[0], -1))
    X_test = np.reshape(X_test, (X_test.shape[0], -1))
    X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
    # Normalize the data: subtract the mean image
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image
    X_dev -= mean_image
    # Add a bias dimension of ones so the bias folds into W
    X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
    X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
    X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
    X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
    return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev
# Clean up variables to prevent loading the data multiple times (which may cause memory issues)
try:
    del X_train, y_train
    del X_test, y_test
    print('Clear previously loaded data.')
except NameError:
    pass
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
print('dev data shape: ', X_dev.shape)
print('dev labels shape: ', y_dev.shape)
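With the default arguments, these prints should show (49000, 3073) and (49000,) for the training split, (1000, 3073) and (1000,) for both validation and test, and (500, 3073) and (500,) for the dev split: each row is a 32×32×3 image flattened into 3072 values plus the appended bias dimension of 1.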
First, let's review the Softmax loss. Define the class probability as

$$P(Y=k \mid X=x_i)=\frac{e^{s_k}}{\sum_j e^{s_j}}$$
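To make the definition concrete, here is a minimal NumPy sketch (the toy scores `s` are made up for illustration) that turns a score vector into probabilities, using the usual max-shift so `np.exp` cannot overflow:

import numpy as np

s = np.array([3.2, 5.1, -1.7])   # hypothetical class scores s_j
s_shifted = s - np.max(s)        # shifting by max(s) leaves P unchanged
P = np.exp(s_shifted) / np.sum(np.exp(s_shifted))
print(P)                         # approx. [0.130, 0.869, 0.001]
print(P.sum())                   # 1.0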
Then the loss for example $i$ is

$$L_i = -\log P(Y=k \mid X=x_i) = -\log\left(\frac{e^{s_k}}{\sum_j e^{s_j}}\right)$$
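Continuing the sketch above, if the correct class for this example were class 0 (an assumption for illustration), the loss would be:

y_i = 0                  # hypothetical correct class
L_i = -np.log(P[y_i])    # -log(0.130) ≈ 2.04
print(L_i)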
Now derive the gradient by the chain rule:

$$\frac{\partial L}{\partial W}= \frac{\partial L}{\partial P}\,\frac{\partial P}{\partial f}\,\frac{\partial f}{\partial W}$$

where $f = XW$ is the vector of scores and $P = \frac{e^{f_{y_i}}}{\sum_j e^{f_j}}$ is the probability assigned to the correct class. The first factor is

$$\frac{\partial L}{\partial P} = -\frac{1}{P}$$

Multiplying the first two factors and simplifying gives the gradient with respect to the scores:

$$\frac{\partial L_i}{\partial f_k}=\begin{cases} P_k - 1 & k = y_i \\ P_k & k \neq y_i \end{cases}$$

Finally, since $\frac{\partial f}{\partial W} = X^{T}$, the full gradient is $\frac{\partial L_i}{\partial W} = X^{T}\left(P - \mathbb{1}_{y_i}\right)$, where $\mathbb{1}_{y_i}$ is the one-hot encoding of the label.
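Before writing the full loss, the key step of the derivation, $\partial L_i/\partial f_k = P_k - \mathbb{1}[k = y_i]$, can be sanity-checked numerically on the toy scores (the values are again made up):

def loss_from_scores(s, y_i):
    s = s - np.max(s)  # numeric stability
    return -np.log(np.exp(s[y_i]) / np.sum(np.exp(s)))

s = np.array([3.2, 5.1, -1.7])
y_i = 0
P = np.exp(s - np.max(s)) / np.sum(np.exp(s - np.max(s)))
analytic = P.copy()
analytic[y_i] -= 1   # P_k - 1 for the correct class
h = 1e-5
numeric = np.array([(loss_from_scores(s + h * np.eye(3)[k], y_i)
                     - loss_from_scores(s - h * np.eye(3)[k], y_i)) / (2 * h)
                    for k in range(3)])
print(np.max(np.abs(analytic - numeric)))   # should be tiny, around 1e-10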
def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops)
    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.
    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength
    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)
    #############################################################################
    # TODO: Compute the softmax loss and its gradient using explicit loops.     #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    num_train = X.shape[0]
    num_classes = W.shape[1]
    for k in range(num_train):
        scores = np.dot(X[k], W)
        scores -= np.max(scores)          # shift scores for numeric stability
        exp_scores = np.exp(scores)
        denom = np.sum(exp_scores)
        loss += -np.log(exp_scores[y[k]] / denom)
        for i in range(num_classes):
            P_i = exp_scores[i] / denom   # softmax probability of class i
            if i == y[k]:
                P_i -= 1                  # subtract 1 for the correct class
            dW[:, i] += P_i * X[k, :]
    loss = loss / num_train + 0.5 * reg * np.sum(W * W)
    dW = dW / num_train + reg * W
    #############################################################################
    #                          END OF YOUR CODE                                 #
    #############################################################################
    return loss, dW
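As in the SVM exercise, this implementation can be checked against a numerical gradient with the assignment's grad_check_sparse helper (a sketch, assuming the usual cs231n assignment layout):

from cs231n.gradient_check import grad_check_sparse

# Generate a random small weight matrix and compute loss and gradient on the dev set.
W = np.random.randn(3073, 10) * 0.0001
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)

# Sanity check: with random weights each of the 10 classes is roughly equally
# likely, so the loss should be close to -log(0.1) ≈ 2.3.
print('loss: %f (should be about %f)' % (loss, -np.log(0.1)))

# Compare the analytic gradient to the numerical one at a few random entries.
f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_check_sparse(f, W, grad, 10)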
def softmax_loss_vectorized(W, X, y, reg):
    """
    Softmax loss function, vectorized version.
    Inputs and outputs are the same as softmax_loss_naive.
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)
    #############################################################################
    # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    num_train = X.shape[0]
    # ---------------- compute the loss ----------------
    scores = np.dot(X, W)                               # (N, C) class scores
    scores -= np.max(scores, axis=1, keepdims=True)     # shift for numeric stability
    exp_scores = np.exp(scores)
    denom = np.sum(exp_scores, axis=1, keepdims=True)   # (N, 1) denominators
    P = exp_scores / denom                              # (N, C) softmax probabilities
    correct_logprobs = -np.log(P[range(num_train), y])  # loss per example
    loss = np.sum(correct_logprobs) / num_train + 0.5 * reg * np.sum(W * W)
    # ---------------- compute the gradient ----------------
    P[range(num_train), y] -= 1                         # P_k - 1 for the correct class
    dW = np.dot(X.T, P) / num_train + reg * W
    #############################################################################
    #                          END OF YOUR CODE                                 #
    #############################################################################
    return loss, dW
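It is worth timing the two implementations and confirming they agree; a sketch along the lines of the notebook's comparison cell:

import time

W = np.random.randn(3073, 10) * 0.0001

tic = time.time()
loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('naive loss: %e computed in %fs' % (loss_naive, toc - tic))

tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))

# Both versions should compute the same loss and gradient.
print('loss difference: %f' % np.abs(loss_naive - loss_vectorized))
print('gradient difference: %f' % np.linalg.norm(grad_naive - grad_vectorized, ord='fro'))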
As in the SVM exercise, we use the validation set to select the hyperparameters:
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of over 0.35 on the validation set.
from cs231n.classifiers import Softmax
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7,2e-7,5e-7]
regularization_strengths = [3e4,3.25e4,3.5e4]
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained softmax classifer in best_softmax. #
################################################################################
for lr in learning_rates:
    for reg in regularization_strengths:
        softmax_model = Softmax()
        softmax_model.train(X_train, y_train, learning_rate=lr, reg=reg,
                            num_iters=2000, batch_size=200, verbose=True)
        y_train_pred = softmax_model.predict(X_train)
        y_val_pred = softmax_model.predict(X_val)
        accuracy_train = np.mean(y_train == y_train_pred)
        accuracy_val = np.mean(y_val == y_val_pred)
        if best_val < accuracy_val:
            best_val = accuracy_val
            best_softmax = softmax_model
        results[(lr, reg)] = (accuracy_train, accuracy_val)
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
lr 1.000000e-07 reg 3.000000e+04 train accuracy: 0.340755 val accuracy: 0.144000
lr 1.000000e-07 reg 3.250000e+04 train accuracy: 0.341490 val accuracy: 0.144000
lr 1.000000e-07 reg 3.500000e+04 train accuracy: 0.342000 val accuracy: 0.144000
lr 2.000000e-07 reg 3.000000e+04 train accuracy: 0.342469 val accuracy: 0.144000
lr 2.000000e-07 reg 3.250000e+04 train accuracy: 0.336816 val accuracy: 0.144000
lr 2.000000e-07 reg 3.500000e+04 train accuracy: 0.341429 val accuracy: 0.144000
lr 5.000000e-07 reg 3.000000e+04 train accuracy: 0.342408 val accuracy: 0.144000
lr 5.000000e-07 reg 3.250000e+04 train accuracy: 0.334571 val accuracy: 0.144000
lr 5.000000e-07 reg 3.500000e+04 train accuracy: 0.343347 val accuracy: 0.144000
best validation accuracy achieved during cross-validation: 0.355000
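Finally, the best model found on the validation set can be evaluated on the test set (a short sketch):

# Evaluate the best softmax classifier on the test set.
y_test_pred = best_softmax.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('softmax on raw pixels final test set accuracy: %f' % test_accuracy)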
# Visualize the learned weights for each class
w = best_softmax.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
    plt.subplot(2, 5, i + 1)
    # Rescale the weights to be between 0 and 255
    wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
    plt.imshow(wimg.astype('uint8'))
    plt.axis('off')
    plt.title(classes[i])