import numpy as np
from random import shuffle


def softmax_loss_naive(W, X, y, reg):
"""
Softmax loss function, naive implementation (with loops)
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)

    #############################################################################
    # TODO: Compute the softmax loss and its gradient using explicit loops.     #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    num_train = X.shape[0]
    num_classes = W.shape[1]
    for i in range(num_train):
        scores = X[i].dot(W)
        # Shift the scores so the maximum is 0; softmax is invariant to this
        # shift and it prevents overflow in np.exp.
        scores -= np.max(scores)
        exp_scores = np.exp(scores)
        sum_exp_scores = np.sum(exp_scores)
        loss += -np.log(exp_scores[y[i]] / sum_exp_scores)
        for j in range(num_classes):
            # dL_i/dW[:, j] = (p_j - 1{j == y[i]}) * X[i], where p_j is the
            # softmax probability of class j.
            p = exp_scores[j] / sum_exp_scores
            if j == y[i]:
                dW[:, j] += (p - 1) * X[i]
            else:
                dW[:, j] += p * X[i]
    # Average over the minibatch and add the regularization term.
    loss = loss / num_train + reg * np.sum(W * W)
    dW = dW / num_train + 2 * reg * W
    #############################################################################
    #                             END OF YOUR CODE                             #
    #############################################################################
    return loss, dW
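
# Example usage of softmax_loss_naive (a sketch; X_dev and y_dev are placeholder
# names for a small dev split, and the CIFAR-10-style shapes are assumptions,
# not part of this file):
#
#     W = np.random.randn(3073, 10) * 0.0001
#     loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)
#     # With small random weights and reg=0, the loss should be close to
#     # -log(1/10) ~= 2.302 for 10 classes.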


def softmax_loss_vectorized(W, X, y, reg):
"""
Softmax loss function, vectorized version.
Inputs and outputs are the same as softmax_loss_naive.
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
#############################################################################
# TODO: Compute the softmax loss and its gradient using no explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
    num_train = X.shape[0]
    scores = X.dot(W)  # shape (N, C)
    # Shift each row so its maximum is 0 to avoid overflow in np.exp.
    scores -= np.max(scores, axis=1, keepdims=True)
    exp_scores = np.exp(scores)
    # Softmax probabilities, shape (N, C).
    p = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    loss = np.sum(-np.log(p[np.arange(num_train), y]))
    # Gradient: dL/dscores = p - 1{j == y_i}; chain rule through scores = X.dot(W).
    p[np.arange(num_train), y] -= 1
    dW = X.T.dot(p) / num_train + 2 * reg * W
    loss = loss / num_train + reg * np.sum(W * W)
    #############################################################################
    #                             END OF YOUR CODE                             #
    #############################################################################
    return loss, dW
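
# A minimal self-check sketch (the toy shapes and variable names below are
# illustrative assumptions): compare the naive and vectorized implementations
# on small random data; the losses and gradients should agree to within
# numerical precision.
if __name__ == "__main__":
    np.random.seed(0)
    N, D, C = 50, 20, 7
    X_check = np.random.randn(N, D)
    y_check = np.random.randint(C, size=N)
    W_check = np.random.randn(D, C) * 0.01

    loss_naive, grad_naive = softmax_loss_naive(W_check, X_check, y_check, reg=0.1)
    loss_vec, grad_vec = softmax_loss_vectorized(W_check, X_check, y_check, reg=0.1)

    print("naive loss:      %f" % loss_naive)
    print("vectorized loss: %f" % loss_vec)
    print("loss difference: %e" % abs(loss_naive - loss_vec))
    print("grad difference: %e" % np.linalg.norm(grad_naive - grad_vec))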