import numpy as np


def softmax_loss_naive(W, X, y, reg):
"""
Softmax loss function, naive implementation (with loops)
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)
    N = X.shape[0]
    C = W.shape[1]

    result = X.dot(W)
    # Subtract the per-row max so the exponentials cannot overflow
    # (numerical stability; the softmax probabilities are unchanged).
    result -= np.max(result, axis=1, keepdims=True)
    for i in range(N):
        exp_scores = np.exp(result[i])
        probs = exp_scores / np.sum(exp_scores)
        # Cross-entropy loss for example i: -log of the probability
        # assigned to the correct class y[i].
        loss += -np.log(probs[y[i]])
        for j in range(C):
            # Gradient of the softmax loss w.r.t. column j of W:
            # (p_j - 1{j == y[i]}) * x_i, accumulated over the minibatch.
            dW[:, j] += (probs[j] - (j == y[i])) * X[i]

    # Average over the minibatch and add L2 regularization.
    loss = loss / N + reg * np.sum(W * W)
    dW = dW / N + 2 * reg * W

    return loss, dW
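
# A quick sanity check (a minimal sketch; the shapes, seed, and reg value
# below are arbitrary, not from the assignment): with tiny random weights
# each class gets probability ~1/C, so the unregularized loss should be
# close to log(C), and the analytic gradient should match a
# centered-difference numerical estimate.
if __name__ == "__main__":
    np.random.seed(0)
    D, C, N = 5, 3, 10
    W = 0.001 * np.random.randn(D, C)
    X = np.random.randn(N, D)
    y = np.random.randint(C, size=N)
    reg = 0.1

    loss, dW = softmax_loss_naive(W, X, y, reg)
    print("loss:", loss, "expected ~", np.log(C))

    # Centered-difference gradient check on a few random entries of W.
    h = 1e-5
    for _ in range(5):
        i, j = np.random.randint(D), np.random.randint(C)
        W[i, j] += h
        loss_plus, _ = softmax_loss_naive(W, X, y, reg)
        W[i, j] -= 2 * h
        loss_minus, _ = softmax_loss_naive(W, X, y, reg)
        W[i, j] += h
        num_grad = (loss_plus - loss_minus) / (2 * h)
        print("analytic:", dW[i, j], "numeric:", num_grad)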