# Neural network
import numpy as np
N, D_in, H, D_out = 64, 100, 100, 10
x = np.random.randn(N, D_in)   # 64*100
y = np.random.randn(N, D_out)  # 64*10
w1 = np.random.randn(D_in, H)  # 100*100
w2 = np.random.randn(H, D_out) # 100*10
learning_rate = 1e-6
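# The model below is y_pred = relu(x.dot(w1)).dot(w2) with a sum-of-squared-errors
# loss; both weight matrices are trained by plain gradient descent.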
for t in range(500):
    # Forward pass: compute predicted y
    h = x.dot(w1)  # same as np.dot(x, w1); h: 64*100
    h_relu = np.maximum(h, 0)  # elementwise ReLU; 64*100
    y_pred = h_relu.dot(w2)  # 64*10
    # Compute and print loss
    loss = np.square(y_pred - y).sum()
    print(t, loss)
    # Backprop to compute gradients of w1 and w2 with respect to loss
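    # Each step below is the chain rule applied by hand:
    #   loss = sum((y_pred - y)**2)  =>  dloss/dy_pred = 2 * (y_pred - y)
    #   y_pred = h_relu.dot(w2)      =>  dloss/dw2 = h_relu.T.dot(dloss/dy_pred)
    #                                    dloss/dh_relu = (dloss/dy_pred).dot(w2.T)
    #   h_relu = max(h, 0)           =>  dloss/dh = dloss/dh_relu, zeroed where h < 0
    #   h = x.dot(w1)                =>  dloss/dw1 = x.T.dot(dloss/dh)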
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.T.dot(grad_y_pred)  # h_relu.T is the transpose of h_relu; 100*64
    grad_h_relu = grad_y_pred.dot(w2.T)
    grad_h = grad_h_relu.copy()
    grad_h[h < 0] = 0  # zero the gradient where the ReLU was inactive
    grad_w1 = x.T.dot(grad_h)
    # Update weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
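
# A minimal gradient check, not part of the original script: compare the analytic
# gradient of w2 (same formula as in the loop) against a centered finite-difference
# estimate at the trained weights. The helper name loss_fn and the value of eps are
# assumptions introduced only for this sketch.
def loss_fn(w1, w2):
    return np.square(np.maximum(x.dot(w1), 0).dot(w2) - y).sum()

h_relu = np.maximum(x.dot(w1), 0)
analytic = h_relu.T.dot(2.0 * (h_relu.dot(w2) - y))  # dloss/dw2, as derived above
eps = 1e-5
for i, j in [(0, 0), (3, 7), (50, 2)]:  # spot-check a few coordinates of w2
    w2[i, j] += eps
    loss_plus = loss_fn(w1, w2)
    w2[i, j] -= 2 * eps
    loss_minus = loss_fn(w1, w2)
    w2[i, j] += eps  # restore the weight
    numeric = (loss_plus - loss_minus) / (2 * eps)
    print(i, j, analytic[i, j], numeric)  # the two values should agree closely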