X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):  # num_iterations -- number of iterations
    # Forward propagation
    a, caches = forward_propagation(X, parameters)
    # Compute cost.
    cost = compute_cost(a, Y)
    # Backward propagation.
    grads = backward_propagation(X, Y, caches)
    # Update parameters.
    parameters = update_parameters(parameters, grads)

X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
m = X.shape[1]                      # number of training examples
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1, m))
for i in range(0, num_iterations):
    for j in range(0, m):  # train on one example at a time
        # Forward propagation
        AL, caches = forward_propagation(shuffled_X[:, j].reshape(-1, 1), parameters)
        # Compute cost
        cost = compute_cost(AL, shuffled_Y[:, j].reshape(1, 1))
        # Backward propagation
        grads = backward_propagation(shuffled_X[:, j].reshape(-1, 1), shuffled_Y[:, j].reshape(1, 1), caches)
        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

Mini-batch gradient descent is a compromise between batch gradient descent and stochastic gradient descent: each parameter update uses only a subset of the training examples, whose size is the $batch\_size$. Hence, if $batch\_size = 1$ it reduces to SGD, and if $batch\_size = m$ it reduces to batch gradient descent. $batch\_size$ is usually chosen as a power of two, typically $2, 4, 8, 16, 32, 64, 128, 256, 512$ (values above 512 are rare), because power-of-two sizes make better use of GPU acceleration. In practice, deep learning almost always uses mini-batch gradient descent. (Many people simply call mini-batch gradient descent, a.k.a. stochastic mini-batch gradient descent, "SGD", so when you see "SGD" in a deep learning context it usually means mini-batch gradient descent.) The figures below illustrate how mini-batch gradient descent works (images from Andrew Ng's deep learning course):
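Before the figures, a quick back-of-the-envelope comparison of how many parameter updates one epoch (one full pass over the training set) produces under each scheme. The numbers m = 1000 and batch_size = 64 below are made up purely for illustration:

import math

m = 1000            # number of training examples (made up for this example)
batch_size = 64     # a typical power-of-two mini-batch size

updates_batch_gd  = 1                          # batch GD: one update per epoch
updates_sgd       = m                          # SGD: 1000 updates per epoch
updates_minibatch = math.ceil(m / batch_size)  # mini-batch GD: 15 full batches + 1 batch of 40 = 16

print(updates_batch_gd, updates_sgd, updates_minibatch)   # 1 1000 16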

1. First, split the training set into multiple mini-batches.

import numpy as np

# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    mini_batch_size -- size of the mini-batches, integer

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    np.random.seed(seed)            # To make your "random" minibatches the same as ours
    m = X.shape[1]                  # number of training examples
    mini_batches = []

    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((1, m))

    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = m // mini_batch_size   # number of full mini-batches
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[:, k * mini_batch_size: (k + 1) * mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k * mini_batch_size: (k + 1) * mini_batch_size]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size: m]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size: m]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches
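As a quick sanity check of random_mini_batches, here is a small usage example on toy data (the shapes below are made up for illustration). With 148 examples and a mini-batch size of 64, we expect two full mini-batches plus a final one of 20 examples:

import numpy as np

X = np.random.randn(3, 148)               # 148 examples with 3 features each
Y = np.random.randint(0, 2, (1, 148))     # binary labels, shape (1, m)

mini_batches = random_mini_batches(X, Y, mini_batch_size=64, seed=0)

print(len(mini_batches))                  # 3
for mb_X, mb_Y in mini_batches:
    print(mb_X.shape, mb_Y.shape)         # (3, 64) (1, 64), (3, 64) (1, 64), (3, 20) (1, 20)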


seed = 0
for i in range(0, num_iterations):
    # Define the random minibatches. We increment the seed to reshuffle the dataset differently after each epoch
    seed = seed + 1
    minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
    for minibatch in minibatches:
        # Select a minibatch
        (minibatch_X, minibatch_Y) = minibatch
        # Forward propagation
        AL, caches = forward_propagation(minibatch_X, parameters)
        # Compute cost
        cost = compute_cost(AL, minibatch_Y)
        # Backward propagation
        grads = backward_propagation(minibatch_X, minibatch_Y, caches)
        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)
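The skeleton above leaves forward_propagation, compute_cost, backward_propagation, and update_parameters unspecified. As a minimal runnable sketch, the code below fills them in for a single sigmoid unit (logistic regression) trained on made-up toy data, reusing random_mini_batches defined above; these simplified helpers are stand-ins for the deep-network versions from the course, not the course's actual implementations.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def initialize_parameters(n_x):
    # A single logistic unit: weights of shape (1, n_x) and a scalar bias.
    return {"W": np.zeros((1, n_x)), "b": np.zeros((1, 1))}

def forward_propagation(X, parameters):
    # AL = sigmoid(W X + b); the cache holds what backprop needs.
    Z = np.dot(parameters["W"], X) + parameters["b"]
    AL = sigmoid(Z)
    return AL, AL          # for this tiny model the cache is just AL

def compute_cost(AL, Y):
    # Binary cross-entropy averaged over the mini-batch.
    m = Y.shape[1]
    return float(-np.sum(Y * np.log(AL) + (1 - Y) * np.log(1 - AL)) / m)

def backward_propagation(X, Y, cache):
    AL = cache
    m = X.shape[1]
    dZ = AL - Y                                    # dJ/dZ for sigmoid + cross-entropy
    return {"dW": np.dot(dZ, X.T) / m,
            "db": np.sum(dZ, axis=1, keepdims=True) / m}

def update_parameters(parameters, grads, learning_rate):
    parameters["W"] = parameters["W"] - learning_rate * grads["dW"]
    parameters["b"] = parameters["b"] - learning_rate * grads["db"]
    return parameters

# Made-up toy data: 2 features, 200 examples, label 1 when x1 + x2 > 0.
np.random.seed(1)
X = np.random.randn(2, 200)
Y = (X[0, :] + X[1, :] > 0).astype(int).reshape(1, 200)

parameters = initialize_parameters(n_x=2)
learning_rate = 0.1
mini_batch_size = 64
num_iterations = 100       # here an "iteration" is one epoch (one pass over the data)

seed = 0
for i in range(num_iterations):
    seed += 1                                   # reshuffle differently every epoch
    minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
    for minibatch_X, minibatch_Y in minibatches:
        AL, caches = forward_propagation(minibatch_X, parameters)
        cost = compute_cost(AL, minibatch_Y)
        grads = backward_propagation(minibatch_X, minibatch_Y, caches)
        parameters = update_parameters(parameters, grads, learning_rate)

AL_full, _ = forward_propagation(X, parameters)
print("final cost on the full training set:", compute_cost(AL_full, Y))

On this linearly separable toy problem the cost drops steadily over the epochs; for a deep network the loop structure stays exactly the same and only the four helper functions change.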