import h5py
import sklearn.datasets
import sklearn.linear_model
import matplotlib
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(1)
# macOS-specific Chinese font; adjust the path on other platforms.
font = fm.FontProperties(fname='/System/Library/Fonts/STHeiti Light.ttc')
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
def sigmoid(input_sum):
    """Sigmoid activation; returns the output and the pre-activation for the cache."""
    output = 1.0 / (1 + np.exp(-input_sum))
    return output, input_sum

def sigmoid_back_propagation(derror_wrt_output, input_sum):
    """Chain rule through the sigmoid: sigma'(x) = sigma(x) * (1 - sigma(x))."""
    output = 1.0 / (1 + np.exp(-input_sum))
    doutput_wrt_dinput = output * (1 - output)
    derror_wrt_dinput = derror_wrt_output * doutput_wrt_dinput
    return derror_wrt_dinput

def relu(input_sum):
    """ReLU activation; returns the output and the pre-activation for the cache."""
    output = np.maximum(0, input_sum)
    return output, input_sum

def relu_back_propagation(derror_wrt_output, input_sum):
    """Chain rule through ReLU: the gradient passes through only where the input was positive."""
    derror_wrt_dinputs = np.array(derror_wrt_output, copy=True)
    derror_wrt_dinputs[input_sum <= 0] = 0
    return derror_wrt_dinputs

def tanh(input_sum):
    """Tanh activation; returns the output and the pre-activation for the cache."""
    output = np.tanh(input_sum)
    return output, input_sum

def tanh_back_propagation(derror_wrt_output, input_sum):
    """Chain rule through tanh: tanh'(x) = 1 - tanh(x)^2."""
    output = np.tanh(input_sum)
    doutput_wrt_dinput = 1 - np.power(output, 2)
    derror_wrt_dinput = derror_wrt_output * doutput_wrt_dinput
    return derror_wrt_dinput
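
# A quick numerical sanity check for the derivatives above (a minimal sketch,
# not part of the original assignment; check_activation_gradients is a
# hypothetical helper). It compares each *_back_propagation result against a
# central finite difference. Call it manually, e.g. from a REPL.
def check_activation_gradients(eps=1e-6):
    # Avoid 0.0, where ReLU is not differentiable.
    x = np.array([-2.0, -0.5, 0.5, 2.0])
    pairs = [("sigmoid", sigmoid, sigmoid_back_propagation),
             ("relu", relu, relu_back_propagation),
             ("tanh", tanh, tanh_back_propagation)]
    for name, forward, backward in pairs:
        numeric = (forward(x + eps)[0] - forward(x - eps)[0]) / (2 * eps)
        analytic = backward(np.ones_like(x), x)
        print(name, "ok" if np.allclose(numeric, analytic, atol=1e-4) else "mismatch")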
def activated(activation_choose, input_sum):
    """Forward dispatch for the chosen activation; falls back to sigmoid."""
    if activation_choose == "sigmoid":
        return sigmoid(input_sum)
    elif activation_choose == "relu":
        return relu(input_sum)
    elif activation_choose == "tanh":
        return tanh(input_sum)
    return sigmoid(input_sum)

def activated_back_propagation(activation_choose, derror_wrt_output, input_sum):
    """Backward dispatch; the third argument is the cached pre-activation, not the output."""
    if activation_choose == "sigmoid":
        return sigmoid_back_propagation(derror_wrt_output, input_sum)
    elif activation_choose == "relu":
        return relu_back_propagation(derror_wrt_output, input_sum)
    elif activation_choose == "tanh":
        return tanh_back_propagation(derror_wrt_output, input_sum)
    return sigmoid_back_propagation(derror_wrt_output, input_sum)
class NeuralNetwork:
    def __init__(self, layers_structure, print_cost=False):
        self.layers_structure = layers_structure
        self.layers_num = len(layers_structure)
        # Number of weighted layers (every layer except the input layer).
        self.param_layers_num = self.layers_num - 1
        self.learning_rate = 0.0618
        self.num_iterations = 2000
        self.x = None
        self.y = None
        self.w = dict()
        self.b = dict()
        self.costs = []
        self.print_cost = print_cost
        self.init_w_and_b()

    def set_learning_rate(self, learning_rate):
        self.learning_rate = learning_rate

    def set_num_iterations(self, num_iterations):
        self.num_iterations = num_iterations

    def set_xy(self, input, expected_output):
        self.x = input
        self.y = expected_output

    def init_w_and_b(self):
        """Scaled Gaussian initialization: W_l ~ N(0, 1/n_{l-1}); biases start at zero."""
        np.random.seed(3)
        for l in range(1, self.layers_num):
            self.w["w" + str(l)] = np.random.randn(self.layers_structure[l], self.layers_structure[l-1]) / np.sqrt(self.layers_structure[l-1])
            self.b["b" + str(l)] = np.zeros((self.layers_structure[l], 1))
        return self.w, self.b
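
    # For example, NeuralNetwork([2, 4, 2]) produces
    #   w1: (4, 2), b1: (4, 1)   (input layer -> hidden layer)
    #   w2: (2, 4), b2: (2, 1)   (hidden layer -> output layer)
    # The 1/sqrt(n_prev) scaling keeps the variance of each weighted sum
    # roughly independent of the previous layer's width.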
    def layer_activation_forward(self, x, w, b, activation_choose):
        # Affine transform z = Wx + b, then the chosen nonlinearity.
        input_sum = np.dot(w, x) + b
        output, _ = activated(activation_choose, input_sum)
        return output, (x, w, b, input_sum)

    def forward_propagation(self, x):
        caches = []
        output_prev = x
        L = self.param_layers_num
        # Hidden layers 1..L-1 use tanh; the output layer uses sigmoid.
        for l in range(1, L):
            input_cur = output_prev
            output_prev, cache = self.layer_activation_forward(input_cur, self.w["w" + str(l)], self.b["b" + str(l)], "tanh")
            caches.append(cache)
        output, cache = self.layer_activation_forward(output_prev, self.w["w" + str(L)], self.b["b" + str(L)], "sigmoid")
        caches.append(cache)
        return output, caches
    def show_caches(self, caches):
        """Print the cached (input, w, b, input_sum) tuple of every layer."""
        i = 1
        for cache in caches:
            print("Layer %d" % i)
            print("  input: %s" % cache[0])
            print("  w: %s" % cache[1])
            print("  b: %s" % cache[2])
            print("  input_sum: %s" % cache[3])
            print("----------")
            i += 1
    def compute_error(self, output):
        """Mean binary cross-entropy: E = -1/m * sum(y*log(o) + (1-y)*log(1-o))."""
        m = self.y.shape[1]
        # Clip so log() stays finite when the sigmoid saturates to exactly 0 or 1.
        output = np.clip(output, 1e-10, 1 - 1e-10)
        error = -np.sum(np.multiply(np.log(output), self.y) + np.multiply(np.log(1 - output), 1 - self.y)) / m
        error = np.squeeze(error)
        return error
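
    # Worked example for a single sample (m = 1): with y = [1, 0] and
    # output = [0.9, 0.2], the error is
    #   -(ln 0.9 + ln(1 - 0.2)) ~= 0.105 + 0.223 = 0.328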
    def layer_activation_backward(self, derror_wrt_output, cache, activation_choose):
        input, w, b, input_sum = cache
        output_prev = input
        m = output_prev.shape[1]
        # Chain rule through the activation gives delta_l = dE/dz_l.
        derror_wrt_dinput = activated_back_propagation(activation_choose, derror_wrt_output, input_sum)
        derror_wrt_dw = np.dot(derror_wrt_dinput, output_prev.T) / m
        derror_wrt_db = np.sum(derror_wrt_dinput, axis=1, keepdims=True) / m
        derror_wrt_output_prev = np.dot(w.T, derror_wrt_dinput)
        return derror_wrt_output_prev, derror_wrt_dw, derror_wrt_db
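
    # In matrix form, for layer l with m samples stored column-wise:
    #   delta_l     = dE/dz_l (from activated_back_propagation)
    #   dE/dW_l     = delta_l @ A_{l-1}.T / m        (shape of W_l)
    #   dE/db_l     = sum of delta_l's columns / m   (shape of b_l)
    #   dE/dA_{l-1} = W_l.T @ delta_l                (passed to the layer below)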
    def back_propagation(self, output, caches):
        grads = {}
        L = self.param_layers_num
        expected_output = self.y
        # Derivative of the cross-entropy error with respect to the network output.
        derror_wrt_output = -(np.divide(expected_output, output) - np.divide(1 - expected_output, 1 - output))
        # Output layer (sigmoid).
        current_cache = caches[L - 1]
        grads["derror_wrt_output" + str(L)], grads["derror_wrt_dw" + str(L)], grads["derror_wrt_db" + str(L)] = \
            self.layer_activation_backward(derror_wrt_output, current_cache, "sigmoid")
        # Hidden layers (tanh), walked from the last hidden layer back to the first.
        for l in reversed(range(L - 1)):
            current_cache = caches[l]
            derror_wrt_output_prev_temp, derror_wrt_dw_temp, derror_wrt_db_temp = \
                self.layer_activation_backward(grads["derror_wrt_output" + str(l + 2)], current_cache, "tanh")
            grads["derror_wrt_output" + str(l + 1)] = derror_wrt_output_prev_temp
            grads["derror_wrt_dw" + str(l + 1)] = derror_wrt_dw_temp
            grads["derror_wrt_db" + str(l + 1)] = derror_wrt_db_temp
        return grads
    def update_w_and_b(self, grads):
        # Plain gradient descent: theta <- theta - learning_rate * dE/dtheta.
        for l in range(self.param_layers_num):
            self.w["w" + str(l + 1)] = self.w["w" + str(l + 1)] - self.learning_rate * grads["derror_wrt_dw" + str(l + 1)]
            self.b["b" + str(l + 1)] = self.b["b" + str(l + 1)] - self.learning_rate * grads["derror_wrt_db" + str(l + 1)]
    def train_model(self):
        """Train the neural network model."""
        np.random.seed(5)
        for i in range(0, self.num_iterations):
            output, caches = self.forward_propagation(self.x)
            cost = self.compute_error(output)
            grads = self.back_propagation(output, caches)
            self.update_w_and_b(grads)
            if self.print_cost and i % 1000 == 0:
                print("Cost after iteration %i: %f" % (i, cost))
                self.costs.append(cost)
        if False:  # flip to True to plot the cost curve
            plt.plot(np.squeeze(self.costs))
            plt.ylabel(u'Network error', fontproperties=font)
            plt.xlabel(u'Iterations (*1000)', fontproperties=font)
            plt.title(u'Learning rate = ' + str(self.learning_rate), fontproperties=font)
            plt.show()
        return self.w, self.b
    def predict_by_model(self, x):
        """Predict class labels for x with one sample per row."""
        output, _ = self.forward_propagation(x.T)
        output = output.T
        # Normalize the two sigmoid outputs and take the larger one as the class.
        result = output / np.sum(output, axis=1, keepdims=True)
        return np.argmax(result, axis=1)
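
# Minimal usage sketch (an illustration, not part of the original assignment):
# train on the four XOR points, with inputs as rows and one-hot targets.
#
#   x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
#   y = np.array([[1, 0], [0, 1], [0, 1], [1, 0]]).T  # one-hot, shape (2, 4)
#   nn = NeuralNetwork([2, 4, 2])
#   nn.set_xy(x.T, y)
#   nn.set_num_iterations(5000)
#   nn.train_model()
#   print(nn.predict_by_model(x))  # typically prints [0 1 1 0]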
def plot_decision_boundary(xy, colors, pred_func):
    x_min, x_max = xy[:, 0].min() - 0.5, xy[:, 0].max() + 0.5
    y_min, y_max = xy[:, 1].min() - 0.5, xy[:, 1].max() + 0.5
    h = 0.01
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(xy[:, 0], xy[:, 1], c=colors, marker='o', cmap=plt.cm.Spectral, edgecolors='black')
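
# An end-to-end gradient check (a minimal sketch; check_network_gradient is a
# hypothetical helper, not part of the original assignment). It perturbs one
# weight of a network whose data have already been set via set_xy(), and
# compares the finite-difference slope of the cost with the analytic gradient.
def check_network_gradient(nn, eps=1e-5):
    output, caches = nn.forward_propagation(nn.x)
    grads = nn.back_propagation(output, caches)
    w = nn.w["w1"]
    w[0, 0] += eps
    cost_plus = nn.compute_error(nn.forward_propagation(nn.x)[0])
    w[0, 0] -= 2 * eps
    cost_minus = nn.compute_error(nn.forward_propagation(nn.x)[0])
    w[0, 0] += eps  # restore the original weight
    numeric = (cost_plus - cost_minus) / (2 * eps)
    analytic = grads["derror_wrt_dw1"][0, 0]
    print("numeric: %g  analytic: %g" % (numeric, analytic))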
if __name__ == "__main__":
    plt.figure(figsize=(16, 32))
    xy, colors = sklearn.datasets.make_moons(60, noise=1.0)
    # One-hot encode the two classes: class 1 -> [0, 1], class 0 -> [1, 0].
    expect_output = []
    for c in colors:
        if c == 1:
            expect_output.append([0, 1])
        else:
            expect_output.append([1, 0])
    expect_output = np.array(expect_output).T
    # Train one network per hidden-layer size and plot its decision boundary.
    hidden_layer_neuron_num_list = [1, 2, 4, 10, 20, 50]
    for i, hidden_layer_neuron_num in enumerate(hidden_layer_neuron_num_list):
        plt.subplot(5, 2, i + 1)
        plt.title(u'Hidden layer neurons: %d' % hidden_layer_neuron_num, fontproperties=font)
        nn = NeuralNetwork([2, hidden_layer_neuron_num, 2], True)
        nn.set_xy(xy.T, expect_output)
        nn.set_num_iterations(30000)
        nn.set_learning_rate(0.1)
        w, b = nn.train_model()
        plot_decision_boundary(xy, colors, nn.predict_by_model)
    plt.show()
# Homework assignment, 2020-09-10