神经网络
-
神经网络迭代原理代码.
-
激活函数使用sigmoid.
"""Minimal two-layer neural network trained by backpropagation.

Fits an XOR-like target with sigmoid activations, adjusting the
weights over many iterations until the output fits the targets.
"""
import numpy as np

try:  # Python 2 provides xrange
    xrange
except NameError:  # Python 3: xrange was renamed to range
    xrange = range


def sigmod(x, deriv=False):
    """Sigmoid activation (original 'sigmod' name kept for compatibility).

    If ``deriv`` is True, ``x`` is assumed to already be a sigmoid OUTPUT
    and the derivative ``s * (1 - s)`` is returned; otherwise the sigmoid
    of ``x`` is returned.
    """
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))


def train(iterations=100000, report_every=10000):
    """Train the 3-4-1 network on the XOR dataset; return (w0, w1).

    iterations:   number of weight-update passes (default 100000).
    report_every: print the mean absolute residual every this many passes.
    """
    # Inputs: 4 samples x 3 features (the third column is a constant bias input).
    X = np.array([[0, 0, 1],
                  [0, 1, 1],
                  [1, 0, 1],
                  [1, 1, 1]])
    # Targets: XOR of the first two input columns.
    y = np.array([[0], [1], [1], [0]])

    # Fixed seed so the random initial weights are reproducible.
    np.random.seed(10)
    # Weights initialised uniformly in [-1, 1): w0 is 3x4, w1 is 4x1.
    w0 = 2 * np.random.random((3, 4)) - 1
    w1 = 2 * np.random.random((4, 1)) - 1

    for j in xrange(iterations):
        l0 = X                             # input layer:  4x3
        l1 = sigmod(np.dot(l0, w0))        # hidden layer: 4x4
        l2 = sigmod(np.dot(l1, w1))        # output layer: 4x1

        l2_error = y - l2                  # residual: 4x1

        # Periodic progress report.
        if (j % report_every) == 0:
            print("Error:" + str(np.mean(np.abs(l2_error))))

        # Backpropagation: scale each error by the local sigmoid derivative.
        l2_delta = l2_error * sigmod(l2, deriv=True)   # 4x1
        l1_error = l2_delta.dot(w1.T)                  # 4x1 dot 1x4 -> 4x4
        l1_delta = l1_error * sigmod(l1, deriv=True)   # 4x4

        # Weight updates (learning rate implicitly 1).
        w1 += l1.T.dot(l2_delta)           # 4x4.T dot 4x1 -> 4x1
        w0 += l0.T.dot(l1_delta)           # 4x3.T dot 4x4 -> 3x4

    return w0, w1


if __name__ == "__main__":
    train()
点的生成与拟合.
生成满足y = a*x + b的数据点.
-
设置数据分布在直线的附近.
按照实现的速度快慢排序。友情提示:numpy与tensorflow数据类型不兼容,可使用
tf.convert_to_tensor() 进行转换。
-
code01
"""Randomly generate 100000 points scattered around the line y = 0.1*x + 0.3
and show them as a scatter plot, timing how long the whole run takes."""
import time

import numpy as np
import matplotlib.pyplot as plt

# time.clock() was deprecated in 3.3 and removed in Python 3.8;
# perf_counter() is the documented replacement for wall-clock timing.
start = time.perf_counter()

num_points = 100000
# x ~ N(0, 0.6); y follows the target line plus N(0, 0.03) noise.
x = np.random.normal(0.0, 0.6, num_points)
y = x * 0.1 + 0.3 + np.random.normal(0.0, 0.03, num_points)
vectors_set = [x, y]

plt.scatter(x, y, c='r')
plt.show()

TimeCost = time.perf_counter() - start
print("Time used:", TimeCost)
-
code02
import time import numpy as np import matplotlib.pyplot as plt """ 随机生成100000个点,围绕在y=0.1x+0.3的直线周围. """ start = time.clock() num_points = 100000 vectors_set = [] for i in range(num_points): x1 = np.random.normal(0.0, 0.6) y1 = x1 * 0.1 + 0.3 + np.random.normal
-