Grokking Deep Learning 学习笔记(三)

TensorFlow 2 的简单使用

实现代码:

import tensorflow as tf
import numpy as np

def main():
    """Build, train, save, and query a tiny Keras regression model.

    Fits y ≈ 2x + 100 on synthetic noisy data, saves the model to
    ./line_models.h5, then predicts the output for x = 0.5.
    """

    # Build a sequential (layer-by-layer) neural network.
    models = tf.keras.Sequential()

    # Add dense layers: input_dim = number of input features, units = number
    # of outputs of the layer.
    # NOTE(review): input_dim on the second layer is redundant — Keras infers
    # it from the previous layer's output.
    models.add(tf.keras.layers.Dense(input_dim=1,units=2))
    models.add(tf.keras.layers.Dense(input_dim=2,units=1))
    models.compile(loss="mse",optimizer="sgd")

    # Print a summary of the network architecture.
    models.summary()

    # Prepare the data set: y = 2x + 100 plus Gaussian noise.
    x = np.linspace(-5,5,200)
    y = 2*x + 100 + np.random.normal(0,0.1,(200,))

    # Train. verbose=1 prints training progress; validation_split=0.2 holds
    # out 20% of the data for validation (NOT for training — the original
    # comment had this backwards).
    models.fit(x,y,verbose=1,epochs=200,validation_split=0.2)

    # Save the trained model to disk.
    models.save("./line_models.h5")

    # # To load the saved model back:
    # models = tf.keras.models.load_model("./line_models.h5")

    # Define a TensorFlow constant to use as prediction input.
    x = tf.constant([0.5])

    # Predict y for x and print the result.
    y = models.predict(x)
    print(y)




# Script entry point.
if __name__ == "__main__":
    main()

《grokking Deep Learning》第五章代码解读

多对一

多对一神经元结构:
多对一神经元结构

球员的输赢预测(多个输入对一个输出进行预测):

#coding="utf-8"

#多对一神经网络
def w_sum(a, b):
    """Return the dot product (weighted sum) of two equal-length vectors."""
    # Mismatched lengths are a programming error, so fail loudly.
    assert (len(a) == len(b))
    # Multiply each input by its weight and accumulate the prediction.
    return sum(x * y for x, y in zip(a, b))

# Initial weight values, one per input feature (toes, win record, fans).
weights = [0.1, 0.2, -0.1]

def neural_network(input, weights=weights):
    """Predict a single output as the weighted sum of the inputs."""
    return w_sum(input, weights)

def ele_mul(number, vector):
    """Scale every element of *vector* by *number* and return a new list.

    Used to turn the scalar prediction delta into one weight delta per
    input element.
    """
    # Generalized: the original hard-coded a 3-element output list, which
    # limited the function to 3-element vectors; sizing from the input
    # works for any length (including empty) and stays backward-compatible.
    output = [0] * len(vector)
    for i in range(len(vector)):
        # Multiply the error term by each input component.
        output[i] = number * vector[i]
    return output

def main():
    """Run three gradient-descent steps on the first training sample.

    Mutates the module-level ``weights`` in place and prints the
    prediction, error, delta, and weights at every iteration.
    """
    # Data set: one list per input feature, one entry per game.
    toes = [8.5, 9.5, 9.9, 9.0]
    wlrec = [0.65, 0.8, 0.8, 0.9]
    nfans = [1.2, 1.3, 0.5, 1.0]
    # Target outcomes for each game (1 = win, 0 = lose).
    win_or_lose_binary = [1, 1, 0, 1]
    true = win_or_lose_binary[0]
    # The single sample used for training (first game's features).
    input = [toes[0], wlrec[0], nfans[0]]
    # Learning rate.
    alpha = 0.01
    for iter in range(3):
        # Predict from the first sample.
        pred = neural_network(input, weights)
        # Squared error between prediction and target.
        error = (pred - true) ** 2
        # Raw (signed) error.
        delta = pred - true
        # Per-weight deltas: the signed error scaled by each input.
        weight_deltas = ele_mul(delta, input)
        # Freeze the first weight: zeroing its delta keeps it fixed.
        # Illustrates the weight-freezing idea discussed in the article.
        weight_deltas[0] = 0
        # Apply the update to every weight.
        for i in range(len(weights)):
            # Step each weight by the learning rate times its delta.
            weights[i] -= alpha * weight_deltas[i]
        # Report which iteration this is.
        print("Iteration:" + str(iter + 1))
        # The prediction made this iteration.
        print("Pred:" + str(pred))
        # The squared error.
        print("Error:" + str(error))
        # The signed error.
        print("Delta:" + str(delta))
        # The updated weights.
        print("Weights:" + str(weights))
        # The per-weight deltas.
        print("Weight_Deltas:")
        print(str(weight_deltas))
        print()
    # Final weights and weight deltas after the loop.
    print("Weights:" + str(weights))
    print("Weight Deltas:" + str(weight_deltas))

# Script entry point.
if __name__ == '__main__':
    main()

结果:

Iteration:1
Pred:0.8600000000000001
Error:0.01959999999999997
Delta:-0.1399999999999999
Weights:[0.1, 0.20091, -0.09832]
Weight_Deltas:
[0, -0.09099999999999994, -0.16799999999999987]

Iteration:2
Pred:0.8626075000000001
Error:0.018876699056249977
Delta:-0.13739249999999992
Weights:[0.1, 0.20180305125, -0.09667129]
Weight_Deltas:
[0, -0.08930512499999994, -0.1648709999999999]

Iteration:3
Pred:0.8651664353125001
Error:0.018180090166338207
Delta:-0.13483356468749985
Weights:[0.1, 0.20267946942046874, -0.09505328722375]
Weight_Deltas:
[0, -0.0876418170468749, -0.16180027762499982]

Weights:[0.1, 0.20267946942046874, -0.09505328722375]
Weight Deltas:[0, -0.0876418170468749, -0.16180027762499982]

一对多

一对多神经元结构:
一对多神经元结构

一个输入对多个输出进行预测:

#coding="utf-8"

def scalar_ele_mul(number, vector):
    """Scale every element of *vector* by scalar *number*; return a new list."""
    # Generalized: the original hard-coded a 3-element output list, which
    # limited the function to 3-element vectors; sizing from the input
    # works for any length and stays backward-compatible.
    output = [0] * len(vector)
    for i in range(len(vector)):
        output[i] = number * vector[i]
    return output

# Initial weight values, one per output (hurt, win, sad).
weights = [0.3, 0.2, 0.9]

def neural_network(input, weights=weights):
    """Predict one value per weight by scaling all weights with the input."""
    return scalar_ele_mul(input, weights)

def ele_mul(number, vector):
    """Scale every element of *vector* by *number* and return a new list."""
    # Generalized: the original hard-coded a 3-element output list; sizing
    # from the input works for any vector length and stays
    # backward-compatible.
    output = [0] * len(vector)
    for i in range(len(vector)):
        # Multiply the error term by each input component.
        output[i] = number * vector[i]
    return output

def main():
    """Train the one-input/three-output network for three gradient steps.

    Uses the first sample (input wlrec[0], targets [hurt, win, sad]),
    mutates the module-level ``weights`` in place, and prints the final
    weights and weight deltas.
    """
    # Data set.
    wlrec = [0.65, 1.0, 1.0, 0.9]
    # Corresponding target outputs.
    hurt = [0.1, 0.0, 0.0, 0.1]
    win = [1, 1, 0, 1]
    sad = [0.1, 0.0, 0.1, 0.2]
    # Targets for the first game.
    true = [hurt[0], win[0], sad[0]]
    # The single sample used for training.
    input = wlrec[0]
    # Learning rate (loop-invariant, so defined once before the loop).
    alpha = 0.01
    for iter in range(3):
        # Predict all three outputs from the single input.
        pred = neural_network(input, weights)
        # Per-output squared error and signed error.
        error = [0, 0, 0]
        delta = [0, 0, 0]
        for i in range(len(true)):
            # Squared error for output i.
            error[i] = (pred[i] - true[i]) ** 2
            # Signed error for output i.
            delta[i] = pred[i] - true[i]
        # BUG FIX: the weight deltas are the input times the output deltas
        # (the gradient), not input times the weights as the original
        # computed.
        weight_deltas = scalar_ele_mul(input, delta)
        # BUG FIX: update the weights on every iteration. The original
        # performed the update after the loop, so all three iterations
        # produced the same prediction and only one step was ever taken.
        for i in range(len(weights)):
            # Step each weight by the learning rate times its delta.
            weights[i] -= alpha * weight_deltas[i]
    print("Weights:" + str(weights))
    print("Weight Deltas:" + str(weight_deltas))

# Script entry point.
if __name__ == '__main__':
    main()

输出结果:

Weights:[0.29805, 0.19870000000000002, 0.89415]
Weight Deltas:[0.195, 0.13, 0.5850000000000001]

多对多

多对多神经元结构:
多对多神经元结构

多个输入对多个输出进行预测:

#coding="utf-8"

#多对一神经网络
def w_sum(a, b):
    """Return the dot product (weighted sum) of two equal-length vectors."""
    # Mismatched lengths are a programming error, so fail loudly.
    assert (len(a) == len(b))
    # Pair inputs with weights, multiply, and accumulate the prediction.
    return sum(x * y for x, y in zip(a, b))

def vect_mat_mul(vect, matrix):
    """Multiply row-vector *vect* by *matrix*: one dot product per matrix row.

    Returns a list with one entry per row of *matrix*.
    """
    # The vector length must match the number of matrix rows.
    assert (len(vect) == len(matrix))
    # Generalized: the original hard-coded a 3-element output list, limiting
    # the function to 3x3 problems; sizing from the input is
    # backward-compatible and works for any square-compatible size.
    output = [0] * len(vect)
    for i in range(len(vect)):
        output[i] = w_sum(vect, matrix[i])
    return output

#初始化输出
def zeros_matrix(a, b):
    """Return an a-by-b matrix (list of lists) filled with zeros.

    Each row is a distinct list, so mutating one row never affects
    another.
    """
    # The original nested loops reused the loop variable ``i`` for both
    # levels — harmless here but a shadowing bug waiting to happen. A
    # nested comprehension builds the identical structure without it.
    return [[0 for _ in range(b)] for _ in range(a)]

def outer_prod(vec_a, vec_b):
    """Return the outer product of two vectors as a len(a)-by-len(b) matrix."""
    # Row i holds vec_a[i] scaled by every element of vec_b; building the
    # rows directly with a comprehension replaces the zero-matrix
    # pre-allocation the original used.
    return [[x * y for y in vec_b] for x in vec_a]

# Weight matrix: one row of three weights per output prediction.
weights = [ [0.1, 0.1, -0.3],# hurt?
             [0.1, 0.2, 0.0], # win?
             [0.0, 1.3, 0.1] ]# sad?

def neural_network(input, weights=weights):
    """Predict three outputs via vector-matrix multiplication."""
    return vect_mat_mul(input, weights)

def main():
    """Perform one gradient-descent step for the 3-input/3-output network.

    Mutates the module-level ``weights`` matrix in place and prints it.
    """
    # Data set: one list per input feature, one entry per game.
    toes = [8.5, 9.5, 9.9, 9.0]
    wlrec = [0.65, 0.8, 0.8, 0.9]
    nfans = [1.2, 1.3, 0.5, 1.0]
    # Target outputs for each game.
    hurt = [0.1, 0.0, 0.0, 0.1]
    win = [1, 1, 0, 1]
    sad = [0.1, 0.0, 0.1, 0.2]
    # Targets for the first game.
    true = [hurt[0], win[0], sad[0]]
    # The single sample used for training (first game's features).
    input = [toes[0], wlrec[0], nfans[0]]
    # Predict all three outputs from the first sample.
    pred = neural_network(input, weights)
    # Per-output squared error and signed error.
    error = [0, 0, 0]
    delta = [0, 0, 0]
    for i in range(len(true)):
        # Squared error for output i.
        error[i] = (pred[i] - true[i]) ** 2
        # Signed error for output i.
        delta[i] = pred[i] - true[i]
    # Weight deltas: the outer product of the inputs and output deltas.
    weight_deltas = outer_prod(input, delta)
    # Learning rate.
    alpha = 0.01
    # Step every weight in the matrix by the learning rate times its delta.
    for i in range(len(weights)):
        for j in range(len(weights[0])):
            weights[i][j] -= alpha * weight_deltas[i][j]
    print(weights)


# Script entry point.
if __name__ == '__main__':
    main()

输出结果:

Weights:[[0.061325, 0.1017, -0.373525], [0.0970425, 0.20013, -0.005622500000000002], [-0.0054600000000000004, 1.30024, 0.08962]]
Weight Deltas:[[3.8675000000000006, -0.1699999999999992, 7.352500000000001], [0.29575000000000007, -0.01299999999999994, 0.5622500000000001], [0.546, -0.023999999999999886, 1.038]]

知识了解

权重冻结

使一个权重为0,即相当于没有这个权重,这种方法叫做冻结权重。这是弹性固化权重神经网络(最近研发的最牛的神经网络之一)诞生由来。
四维图的2D切片视角,其中三个为权重,一个为error。这个是error平面:
四维图的2D切片视角,其中三个为权重,一个为error。这个是error平面

在a权重被冻结的情况下,a的error正在同b、c降低,但是这并不是a点移动,而是曲线的移动,

深度学习和深度神经网络方面最顶尖的创新

1.神经元的创新(循环神经网络这一分支)

2.神经元之间的创新(残差网络、卷积网络、胶囊网络等)

几乎所有的黑科技都是围绕着两个方面突破


此文为本人学习所创,如有错误请多多指教

联系QQ1135999353

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值