Gradient descent with multiple weights

The script below trains a single linear neuron with three inputs (toes, wlrec, nfans) and three weights on the first game's data, and prints the prediction, error, delta, and weight updates for three iterations of gradient descent.

def neural_network(input, weights):
    # prediction: weighted sum of the inputs
    output = 0
    for i in range(len(input)):
        output += (input[i] * weights[i])
    return output

def ele_mul(scalar, vector):
    # multiply every element of vector by scalar
    output = [0, 0, 0]
    for j in range(len(output)):
        output[j] = vector[j] * scalar
    return output
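# example: ele_mul(0.5, [1, 2, 3]) returns [0.5, 1.0, 1.5];
# the training loop below uses it to scale a vector by delta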

toes = [8.5, 9.5, 9.9, 9.0]
wlrec = [0.65, 0.8, 0.8, 0.9]
nfans = [1.2, 1.3, 0.5, 1.0]

win_or_lose_binary = [1,1,0,1]
true_value = win_or_lose_binary[0]     # target output for the first game
input = [toes[0], wlrec[0], nfans[0]]  # the first game's three inputs
weights = [0.1, 0.2, -0.1]
alpha = 0.01                           # learning rate

for iter in range(3):
    pred = neural_network(input, weights)
    error = (pred - true_value) ** 2
    delta = pred - true_value
    # note: this scales delta by the current weights; the gradient of
    # (pred - true_value)**2 w.r.t. weights[i] is 2*delta*input[i], so the
    # conventional update would use ele_mul(delta, input) (see the sketch after the script)
    weight_delta = ele_mul(delta, weights)

    print("pred值是",pred)
    print("error值是",error)
    print("delta = pred-ture值是",delta)
    print("weight_delta值是",weight_delta)
    print("weights值是", weights)

    for h in range(len(weights)):
        weights[h] -= alpha * weight_delta[h]
        print("^" * 50)
        print("current index h:", h)
        print("alpha:", alpha)
        print("current weight_delta[h]:", weight_delta[h])
        print("current weights[h]:", weights[h])
        print("^" * 50)

    print("weight值是",weights)
    print("*************************"*10)

The output is:

C:\Users\admin\AppData\Local\Programs\Python\Python39\python.exe "D:\python test\20220823.py" 
pred = 0.8600000000000001
error = 0.01959999999999997
delta (pred - true_value) = -0.1399999999999999
weight_delta = [-0.013999999999999992, -0.027999999999999983, 0.013999999999999992]
weights = [0.1, 0.2, -0.1]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
current index h: 0
alpha: 0.01
current weight_delta[h]: -0.013999999999999992
current weights[h]: 0.10014
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
current index h: 1
alpha: 0.01
current weight_delta[h]: -0.027999999999999983
current weights[h]: 0.20028
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
current index h: 2
alpha: 0.01
current weight_delta[h]: 0.013999999999999992
current weights[h]: -0.10014
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
weights after update = [0.10014, 0.20028, -0.10014]
************************************************************
pred = 0.8612040000000001
error = 0.019264329615999977
delta (pred - true_value) = -0.13879599999999992
weight_delta = [-0.013899031439999992, -0.027798062879999984, 0.013899031439999992]
weights = [0.10014, 0.20028, -0.10014]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
current index h: 0
alpha: 0.01
current weight_delta[h]: -0.013899031439999992
current weights[h]: 0.1002789903144
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
current index h: 1
alpha: 0.01
current weight_delta[h]: -0.027798062879999984
current weights[h]: 0.2005579806288
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
current index h: 2
alpha: 0.01
current weight_delta[h]: 0.013899031439999992
current weights[h]: -0.1002789903144
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
weights after update = [0.1002789903144, 0.2005579806288, -0.1002789903144]
************************************************************
pred = 0.8623993167038401
error = 0.01893394804357011
delta (pred - true_value) = -0.13760068329615993
weight_delta = [-0.013798457587510445, -0.02759691517502089, 0.013798457587510445]
weights = [0.1002789903144, 0.2005579806288, -0.1002789903144]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
current index h: 0
alpha: 0.01
current weight_delta[h]: -0.013798457587510445
current weights[h]: 0.10041697489027511
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
current index h: 1
alpha: 0.01
current weight_delta[h]: -0.02759691517502089
current weights[h]: 0.20083394978055022
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
current index h: 2
alpha: 0.01
current weight_delta[h]: 0.013798457587510445
current weights[h]: -0.10041697489027511
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
weights after update = [0.10041697489027511, 0.20083394978055022, -0.10041697489027511]
************************************************************

Process finished with exit code 0
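
As a quick check on the first update in the log above: pred = 8.5*0.1 + 0.65*0.2 + 1.2*(-0.1) = 0.86, delta = 0.86 - 1 = -0.14, weight_delta[0] = delta*weights[0] = -0.014, and weights[0] becomes 0.1 - 0.01*(-0.014) = 0.10014, matching the printed values up to floating-point noise. A few lines that reproduce this arithmetic:

pred = 8.5 * 0.1 + 0.65 * 0.2 + 1.2 * (-0.1)   # 0.86
delta = pred - 1                               # about -0.14
weight_delta_0 = delta * 0.1                   # about -0.014 (delta times weights[0])
new_weight_0 = 0.1 - 0.01 * weight_delta_0     # about 0.10014, as printed in the log
print(pred, delta, weight_delta_0, new_weight_0)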
