Julia: Trying Out Flux.jl

This example again follows the one in the Flux documentation; the original documentation has a few pitfalls.

1. A Simple Framework

using Flux
using Flux: train!, Descent, Dense

# Ground-truth function the model should learn: y = 4x + 2
actual(x) = 4x + 2
x_train, x_test = hcat(0:5...), hcat(6:10...)
y_train, y_test = actual.(x_train), actual.(x_test)

predict = Dense(1, 1) # Dense(1 => 1) no longer worked with the version used here
loss(x, y) = Flux.Losses.mse(predict(x), y)

opt = Descent()                    # plain gradient descent
data = [(x_train, y_train)]        # a single (input, target) batch
parameters = Flux.params(predict)  # implicit-style parameter collection

for epoch in 1:200
    train!(loss, parameters, data, opt)
    ls = loss(x_train, y_train)
    error = predict(x_test) .- y_test
    println("epoch : $epoch, loss :$ls  parameters: $parameters")
    println("error : $error")
end

Output:


epoch : 1, loss :96.65946  parameters: Params([Float32[7.3060203;;], Float32[1.783612]])
error : Float32[19.619732 22.925755 26.231773 29.537796 32.84382]
epoch : 2, loss :91.72043  parameters: Params([Float32[1.3531775;;], Float32[0.17387962]])
error : Float32[-17.707054 -20.353878 -23.0007 -25.647522 -28.294346]
epoch : 3, loss :87.035255  parameters: Params([Float32[7.118746;;], Float32[1.8625149]])
error : Float32[18.57499 21.693737 24.81248 27.931229 31.049973]
epoch : 4, loss :82.5907  parameters: Params([Float32[1.4697881;;], Float32[0.330639]])
error : Float32[-16.850632 -19.380844 -21.911057 -24.441269 -26.971481]
epoch : 5, loss :78.374275  parameters: Params([Float32[6.9431906;;], Float32[1.9296173]])
error : Float32[17.58876 20.531952 23.475143 ...
......
error : Float32[0.119464874 0.13848305 0.15750122 0.1765213 0.19553757]
epoch : 196, loss :0.0036572255  parameters: Params([Float32[3.9814754;;], Float32[1.9947717]])
error : Float32[-0.11637688 -0.13490105 -0.15342712 -0.1719513 -0.19047546]
epoch : 197, loss :0.0034711396  parameters: Params([Float32[4.018051;;], Float32[2.0050797]])
error : Float32[0.113386154 0.1314373 0.14949036 0.1675415 0.18559265]
epoch : 198, loss :0.0032945399  parameters: Params([Float32[3.9824178;;], Float32[1.9950383]])
error : Float32[-0.11045456 -0.1280365 -0.14561844 -0.1632042 -0.18078613]
epoch : 199, loss :0.0031269772  parameters: Params([Float32[4.0171328;;], Float32[2.0048218]])
error : Float32[0.10761833 0.124752045 0.14188385 0.15901566 0.17614746]
epoch : 200, loss :0.0029679425  parameters: Params([Float32[3.983312;;], Float32[1.9952911]])
error : Float32[-0.10483742 -0.12152672 -0.13821411 -0.1548996 -0.1715889]
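
As a quick sanity check on the run above, the trained layer's weight and bias can be read off directly; they should approach the true coefficients 4 and 2. (Whether Dense(1, 1) or Dense(1 => 1) is accepted depends on the installed Flux version.) A minimal sketch, assuming a recent Flux release where Dense stores its parameters in the weight and bias fields (older releases used W and b):

# Inspect the trained layer (run after the training loop above).
# Field names `weight`/`bias` assume a recent Flux release.
@show predict.weight      # expected ≈ [4.0]
@show predict.bias        # expected ≈ [2.0]
@show predict(x_test)     # predictions for x = 6..10
@show actual.(x_test)     # ground truth: 26 30 34 38 42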

2. Core Concept: Gradients

using Flux
using Flux: train!, Descent, Dense, Params

x = [1,2,2,3,5];
y = [2,1,2,2,4];
z = [1,8,3,6,5];

## f(x1,x2,x3,x4,x5) = x1^2 + x2^2 + x3^2 + x4^2 + x5^2 = sum(x.^2)
## df/dx1 = 2*x1; the same holds for x2, x3, x4, x5
## gradient(f,x) = [df/dx1, df/dx2, df/dx3, df/dx4, df/dx5]
f(x) = sum(x.^2); # must return a single scalar value
println("f -> gradient: $(gradient(f,x))")
## Implicit-parameter form: gs is a Grads object, and gs[x] holds df/dx
gs = gradient(params(x)) do
    f(x)
end
## g(x1,...,x5,y1,...,y5) = (x1-y1)^2 + (x2-y2)^2 + (x3-y3)^2 + (x4-y4)^2 + (x5-y5)^2 = sum((x-y).^2)
## gradient(g,x,y) = ([dg/dx1,dg/dx2,dg/dx3,dg/dx4,dg/dx5], [dg/dy1,dg/dy2,dg/dy3,dg/dy4,dg/dy5])
## For x1, dg/dx1 = 2*x1 - 2*y1; the other components of x and y follow the same pattern
g(x,y) = sum((x-y).^2);
println("g -> gradient: $(gradient(g,x,y))")

## Extending to more (and higher-dimensional) arguments works the same way
k(x,y,z) = sum((x-y).^2 + sin.((x-z).^2));
## For x1, dk/dx1 = 2*x1 - 2*y1 + cos((x1-z1)^2)*(2*x1 - 2*z1)
## gradient(k,x,y,z) = ([dk/dx1,dk/dx2,dk/dx3,dk/dx4,dk/dx5],
##                      [dk/dy1,dk/dy2,dk/dy3,dk/dy4,dk/dy5],
##                      [dk/dz1,dk/dz2,dk/dz3,dk/dz4,dk/dz5])
println("k -> gradient: $(gradient(k,x,y,z))")

Output:

f -> gradient: ([2.0, 4.0, 4.0, 6.0, 10.0],)
g -> gradient: ([-2.0, 2.0, 0.0, 2.0, 2.0], [2.0, -2.0, 0.0, -2.0, -2.0])
k -> gradient: ([-2.0, 3.535564275528856, -1.0806046117362795, 7.466781571308061, 2.0],
 [2.0, -2.0, -0.0, -2.0, -2.0],
 [-0.0, -1.535564275528856, 1.0806046117362795, -5.466781571308061, -0.0])
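
The gs computed with the gradient(params(x)) do ... end form above was never printed. A minimal sketch of how it can be used, assuming the same f and x as above: the returned Grads object is indexed by the parameter array itself.

# gs is a Zygote.Grads; indexing with the original array returns its gradient.
gs = gradient(params(x)) do
    f(x)
end
println(gs[x])  # expected to match gradient(f, x)[1], i.e. 2 .* x -> [2.0, 4.0, 4.0, 6.0, 10.0]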

Checking the dk/dx components of gradient(k,x,y,z) by hand:

for i in 1:5
    # dk/dx_i computed by hand; should match the first tuple element above
    value = 2*x[i] - 2*y[i] + cos((x[i]-z[i])^2)*(2*x[i] - 2*z[i])
    println("$i => $value")
end

The output shows the computation is correct:

1 => -2.0
2 => 3.535564275528856
3 => -1.0806046117362795
4 => 7.466781571308061
5 => 2.0

Note that in machine learning, the gradient used for training is the partial derivative of the loss function with respect to the weight vector, not with respect to the input values X. A sketch contrasting the two follows.
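
A minimal sketch of the distinction, assuming the predict, x_train, and y_train from Part 1 are still defined (the weight field name assumes a recent Flux release): the first call differentiates the loss with respect to the layer's parameters, which is what train! uses; the second differentiates the same loss with respect to the inputs.

# Gradient of the loss w.r.t. the model parameters (what training updates):
gs_w = gradient(() -> Flux.Losses.mse(predict(x_train), y_train), Flux.params(predict))
println(gs_w[predict.weight])   # d(loss)/d(weight)

# Gradient of the same loss w.r.t. the inputs (not what training updates):
gs_x = gradient(xin -> Flux.Losses.mse(predict(xin), y_train), Float32.(x_train))
println(gs_x[1])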
3. To Be Continued
