Julia 实现蒙特卡罗积分的并行计算
其串行思想如下:
using Base.Threads
# Serial Monte Carlo estimate of the integral of
# 100 * (sin(x) + cos(x) + 2*sqrt(x)) over [startIdx, endIdx].
# The inner 100-iteration loop repeats the same evaluation to
# simulate a heavier per-sample workload.
N = 10000
startIdx = 0.0
endIdx = 3.1415926
result = 0.0
for sample in 1:N
    x = startIdx + (endIdx - startIdx) * rand()
    acc = 0.0
    for _ in 1:100
        acc += sin(x) + cos(x) + sqrt(x) + sqrt(x)
    end
    global result += acc   # `global` needed: loop body is a new scope at top level
end
a = result
println(a)
# Monte Carlo estimate: interval length times the mean sample value.
yMean = a / Float64(N)
result1 = (endIdx - startIdx) * yMean
print(result1)
上面的串行实现速度并不理想,我们希望利用多线程提高计算速度。
并行实现如下:
using Base.Threads
"""
    compute()

Threaded Monte Carlo estimate of the integral of
`100 * (sin(x) + cos(x) + 2*sqrt(x))` over `[0, 3.1415926]`.

Sample points are drawn up front into `X`; the threaded loop only
reads them and folds each partial sum into a shared
`Threads.Atomic{Float64}` accumulator. Prints and returns the estimate.
"""
function compute()
    N = 10000
    a = 0.0
    b = 3.1415926
    # Pre-draw all sample points so the threaded loop does no RNG.
    X = Array{Float64}(undef, N)
    for k in 1:N
        # BUG FIX: the original wrote `global X[k] = ...`, but `X` is a
        # local variable here — `global` inside the function is invalid.
        X[k] = a + (b - a) * rand()
    end
    acc = Atomic{Float64}(0)
    @threads for i in 1:N
        xi = X[i]
        y = 0.0
        for j in 1:100
            y += sin(xi) + cos(xi) + sqrt(xi) + sqrt(xi)
        end
        # One atomic update per iteration (not per inner step) keeps
        # contention on the shared accumulator low.
        atomic_add!(acc, y)
    end
    ys = acc[]
    # Estimate = interval length times the mean sample value.
    yMean = ys / Float64(N)
    result1 = (b - a) * yMean
    print("julia计算的积分值=", result1)
    return result1
end
@time compute()
我们也可以通过 @everywhere 广播声明,并用 @distributed(Julia 1.0 之前写作 @parallel,现已移除)进行多进程并行计算
宏命令@everywhere
它是一个表达式的广播,会把表达式在所有进程上执行。但它不会返回 Future 对象,看不到结果,所以一般用于广播声明,之后的表达式仍使用 @fetch 等"单体命令"远程调用以返回结果。当然,我们可以取巧一点,把一些不需要返回结果的、所有进程共有的表达式也用 @everywhere 广播出去,然后只对最后必须返回结果的表达式使用"单体命令"拿回结果即可。这样写起来会简洁些。
#@everywhere
using Distributed
# Broadcast the kernel to every worker process so each can run it locally.
@everywhere function compute(N::Int)
    # Monte Carlo sum of 100 * (sin(x) + cos(x) + 2*sqrt(x)) over N
    # points drawn uniformly from [0, 3.1415926). The inner loop
    # re-evaluates the same point 100 times to simulate real work.
    total = 0.0
    for _ in 1:N
        x = 3.1415926 * rand()
        for _ in 1:100
            total += sin(x) + cos(x) + sqrt(x) + sqrt(x)
        end
    end
    return total
end
"""
    parallel_computation(N::Int; ncores::Int=4)

Split the Monte Carlo sum into `ncores` chunks, reduce them with
`@distributed (+)`, and scale the mean by the interval length
(3.1415926). Prints and returns the integral estimate.
"""
function parallel_computation(N::Int; ncores::Int=4)
    # BUG FIX: `@parallel` was removed in Julia 1.0; the parallel
    # reduction macro is `@distributed` from the Distributed stdlib.
    chunk = ceil(Int, N / ncores)
    ySum = @distributed (+) for i = 1:ncores
        compute(chunk)
    end
    # BUG FIX: `ncores * chunk` points were actually sampled, which can
    # exceed N when ncores does not divide N — divide by the real count
    # so the mean is unbiased. (Identical to the old result when
    # ncores divides N, e.g. the N=10000, ncores=4 call below.)
    yMean = ySum / Float64(chunk * ncores)
    result1 = 3.1415926 * yMean
    print("julia计算的积分值=", result1)
    return result1
end
@time print(compute(10000)) # serial baseline: one process sums 10000 samples (first call includes compile time)
@time parallel_computation(10000) # distributed version for comparison