Randomness
ReLU – Simple Neuron Network
Assume each ReLU unit receives an input pair (x1, x2). The output of unit i is σ_i = max(b_i + w_{i1}·x1 + w_{i2}·x2, 0). If the network contains n ReLU units, the output of the network is y = sig(b_0 + w_1·σ_1 + … + w_n·σ_n), where sig is the sigmoid function, sig(x) = 1 / (1 + e^(−x)).
Now, we assume all b and w is a random value taken from N(0,1).
(1) Create 10 ReLU units. What are the results for (1, 1), (1, -1) and (-1, -1)?
(2) Create a 2-layer ReLU network with 2 × 5 ReLU units. What are the results for (1, 1), (1, -1) and (-1, -1)?
# Standard-library imports first, then third-party, per PEP 8.
import math

import matplotlib.pyplot as plt  # fixed typo: "mathplotlib" -> "matplotlib"
import numpy as np
import numpy.matlib
### q1
def getRandNornalMum():
    """Return one random draw from the standard normal distribution N(0, 1).

    Returns:
        float: a single N(0, 1) sample as a plain Python float.

    Note: the original implementation used ``numpy.matlib.randn``, which is
    deprecated (the whole ``numpy.matlib`` module is); this uses the
    equivalent ``np.random.standard_normal`` instead. The (misspelled)
    function name is kept because other functions in this file call it.
    """
    # standard_normal() with no shape argument returns a scalar.
    return float(np.random.standard_normal())
def NReLU(n, x1, x2):
    """One-hidden-layer network of ``n`` ReLU units followed by a sigmoid.

    Each hidden unit computes max(b + w1*x1 + w2*x2, 0) with its own weights
    and bias drawn fresh from N(0, 1). The unit outputs are then combined
    linearly with new N(0, 1) weights plus a bias, and passed through the
    sigmoid function.

    Args:
        n: number of ReLU units in the hidden layer.
        x1, x2: the two input values.

    Returns:
        The network output sigmoid(b0 + w1*σ1 + ... + wn*σn).
    """
    # Hidden-layer activations: one ReLU output per unit.
    activations = []
    for _ in range(n):
        w1 = getRandNornalMum()
        w2 = getRandNornalMum()
        b = getRandNornalMum()
        activations.append(max(b + w1 * x1 + w2 * x2, 0))
    # Output-layer parameters: index 0 is the bias b0, indices 1..n are the
    # weights applied to the hidden activations (that is why the weight for
    # activation i is params[i + 1]).
    params = [getRandNornalMum() for _ in range(n + 1)]
    total = params[0]
    for weight, act in zip(params[1:], activations):
        total += weight * act
    # NOTE(review): sigmoid() is assumed to be defined elsewhere in this file.
    return sigmoid(total)
# 2*5 two layer
def N1N2ReLU(n1, n2, x1, x2):
    """Two-layer network: ``n1`` weighted ``NReLU`` sub-networks plus a bias.

    Draws an output bias from N(0, 1), accumulates n1 terms of the form
    (N(0, 1) weight) * NReLU(n2, x1, x2), and squashes the sum with sigmoid.

    Args:
        n1: number of sub-networks in the outer layer.
        n2: number of ReLU units inside each sub-network.
        x1, x2: the two input values.

    Returns:
        sigmoid of the accumulated weighted sub-network outputs.
    """
    acc = getRandNornalMum()  # output-layer bias
    for _ in range(n1):
        # Weight is drawn before the sub-network is evaluated, preserving the
        # original left-to-right order of random draws.
        acc += getRandNornalMum() * NReLU(n2, x1, x2)
    return sigmoid(acc)
# Buffon's Needle simulation
def BuffonNeedle():
    """Simulate one drop of Buffon's needle (needle length 1, line spacing 1).

    The needle's center gets a random vertical coordinate and a random angle
    in [0, pi); the drop counts as a hit when the needle crosses one of the
    horizontal grid lines at integer y-coordinates. The hit probability is
    2/pi for this geometry.

    Returns:
        int: 1 if the needle crosses a grid line, 0 otherwise.

    Fixes relative to the original:
    - The original called the undefined name ``getRandNormalNum`` (a typo of
      ``getRandNornalMum``), which raised NameError. The call only served as
      a fair coin flip for the sign of the center coordinate, so it is
      replaced with an explicit 50/50 uniform draw (probability-identical).
    - Plain floats are used instead of 1-element numpy arrays, which
      ``math.sin`` only accepted with a deprecation warning.
    """
    # Number of grid cells to simulate along the vertical axis.
    ninf = 10000000
    # Needle center: a random integer cell plus a uniform offset in [0, 1).
    center_y = np.random.randint(ninf) + np.random.rand()
    # Fair coin flip for the sign of the center coordinate (the original
    # used the sign of an N(0, 1) draw; the crossing probability is the
    # same either way).
    if np.random.rand() < 0.5:
        center_y = -center_y
    # Random needle angle uniform in [0, pi).
    angle = np.random.rand() * math.pi
    # Vertical extent of the needle: half-length 0.5 on each side of center.
    half_rise = 0.5 * math.sin(angle)
    y_max = center_y + half_rise
    y_min = center_y - half_rise
    # The needle crosses a line iff its endpoints lie in different unit
    # cells; the floor/ceil pair also catches endpoints exactly on a line.
    if math.floor(y_max) > math.floor(y_min) or math.ceil(y_max) > math.ceil(y_min):
        return 1
    return 0