慕课网上的一个案例
涉及到 Python 的 numpy、tensorflow 和 matplotlib
以及神经网络
分享一下
# Demo: fit stock closing prices with a two-layer network (affine + activation per layer)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Trading-day axis 1..15 and the matching open/close prices for each day.
date = np.linspace(1, 15, 15)
endPrice = np.array([23.59,21.01,19.68,20.07,18.98,19.8,19.45,21.48,21.81,22.08,23.85,23.8,24.28,24.25,24.39])
beginPrice = np.array([21.82,19.75,21.05,19.07,19.62,19.69,21.29,21.96,21.77,23.85,24,24.87,24.42,24.25,24])
print(date)
plt.figure()
for i in range(0, 15):
    # One vertical bar per day, from the opening to the closing price.
    # x uses date[i] (1..15) so the bars align with the prediction curve
    # that is plotted against `date` later; the original used i (0..14),
    # shifting every bar one day to the left of the prediction line.
    dateOne = np.array([date[i], date[i]])
    priceOne = np.array([beginPrice[i], endPrice[i]])
    if endPrice[i] > beginPrice[i]:
        plt.plot(dateOne, priceOne, 'r', lw=8)  # red: closed higher than it opened
    else:
        plt.plot(dateOne, priceOne, 'g', lw=8)  # green: closed flat or lower
# Network shapes (15 training samples):
#   A(15x1) * w1(1x10) + b1(1x10) = B(15x10)   hidden layer
#   B(15x10) * w2(10x1) + b2(1x1) = C(15x1)    output layer
# Normalize inputs and targets into [0, 1] so gradient descent is stable.
dateNormal = np.zeros([15, 1])
priceNormal = np.zeros([15, 1])
for i in range(0, 15):
    dateNormal[i, 0] = i / 14.0             # day index scaled to [0, 1]
    priceNormal[i, 0] = endPrice[i] / 30.0  # 30 is a constant above the max price

x = tf.placeholder(tf.float32, [None, 1])  # normalized day index
y = tf.placeholder(tf.float32, [None, 1])  # normalized closing price (target)

# Hidden layer: 1 -> 10, ReLU activation.
w1 = tf.Variable(tf.random_uniform([1, 10], 0, 1))
b1 = tf.Variable(tf.zeros([1, 10]))
wb1 = tf.matmul(x, w1) + b1
layer1 = tf.nn.relu(wb1)

# Output layer: 10 -> 1.  The bias is [1, 1] and broadcasts over the batch;
# the original hard-coded [15, 1], which tied the graph to exactly 15 samples
# and contradicted the [None, 1] placeholder shape above.
w2 = tf.Variable(tf.random_uniform([10, 1], 0, 1))
b2 = tf.Variable(tf.zeros([1, 1]))
wb2 = tf.matmul(layer1, w2) + b2
layer2 = tf.nn.relu(wb2)

# Mean squared error: y is the ground truth, layer2 the prediction.
loss = tf.reduce_mean(tf.square(y - layer2))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(0, 10000):
        sess.run(train_step, feed_dict={x: dateNormal, y: priceNormal})
    # Predict on the training inputs, undo the /30 normalization
    # (vectorized; the original copied element-by-element), and overlay.
    pred = sess.run(layer2, feed_dict={x: dateNormal})
    predPrice = pred * 30
    plt.plot(date, predPrice, 'b', lw=1)
    plt.show()
# Demo (duplicate copy): fit stock closing prices with a two-layer network (affine + activation per layer)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Trading-day axis 1..15 and the matching open/close prices for each day.
date = np.linspace(1, 15, 15)
endPrice = np.array([23.59,21.01,19.68,20.07,18.98,19.8,19.45,21.48,21.81,22.08,23.85,23.8,24.28,24.25,24.39])
beginPrice = np.array([21.82,19.75,21.05,19.07,19.62,19.69,21.29,21.96,21.77,23.85,24,24.87,24.42,24.25,24])
print(date)
plt.figure()
for i in range(0, 15):
    # One vertical bar per day, from the opening to the closing price.
    # x uses date[i] (1..15) so the bars align with the prediction curve
    # that is plotted against `date` later; the original used i (0..14),
    # shifting every bar one day to the left of the prediction line.
    dateOne = np.array([date[i], date[i]])
    priceOne = np.array([beginPrice[i], endPrice[i]])
    if endPrice[i] > beginPrice[i]:
        plt.plot(dateOne, priceOne, 'r', lw=8)  # red: closed higher than it opened
    else:
        plt.plot(dateOne, priceOne, 'g', lw=8)  # green: closed flat or lower
# Network shapes (15 training samples):
#   A(15x1) * w1(1x10) + b1(1x10) = B(15x10)   hidden layer
#   B(15x10) * w2(10x1) + b2(1x1) = C(15x1)    output layer
# Normalize inputs and targets into [0, 1] so gradient descent is stable.
dateNormal = np.zeros([15, 1])
priceNormal = np.zeros([15, 1])
for i in range(0, 15):
    dateNormal[i, 0] = i / 14.0             # day index scaled to [0, 1]
    priceNormal[i, 0] = endPrice[i] / 30.0  # 30 is a constant above the max price

x = tf.placeholder(tf.float32, [None, 1])  # normalized day index
y = tf.placeholder(tf.float32, [None, 1])  # normalized closing price (target)

# Hidden layer: 1 -> 10, ReLU activation.
w1 = tf.Variable(tf.random_uniform([1, 10], 0, 1))
b1 = tf.Variable(tf.zeros([1, 10]))
wb1 = tf.matmul(x, w1) + b1
layer1 = tf.nn.relu(wb1)

# Output layer: 10 -> 1.  The bias is [1, 1] and broadcasts over the batch;
# the original hard-coded [15, 1], which tied the graph to exactly 15 samples
# and contradicted the [None, 1] placeholder shape above.
w2 = tf.Variable(tf.random_uniform([10, 1], 0, 1))
b2 = tf.Variable(tf.zeros([1, 1]))
wb2 = tf.matmul(layer1, w2) + b2
layer2 = tf.nn.relu(wb2)

# Mean squared error: y is the ground truth, layer2 the prediction.
loss = tf.reduce_mean(tf.square(y - layer2))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(0, 10000):
        sess.run(train_step, feed_dict={x: dateNormal, y: priceNormal})
    # Predict on the training inputs, undo the /30 normalization
    # (vectorized; the original copied element-by-element), and overlay.
    pred = sess.run(layer2, feed_dict={x: dateNormal})
    predPrice = pred * 30
    plt.plot(date, predPrice, 'b', lw=1)
    plt.show()