1. The Python side
The full source code for this model is here: https://github.com/shelleyHLX/tensorflow_java
Stars are appreciated.
First, train a model. The code is as follows:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.python.framework import graph_util
# 100 evenly spaced points between -1 and 1
train_X = np.linspace(-1, 1, 100)
train_Y = 2*train_X + np.random.randn(*train_X.shape)*0.1
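# The underlying relationship is y = 2x; Gaussian noise (std 0.1) is added on top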
# Plot the simulated data points
plt.plot(train_X, train_Y, 'ro', label='train data')
plt.legend()
plt.show()
# Build the model
# Placeholders for the input x and the target y
X = tf.placeholder("float",name='X')
Y = tf.placeholder("float",name='Y')
# Model parameters
# W: a single weight, initialized from a standard normal distribution
W = tf.Variable(tf.random_normal([1]), name="weight")
# b: a single bias, initialized to 0
b = tf.Variable(tf.zeros([1]), name="bias")
# Forward pass: tf.multiply performs element-wise multiplication
z = tf.multiply(X, W) + b
op = tf.add(tf.multiply(X, W),b,name='results')
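# 'op' recomputes z, but with an explicit name ('results') so that, after the
# graph is exported, this output node can be looked up by name (e.g. from Java)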
# Backward pass: mean-squared-error loss, minimized with gradient descent
cost = tf.reduce_mean(tf.square(Y - z))
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# 初始化所有变量
init = tf.global_variables_initializer()
# Training hyperparameters
training_epochs = 20
display_step = 2
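# display_step: how often (in epochs) to record and print progress in the loop below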
def moving_avage(a, w=10):
    if len(a) < w:
        return a[:]
    return [val if idx < w else sum(a[(idx-w):idx])/w for idx, val in enumerate(a)]
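# moving_avage smooths a curve with a trailing window of size w: the first w
# values are kept as-is; each later value is replaced by the mean of the
# previous w values. For example, moving_avage([1, 2, 3, 4, 5], w=3)
# returns [1, 2, 3, 2.0, 3.0].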
saver = tf.train.Saver()
# Launch the session
with tf.Session() as sess:
    sess.run(init)
    # Store batch indices and loss values for plotting
    plotdata = {"batchsize": [], "loss": []}
    # Feed the training data into the model
    for epoch in range(training_epochs):