import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# R25=10k, B25/50=3470 NTC thermistor characteristic data table.
# Load samples; each row is: resistance (kOhm), temperature (deg C).
dat = np.loadtxt('trainData.txt')
# Min-max normalize both columns into [0, 1] so the sigmoid net can fit them.
R = dat[:, 0]
T = dat[:, 1]
R_K = R.max() - R.min()  # resistance range (scale factor)
R_B = R.min()            # resistance offset
T_K = T.max() - T.min()  # temperature range (scale factor)
T_B = T.min()            # temperature offset
R = (R - R_B) / R_K
T = (T - T_B) / T_K
# Column vectors; -1 infers the sample count instead of hard-coding 141,
# so the script keeps working if trainData.txt grows or shrinks.
R = R.reshape(-1, 1)
T = T.reshape(-1, 1)
# Graph inputs: one feature in, one target out, variable batch size.
X = tf.placeholder(tf.float32, shape=[None, 1])
Y = tf.placeholder(tf.float32, shape=[None, 1])
# 定义层
def add_layer(input, in_size, out_size, activation_fun=None):
    """Build one fully connected layer.

    :param input: input tensor of shape [batch, in_size]
    :param in_size: number of input columns (features)
    :param out_size: number of output columns (units)
    :param activation_fun: activation applied to the affine output; ``None``
        (new, backward-compatible default) yields a linear layer — the
        original required a callable and always applied it
    :return: output tensor of shape [batch, out_size]
    """
    # NOTE(review): `input` shadows the builtin of the same name; kept so
    # existing keyword callers are not broken.
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    bias = tf.Variable(tf.zeros([1, out_size]))
    z_i = tf.matmul(input, weights) + bias
    return z_i if activation_fun is None else activation_fun(z_i)
# ---- Forward pass ----
# Hidden layer: 10 sigmoid units fed by the single (normalized) resistance.
out_h = add_layer(X, 1, 10, tf.nn.sigmoid)
# Output layer: one sigmoid unit producing the normalized temperature.
out = add_layer(out_h, 10, 1, tf.nn.sigmoid)
# Loss: batch mean of the per-sample squared error.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(out - Y), reduction_indices=[1]))
# Learning rate (value tuned by the original author).
learning_rate = 20.2
# Plain gradient descent minimizing the loss.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_step = optimizer.minimize(loss)
# Op that initializes all TensorFlow variables.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Full-batch gradient descent for 10000 steps.
    for step in range(10000):
        sess.run(train_step, feed_dict={X: R, Y: T})
        if step % 100 == 0:
            # Report the current loss every 100 steps.
            current_loss = sess.run(loss, feed_dict={X: R, Y: T})
            print(current_loss)
    # Network predictions on the training inputs (still normalized).
    pred = sess.run(out, feed_dict={X: R, Y: T})
    # Undo the temperature normalization to get degrees Celsius back.
    pred = pred * T_K + T_B
    # Original (un-normalized) resistance / temperature columns for plotting.
    raw_R = dat[:, 0]
    raw_T = dat[:, 1]
    # Configure matplotlib so the Chinese axis labels render correctly.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.xlabel("电阻")
    plt.ylabel("温度")
    plt.plot(raw_R, raw_T, 'r', label='R-T特性曲线')
    plt.plot(raw_R, pred, label='拟合曲线')
    plt.legend()
    plt.grid()
    plt.show()
# 运行结果 (run results):