import tensorflow as tf
import numpy as np
from keras.utils.np_utils import *
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Build the TF1 graph with an interactive session (ops run eagerly via .run/.eval).
sess = tf.InteractiveSession()

# Load the dataset as floats; the first CSV row is a header and is skipped.
# NOTE(review): hard-coded absolute path — consider making this configurable.
data = np.genfromtxt('C:/Users/78405/Desktop/fi2014.csv', dtype=float, delimiter=',', skip_header=1)
print(data)

# Feature matrix, shape (n_samples, 5): column 6 first, then columns 0-3.
# Replaces the original while-loop of per-column np.c_ appends with one
# vectorized fancy-index slice — same column order, same values.
x_data = data[:, [6, 0, 1, 2, 3]]
print(x_data)

# Labels come from column 9; one-hot encode into 3 classes.
yi_data = data[:, 9]
print(yi_data)
y_data = to_categorical(yi_data, 3)
print(y_data)
# --- Input placeholders ---
# xs: (None, 5) feature batch; ys: (None, 3) one-hot label batch.
xs = tf.placeholder(tf.float32, [None, 5])
ys = tf.placeholder(tf.float32, [None, 3])

# --- Hidden layer: 100 sigmoid units ---
hidden_w = tf.Variable(tf.random_normal([5, 100]))   # random-normal init weights
hidden_b = tf.Variable(tf.zeros([100]) + 0.1)        # small positive bias
hidden_out = tf.nn.sigmoid(tf.matmul(xs, hidden_w) + hidden_b)

# --- Output layer: 3 classes, softmax probabilities ---
out_w = tf.Variable(tf.random_normal([100, 3]))
out_b = tf.Variable(tf.zeros([3]) + 0.1)
output2 = tf.nn.softmax(tf.matmul(hidden_out, out_w) + out_b)
# --- Loss and optimizer with exponentially decaying learning rate ---
LEARNING_RATE_BASE = 0.4    # initial learning rate
LEARNING_RATE_DECAY = 0.9   # decay factor applied every LEARNING_RATE_STEP steps
LEARNING_RATE_STEP = 95     # decay period in steps (≈ n_samples / batch size)

# Non-trainable step counter; minimize(..., global_step=...) increments it.
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                           global_step,
                                           LEARNING_RATE_STEP,
                                           LEARNING_RATE_DECAY,
                                           staircase=True)

# Mean categorical cross-entropy over the softmax probabilities.
# Fix: clip probabilities away from exact 0 before tf.log — otherwise a
# saturated softmax output yields log(0) = -inf and the loss becomes NaN.
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(ys * tf.log(tf.clip_by_value(output2, 1e-10, 1.0)),
                   reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    cross_entropy, global_step=global_step)
# --- Training ---
# Initialize all variables, then run full-batch gradient descent.
tf.global_variables_initializer().run()
summary_writer = tf.summary.FileWriter('newlog/', sess.graph)

for i in range(1000):
    # One gradient-descent update per iteration, fetching the loss from the
    # same run. Fix: the original additionally called train_step.run(...)
    # right after this sess.run, silently performing TWO updates per loop
    # iteration (and making the printed loss one update stale).
    _, loss_value = sess.run([train_step, cross_entropy],
                             feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print("第%d步,loss_value = %f" % (i, loss_value))
        # Show current softmax class probabilities for the whole dataset.
        print(sess.run(output2, feed_dict={xs: x_data}))
# Run one hand-made sample (shape (1, 5)) through the trained network.
ceshi = np.array([[5, 10, 8.948, 8.43, 9.284]])
print('预测的值是:', sess.run(output2, feed_dict={xs: ceshi}))

# Accuracy, evaluated on the training data itself (no held-out split here):
# compare row-wise argmax of prediction vs. one-hot label, then average the
# booleans as floats.
correct_prediction = tf.equal(tf.argmax(output2, 1), tf.argmax(ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('识别准确率:')
print(accuracy.eval({xs: x_data, ys: y_data}))