# coding: utf-8
import tensorflow as tf
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
num_classes=2
#data=pd.DataFrame(pd.read_csv('/home/henson/Desktop/huanping/huanping.csv_EDGE_NBD.csv',encoding='gb18030'))
data=pd.DataFrame(pd.read_csv('sele.csv',encoding='utf-8'))
data.head()
sess = tf.Session()
data=shuffle(data)
print(data)
X = np.array(data[['author_degree1', 'author_degree2', 'No']])
y = np.array(data[['isBD']])
y = y[:, 0]
y = (np.arange(2) == y[:, None]).astype(np.float32)  # one-hot encode the labels into shape [N, 2]
#y_train = tf.cast(y, tf.int32)
#print(y_train)
#print(X,y[:,0])
#StandardScaler = StandardScaler()  # standardization
#X_Standard = StandardScaler.fit_transform(X)
X_train=X
#y_Standard = StandardScaler.fit_transform(y)
"""
X_train,X_test,y_train,y_test = train_test_split(X_Standard,y,test_size=0,random_state=0)
X_train = scale(X_train)
X_test = scale(X_test)
"""
#y_train = scale(y.reshape((-1,1)))  # scale data to (-1,1); not needed here
#y_test = scale(y_test.reshape((-1,1)))
def add_layer(inputs, input_size, output_size, activation_function=None):
    with tf.variable_scope("Weights"):
        Weights = tf.Variable(tf.random_normal(shape=[input_size, output_size]), name="weights")
    with tf.variable_scope("biases"):
        biases = tf.Variable(tf.zeros(shape=[1, output_size]) + 0.1, name="biases")
    with tf.name_scope("Wx_plus_b"):
        Wx_plus_b = tf.matmul(inputs, Weights) + biases
    with tf.name_scope("dropout"):
        Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob=keep_prob_s)
    if activation_function is None:
        return Wx_plus_b
    else:
        with tf.name_scope("activation_function"):
            return activation_function(Wx_plus_b)
xs = tf.placeholder(shape=[None,X_train.shape[1]],dtype=tf.float32,name="inputs")
ys = tf.placeholder(shape=[None,2],dtype=tf.float32)
#ys = tf.placeholder(shape=[None,num_classes],dtype=tf.float32)
print(ys.shape)
keep_prob_s = tf.placeholder(dtype=tf.float32)
with tf.name_scope("layer_1"):
    l1 = add_layer(xs, 3, 10, activation_function=tf.nn.relu)
with tf.name_scope("layer_2"):
    l2 = add_layer(l1, 10, 10, activation_function=tf.nn.relu)
with tf.name_scope("y_pred"):
    logits = add_layer(l2, 10, num_classes)
print("logits:",logits)
with tf.name_scope("loss"):
    #loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - logits), reduction_indices=[1]))
    #loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=ys, logits=tf.argmax(logits, 1)))
    #loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=ys, logits=logits))
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))
    tf.summary.scalar("loss", tensor=loss)
with tf.name_scope("train"):
    train_op = tf.train.GradientDescentOptimizer(learning_rate=0.03).minimize(loss)
    #train_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def fit(X, y, n, keep_prob):
    init = tf.global_variables_initializer()
    #feed_dict_train = {ys: y[:, :], xs: X, keep_prob_s: keep_prob}
    feed_dict_train = {ys: y, xs: X, keep_prob_s: keep_prob}
    with tf.Session() as sess:
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=15)
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter(logdir="nn_huangping_log", graph=sess.graph)  # write TensorBoard logs
        sess.run(init)
        for i in range(n):
            _loss, _ = sess.run([loss, train_op], feed_dict=feed_dict_train)
            if i % 100 == 0:
                print("epoch:%d\tloss:%.5f " % (i, _loss))
                acc = sess.run(accuracy, feed_dict=feed_dict_train)
                print(acc)
                rs = sess.run(merged, feed_dict=feed_dict_train)
                writer.add_summary(summary=rs, global_step=i)  # write TensorBoard logs
                saver.save(sess=sess, save_path="model/nn_huanping.model", global_step=i)  # save the model

fit(X_train, y, 10000, 0.5)
#Cannot feed a Tensor object as input here; a Tensor object is not an array.
#Weight values and biases can be tracked via tf.histogram_summary(layer_name + '/weights', Weights); not sure whether the outputs can also be fetched through session.run().
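# A minimal sketch (assuming the TF 1.x summary API already used above) of how per-layer
# weight and bias histograms could be tracked with tf.summary.histogram; the merged
# summaries are then fetched with sess.run(merged), exactly like the scalar loss.
# This variant is illustrative only and is not wired into the graph built above.
def add_layer_with_histograms(inputs, input_size, output_size, layer_name, activation_function=None):
    Weights = tf.Variable(tf.random_normal(shape=[input_size, output_size]), name="weights")
    biases = tf.Variable(tf.zeros(shape=[1, output_size]) + 0.1, name="biases")
    tf.summary.histogram(layer_name + "/weights", Weights)  # shows up under HISTOGRAMS in TensorBoard
    tf.summary.histogram(layer_name + "/biases", biases)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        return Wx_plus_b
    return activation_function(Wx_plus_b)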
#
# The earlier data had not been filtered: some samples had identical input feature vectors
# but fell into two different classes and carried two different labels, which is why the
# results looked so strange. So always inspect the data carefully first.
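# A minimal sketch for spotting the problem described above: identical feature rows
# ('author_degree1', 'author_degree2', 'No') that carry more than one distinct 'isBD'
# label. Any row printed here is a conflicting sample that should be cleaned before training.
label_counts = (data.groupby(['author_degree1', 'author_degree2', 'No'])['isBD']
                    .nunique()
                    .reset_index(name='n_labels'))
print(label_counts[label_counts['n_labels'] > 1])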