Using a Neural Network for Binary Classification in Python
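This post builds a small TensorFlow 1.x model class, dnnModel, for binary classification: a fully connected network with dropout, trained with softmax cross-entropy, and evaluated with accuracy, AUC, and an ROC curve (via scikit-learn and matplotlib). A simple linear SVM trained with a hinge loss is included in the same class for comparison.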

import tensorflow as tf
import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve
import matplotlib.pyplot as plt
from tensorflow.contrib import layers

class dnnModel():
    def __init__(self, x_train, y_train, x_test, y_test, learn_rate):
        self.epoch = 0
        self.learn_rate = learn_rate
        self.h1_dimen = 500
        self.h2_dimen = 500
        self.load_data2(x_train, y_train, x_test, y_test)
        # self.load_data(x_train, y_train)

    def load_data2(self, x_train, y_train, x_test, y_test):
        self.x_datas = x_train
        self.y_datas = y_train
        self.x_datas_test = x_test
        self.y_datas_test = y_test
        self.num_datas = self.y_datas.shape[0]
        self.num_datas_test = self.y_datas_test.shape[0]
        self.input_dimen = self.x_datas.shape[1]
        self.output_dimen = self.y_datas.shape[1]
        self.shuffle()

    def load_data(self, x, y):
        # alternative loader: split a single dataset 80/20 into train and test
        datas_len = x.shape[0]
        self.x_datas = x[0:datas_len * 8 // 10]
        self.y_datas = y[0:datas_len * 8 // 10]
        self.x_datas_test = x[datas_len * 8 // 10:]
        self.y_datas_test = y[datas_len * 8 // 10:]
        self.num_datas = self.y_datas.shape[0]
        self.num_datas_test = self.y_datas_test.shape[0]
        self.input_dimen = self.x_datas.shape[1]
        self.output_dimen = self.y_datas.shape[1]
        self.shuffle()
        # self.output_dimen = 1

    def shuffle(self):
        # shuffle train and test sets in place
        perm = np.arange(self.num_datas)
        np.random.shuffle(perm)
        self.x_datas = self.x_datas[perm]
        self.y_datas = self.y_datas[perm]
        perm = np.arange(self.num_datas_test)
        np.random.shuffle(perm)
        self.x_datas_test = self.x_datas_test[perm]
        self.y_datas_test = self.y_datas_test[perm]

    def weight_variable(self, shape, reg=True):
        init = tf.random_normal(shape=shape, dtype=tf.float32)
        if reg:
            # the regularizer is built here but never applied to the loss
            regularizer = layers.l2_regularizer(0.05)
        else:
            regularizer = None
        return tf.Variable(init)

    def bias_variable(self, shape):
        init = tf.constant(0.1, dtype=tf.float32, shape=shape)
        return tf.Variable(init)

    def next_batch(self, batchsize):

        # slide a cursor through the shuffled training data; reshuffle when an epoch ends
        start = self.epoch
        self.epoch += batchsize
        if self.epoch > self.num_datas:
            perm = np.arange(self.num_datas)
            np.random.shuffle(perm)
            self.x_datas = self.x_datas[perm]
            self.y_datas = self.y_datas[perm]
            self.epoch = batchsize
            start = 0
        end = self.epoch
        return self.x_datas[start:end], self.y_datas[start:end]

    def add_layer(self, x, input_dimen, output_dimen, name, relu=True):

        with tf.name_scope(name):
            weight = self.weight_variable([input_dimen, output_dimen])
            bias = self.bias_variable([output_dimen])
            tf.summary.histogram(name + "/weight", weight)
            tf.summary.histogram(name + "/bias", bias)
            if relu:
                return tf.nn.relu(tf.matmul(x, weight) + bias)
            else:
                return tf.matmul(x, weight) + bias

    def constructDnn(self, input_x):

        # input layer
        input_layer = self.add_layer(input_x, self.input_dimen, 500, name="input_layer", relu=True)
        # first hidden layer
        h1 = self.add_layer(input_layer, 500, 500, relu=True, name="hidden_layer1")
        h1_drop = tf.nn.dropout(h1, keep_prob=0.7)
        # second hidden layer
        h2 = self.add_layer(h1_drop, 500, 1024, relu=True, name="hidden_layer2")
        h2_drop = tf.nn.dropout(h2, keep_prob=0.8)
        # a third hidden layer could be added the same way:
        # h3 = self.add_layer(h2_drop, 500, 500, relu=True, name="hidden_layer3")
        # h3_drop = tf.nn.dropout(h3, keep_prob=0.8)
        # output layer: raw logits, no activation
        output_layer = self.add_layer(h2_drop, 1024, self.output_dimen, "output_layer", relu=False)
        tf.summary.histogram('/outputs', output_layer)
        return output_layer

    def train(self, maxTrainTimes, batchsize):

        X = tf.placeholder(dtype=tf.float32, shape=[None, self.input_dimen])
        Y = tf.placeholder(dtype=tf.float32, shape=[None, self.output_dimen])
        y_pre = self.constructDnn(X)
        y_prob = tf.nn.softmax(y_pre)  # class probabilities, used for the ROC curve
        entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y_pre, labels=Y)
        # entropy = -tf.reduce_sum(Y * tf.log(tf.nn.softmax(y_pre)))
        loss = tf.reduce_mean(entropy)
        optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(loss)
        with tf.name_scope("evl"):
            correct = tf.equal(tf.argmax(y_pre, 1), tf.argmax(Y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct, dtype=tf.float32))
            a = tf.cast(tf.argmax(y_pre, 1), tf.float32)
            b = tf.cast(tf.argmax(Y, 1), tf.float32)
            auc = tf.contrib.metrics.streaming_auc(a, b)  # returns (auc_value, update_op)
        tf.summary.scalar("loss", loss)
        tf.summary.scalar("accuracy", accuracy)
        # tf.summary.scalar("auc", auc)
        merged_summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter('./tmp/mnist_logs')

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())  # required by streaming_auc
            summary_writer.add_graph(sess.graph)
            for i in range(maxTrainTimes):
                x_train, y_train = self.next_batch(batchsize)
                sess.run(optimizer, feed_dict={X: x_train, Y: y_train})
                # print(sess.run(y_pre, feed_dict={X: x_train, Y: y_train}))
                # print(sess.run(entropy, feed_dict={X: x_train, Y: y_train}))
                if i % 100 == 0:
                    print("train {} loss:".format(i), sess.run(loss, feed_dict={X: x_train, Y: y_train}))
                    s = sess.run(merged_summary_op, feed_dict={X: x_train, Y: y_train})
                    summary_writer.add_summary(s, i)
            # evaluate on the held-out test set, batch by batch
            testTime = self.num_datas_test // batchsize
            for i in range(testTime):
                x_batch = self.x_datas_test[i * batchsize:(i + 1) * batchsize]
                y_batch = self.y_datas_test[i * batchsize:(i + 1) * batchsize]
                testAcc = sess.run(accuracy, feed_dict={X: x_batch, Y: y_batch})
                testAuc = sess.run(auc, feed_dict={X: x_batch, Y: y_batch})
                y_pred_pro = sess.run(y_prob, feed_dict={X: x_batch, Y: y_batch})
                y_scores = np.array(y_pred_pro)
                auc_value = roc_auc_score(y_batch, y_scores)
                a = np.array(y_batch)[:, 1]  # true label of the positive class
                b = y_scores[:, 1]           # predicted probability of the positive class
                fpr, tpr, thresholds = roc_curve(a, b, pos_label=1.0)
                # one ROC figure is shown per test batch
                plt.figure(figsize=(6, 4))
                plt.plot(fpr, tpr, color='blue', linewidth=2, label='AUC (area:%0.4f)' % auc_value)
                plt.plot([0, 1], [0, 1], color='black', linewidth=2, linestyle='--')
                plt.xlim([0.0, 1.0])
                plt.ylim([0.0, 1.0])
                plt.xlabel('False Positive Rate')
                plt.ylabel('True Positive Rate')
                plt.title('ROC')
                plt.legend(loc="lower right")
                plt.show()
                print("test {},accuracy:{},auc: {}".format(i, testAcc, testAuc))

    def svm_train(self, maxTrainTimes, batchsize):

        # set up the feed-in placeholders
        x_data = tf.placeholder(shape=[None, self.input_dimen], dtype=tf.float32)
        y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
        # create the model variables
        A = tf.Variable(tf.random_normal(shape=[self.input_dimen, 1]))
        b = tf.Variable(tf.random_normal(shape=[1, 1]))
        # define the linear model
        model_output = tf.subtract(tf.matmul(x_data, A), b)
        # declare the squared L2 norm of the weight vector
        l2_norm = tf.reduce_sum(tf.square(A))
        # loss = mean(max(0, 1 - pred * actual)) + alpha * L2_norm(A)^2
        alpha = tf.constant([0.01])
        classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))
        loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
        my_opt = tf.train.GradientDescentOptimizer(0.01)
        train_step = my_opt.minimize(loss)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(maxTrainTimes):
                train_x, train_y = self.next_batch(batchsize)
                # the hinge loss expects labels in {-1, +1}: map one-hot rows to +/-1
                train_y = (np.argmax(train_y, axis=1) * 2 - 1).reshape([-1, 1]).astype(np.float32)
                sess.run(train_step, feed_dict={x_data: train_x, y_target: train_y})
                if i % 100 == 0:
                    print("loss in train step {}: {}".format(i, sess.run(loss, feed_dict={x_data: train_x, y_target: train_y})))
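To try the class end to end, it can be driven with synthetic data. The snippet below is a minimal sketch, not part of the original post: it assumes scikit-learn's make_classification for fake features, and the sample counts, learning rate, and step counts are arbitrary illustrative choices. The class expects float32 features of shape [N, d] and one-hot labels of shape [N, 2].

import numpy as np
from sklearn.datasets import make_classification

# hypothetical driver: build a toy two-class dataset
x, y = make_classification(n_samples=5000, n_features=20, random_state=0)
x = x.astype(np.float32)
onehot = np.eye(2, dtype=np.float32)[y]  # shape [N, 2], one column per class

split = len(x) * 8 // 10  # 80/20 train/test split
model = dnnModel(x[:split], onehot[:split],
                 x[split:], onehot[split:], learn_rate=1e-4)
model.train(maxTrainTimes=2000, batchsize=100)  # prints loss every 100 steps,
                                                # then test accuracy/AUC and ROC plots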
