'''
Logistic regression example on MNIST.
'''
from __future__ import print_function
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1
# tf Graph input
x = tf.placeholder(tf.float32, [None, 784])  # MNIST images flattened to 28*28 = 784
y = tf.placeholder(tf.float32, [None, 10])   # one-hot labels for the 10 digit classes
# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Construct the model: softmax over a linear transform of the input
pred = tf.nn.softmax(tf.matmul(x, W) + b)
# Minimize error using cross entropy, summed over classes and averaged over the batch
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
# Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initialize the variables
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get the loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            # Accumulate the average loss over the epoch
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
    print("Optimization Finished!")
    # Test model: compare predicted class against the one-hot label
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy on the test set
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))