TensorFlow: Logistic (Softmax) Regression Example with MNIST Handwritten Digits

 

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST with one-hot encoded labels (one_hot must be a boolean, not the string 'True')
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabels = mnist.train.labels
testimg = mnist.test.images
testlabels = mnist.test.labels
print('***mnist data loaded***')

print(trainimg.shape)
print(trainlabels.shape)
print(testimg.shape)
print(testlabels.shape)

Extracting data/train-images-idx3-ubyte.gz
Extracting data/train-labels-idx1-ubyte.gz
Extracting data/t10k-images-idx3-ubyte.gz
Extracting data/t10k-labels-idx1-ubyte.gz
***mnist data loaded***
(55000, 784)
(55000, 10)
(10000, 784)
(10000, 10)
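
Since matplotlib is imported but not otherwise used, a quick sanity check is to reshape one of the 784-value rows back into a 28x28 image and display it. This is only a small optional sketch using the trainimg/trainlabels arrays loaded above; the index is chosen arbitrarily for illustration:

# Optional sanity check: show one training image and its one-hot label
idx = 0  # arbitrary example index, just for illustration
plt.imshow(trainimg[idx].reshape(28, 28), cmap='gray')
plt.title('label (one-hot): %s' % trainlabels[idx])
plt.show()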

x = tf.placeholder("float", [None, 784])  # 784 is the input dimension (28x28 pixels); None means any batch size
y = tf.placeholder("float", [None, 10])
W = tf.Variable(tf.zeros([784, 10]))  # each image has 784 pixel values, so W needs 784 rows; 10 columns for the 10 classes
b = tf.Variable(tf.zeros([10]))       # one bias per class
# softmax regression model
actv = tf.nn.softmax(tf.matmul(x, W) + b)
# cost function: cross-entropy averaged over the batch
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(actv), axis=1))
# optimization: plain gradient descent
learning_rate = 0.01
optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
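
Computing the cross-entropy by hand on softmax outputs can produce NaN when actv contains exact zeros (tf.log(0)). As an optional sketch, not used in the training loop below, recent TF 1.x releases offer a fused, numerically stable op that takes the raw logits instead:

# Optional: numerically stable cross-entropy on the raw logits (sketch only)
logits = tf.matmul(x, W) + b
stable_cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))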

# a prediction is correct when the index of the largest softmax output matches the one-hot label
pred = tf.equal(tf.argmax(actv, 1), tf.argmax(y, 1))
# accuracy: fraction of correct predictions
accr = tf.reduce_mean(tf.cast(pred, "float"))
# variable initialization
init = tf.global_variables_initializer()
training_epochs = 50
batch_size = 100
display_step = 5
sess = tf.Session()
sess.run(init)
for epoch in range(training_epochs):
    avg_cost = 0.
    num_batch = int(mnist.train.num_examples / batch_size)
    for i in range(num_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        avg_cost += sess.run(cost, feed_dict=feeds) / num_batch
    # every display_step epochs, report accuracy on the last training batch and on a test batch
    if epoch % display_step == 0:
        feeds_train = {x: batch_xs, y: batch_ys}
        x_test, y_test = mnist.test.next_batch(batch_size * 2)
        feeds_test = {x: x_test, y: y_test}
        train_acc = sess.run(accr, feed_dict=feeds_train)
        test_acc = sess.run(accr, feed_dict=feeds_test)
        print('EPOCH:%d / %d  cost:%f acc_train:%f  acc_test:%f' % (epoch, training_epochs, avg_cost, train_acc, test_acc))
print('DONE')

EPOCH:0 / 50  cost:1.176480 acc_train:0.830000  acc_test:0.845000
EPOCH:5 / 50  cost:0.441004 acc_train:0.870000  acc_test:0.880000
EPOCH:10 / 50  cost:0.383355 acc_train:0.820000  acc_test:0.885000
EPOCH:15 / 50  cost:0.357256 acc_train:0.890000  acc_test:0.935000
EPOCH:20 / 50  cost:0.341496 acc_train:0.900000  acc_test:0.920000
EPOCH:25 / 50  cost:0.330561 acc_train:0.930000  acc_test:0.920000
EPOCH:30 / 50  cost:0.322342 acc_train:0.900000  acc_test:0.895000
EPOCH:35 / 50  cost:0.315983 acc_train:0.940000  acc_test:0.940000
EPOCH:40 / 50  cost:0.310727 acc_train:0.950000  acc_test:0.890000
EPOCH:45 / 50  cost:0.306352 acc_train:0.910000  acc_test:0.885000
DONE
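
The test accuracy reported above is measured on a small 200-image batch. As a follow-up sketch, assuming the session sess from the script is still open, accr can be evaluated on the entire test set and a single prediction inspected:

# Optional follow-up: accuracy on the full 10,000-image test set
full_test_acc = sess.run(accr, feed_dict={x: mnist.test.images, y: mnist.test.labels})
print('full test accuracy: %f' % full_test_acc)

# Predict the class of a single test image (index chosen arbitrarily for illustration)
sample = mnist.test.images[0:1]
predicted_class = sess.run(tf.argmax(actv, 1), feed_dict={x: sample})
print('predicted digit: %d, true digit: %d' % (predicted_class[0], np.argmax(mnist.test.labels[0])))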

 
