# A simple TensorFlow example program
#encoding=utf-8
import tensorflow as tf
import numpy as np
def net(input):
    """Build a tiny conv + sigmoid network over *input*.

    Exposes the created tensors/variables through module-level globals
    (filter, bias, y1, y2) so that debugging helpers can evaluate them.
    Returns the sigmoid activation tensor.
    """
    global filter, bias, y1, y2
    # Small random weights keep the sigmoid near its linear region at start.
    weight_init = tf.random_normal_initializer(
        mean=0.0, stddev=0.01, seed=None, dtype=tf.float64)
    filter = tf.get_variable(
        'filter', shape=[2, 2, 1, 1], initializer=weight_init, dtype=tf.float64)
    bias = tf.Variable([0], dtype=tf.float64, name='bias')
    y1 = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='VALID')
    pre_activation = y1 + bias
    y2 = tf.nn.sigmoid(pre_activation)
    return y2
def display(sess):
    """Print current weights, activations, loss and gradients (debug aid).

    NOTE(review): relies on module-level globals being defined first:
    filter, bias, y1, y2 (set by net), plus input, data and loss.
    Python 2 print-statement syntax.
    """
    #print '--it:%2d' % it,'loss:',loss.eval({input:data},sess)
    print
    # Current variable values.
    print "--filter:",filter.eval(sess).reshape(1,4),\
        " bias:",bias.eval(sess)
    # Forward-pass tensors evaluated on the fixed training sample.
    print "--y1:",y1.eval({input:data},sess),\
        " y2:",y2.eval({input:data},sess),\
        "loss:",loss.eval({input:data},sess)
    # Gradients of the loss w.r.t. each variable (new gradient ops are
    # added to the graph on every call — acceptable for a small demo).
    print "--filter gradient:",tf.gradients(loss,filter)[0].eval({input:data},sess).reshape(1,4),\
        " bias gradient:",tf.gradients(loss,bias)[0].eval({input:data},sess).reshape(1,1)
    print
def train(loss):
    """Return a gradient-descent training op that minimizes *loss*.

    exponential_decay is called with decay_rate=1, so the learning rate
    stays constant at 0.2 — the call is illustrative only.
    """
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(0.2, global_step, 1, 1)
    sgd = tf.train.GradientDescentOptimizer(learning_rate)
    return sgd.minimize(loss, global_step=global_step)
def ckpt(sess):
    """Restore the latest checkpoint if one exists, else initialize variables.

    Returns a (start_step, ckpt_path, saver) tuple so the caller can
    resume training from the recovered step and keep saving.
    """
    ckpt_path = 'models'
    saver = tf.train.Saver()
    state = tf.train.get_checkpoint_state(ckpt_path)
    if state and state.model_checkpoint_path:
        print('--Restore the model from checkpoint %s ...' % state.model_checkpoint_path)
        saver.restore(sess, state.model_checkpoint_path)
        # Checkpoint files are named "...-<global_step>"; recover the step.
        start_step = int(state.model_checkpoint_path.split('/')[-1].split('-')[-1])
    else:
        # No checkpoint yet: start fresh from step 0.
        sess.run(tf.global_variables_initializer())
        start_step = 0
    print('--start training from step %d ...' % start_step)
    return start_step, ckpt_path, saver
def summary(sess):
    """Create the merged summary op and a TensorBoard FileWriter.

    Returns (summary_op, summary_writer, summary_path).
    """
    out_dir = 'summary'
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter(out_dir, sess.graph)
    return merged, writer, out_dir
# ---- graph construction ----
# One fixed 2x2 single-channel training sample, shaped NHWC.
data = np.reshape(np.array([[0.1, 0.2], [0.3, 0.4]]), (1, 2, 2, 1))
# NOTE(review): `input` shadows the builtin, but display() reads it as a
# module-level global, so the name is kept.
input = tf.placeholder(tf.float64, [1, 2, 2, 1])
predict = net(input)
# Squared error against a constant target of 1.
loss = tf.reduce_mean(tf.square(1 - predict))
tf.summary.scalar('loss', loss)
train_op = train(loss)

# ---- training loop ----
with tf.Session() as sess:
    print('--trainable variables: %s' % tf.trainable_variables())
    summary_op, summary_writer, summary_path = summary(sess)
    start_step, ckpt_path, saver = ckpt(sess)
    # Run 21 steps from wherever the checkpoint (if any) left off.
    for it in range(start_step, start_step + 21):
        #display(sess)
        sess.run(train_op, {input: data})
        # Renamed from `summary` to avoid shadowing the summary() helper.
        summary_str = sess.run(summary_op, {input: data})
        summary_writer.add_summary(summary_str, it)
        if it % 10 == 0:
            saver.save(sess, "models/model.ckpt", global_step=it)
    print('--model saved in: %s, summary saved in: %s.' % (ckpt_path, summary_path))