TF:程序设计1

一个简单的TF程序设计样例

#encoding=utf-8

import os

import numpy as np
import tensorflow as tf
 
def net(input):
	"""Build a tiny network: one 2x2 conv (no stride, VALID) + bias + sigmoid.

	The variables and intermediate tensors are published as module globals
	(filter, bias, y1, y2) so display() can inspect them after the fact.
	Returns the sigmoid output tensor.
	"""
	global filter, bias, y1, y2
	# Small random init; float64 to match the placeholder's dtype.
	initializer = tf.random_normal_initializer(
		mean=0.0, stddev=0.01, seed=None, dtype=tf.float64)
	filter = tf.get_variable(
		'filter', shape=[2, 2, 1, 1], initializer=initializer, dtype=tf.float64)
	bias = tf.Variable([0], dtype=tf.float64, name='bias')
	y1 = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='VALID')
	pre_activation = y1 + bias
	y2 = tf.nn.sigmoid(pre_activation)
	return y2
 
def display(sess):
	"""Print current variable values, activations, loss and gradients.

	Reads the module globals created by net() (filter, bias, y1, y2) and
	the script-level input/data/loss.  Rewritten from Python-2-only print
	statements to print() calls with %-formatting, consistent with the
	rest of the file and valid under Python 3.

	NOTE(review): tf.gradients() here adds new ops to the graph on every
	call - acceptable for a demo, wasteful in a real training loop.
	"""
	feed = {input: data}
	#print('--it:%2d loss: %s' % (it, loss.eval(feed, sess)))
	print('')
	print('--filter: %s  bias: %s' % (
		filter.eval(sess).reshape(1, 4), bias.eval(sess)))
	print('--y1: %s  y2: %s loss: %s' % (
		y1.eval(feed, sess), y2.eval(feed, sess), loss.eval(feed, sess)))
	print('--filter gradient: %s  bias gradient: %s' % (
		tf.gradients(loss, filter)[0].eval(feed, sess).reshape(1, 4),
		tf.gradients(loss, bias)[0].eval(feed, sess).reshape(1, 1)))
	print('')

def train(loss):
	"""Return a gradient-descent train op that minimizes `loss`.

	The exponential decay schedule uses decay_rate=1, so the learning
	rate is effectively a constant 0.2; the step counter is still
	incremented by minimize() via global_step.
	"""
	global_step = tf.Variable(0, trainable=False)
	learning_rate = tf.train.exponential_decay(0.2, global_step, 1, 1)
	optimizer = tf.train.GradientDescentOptimizer(learning_rate)
	return optimizer.minimize(loss, global_step=global_step)

def ckpt(sess):
	"""Restore the latest checkpoint if present, else init all variables.

	Returns (start_step, ckpt_path, saver).  start_step is parsed from
	the `-<global_step>` suffix that Saver.save(global_step=...) appends
	to the checkpoint filename.
	"""
	ckpt_path = 'models'
	saver = tf.train.Saver()
	# Local renamed from `ckpt` - it shadowed this function's own name.
	state = tf.train.get_checkpoint_state(ckpt_path)
	if state and state.model_checkpoint_path:
		print('--Restore the model from checkpoint %s ...' % state.model_checkpoint_path)
		saver.restore(sess, state.model_checkpoint_path)
		# os.path.basename is portable across path separators, unlike
		# the original split('/'), which breaks on Windows paths.
		start_step = int(os.path.basename(state.model_checkpoint_path).split('-')[-1])
	else:
		sess.run(tf.global_variables_initializer())
		start_step = 0
	print('--start training from step %d ...' % start_step)
	return start_step, ckpt_path, saver

def summary(sess):
	"""Set up TensorBoard logging under 'summary/'.

	Returns (merged_summary_op, writer, summary_dir).  The writer also
	records the session graph so it shows up in TensorBoard.
	"""
	summary_dir = 'summary'
	merged = tf.summary.merge_all()
	writer = tf.summary.FileWriter(summary_dir, sess.graph)
	return merged, writer, summary_dir

# Build the graph: a fixed 1x2x2x1 input, one conv+sigmoid layer, and a
# mean-squared-error loss that pushes the output toward 1.
data = np.array([[0.1, 0.2], [0.3, 0.4]])
data = np.reshape(data, (1, 2, 2, 1))
input = tf.placeholder(tf.float64, [1, 2, 2, 1])
predict = net(input)
loss = tf.reduce_mean(tf.square(1 - predict))
tf.summary.scalar('loss', loss)
train_op = train(loss)

with tf.Session() as sess:
	print('--trainable variables: %s' % tf.trainable_variables())
	summary_op, summary_writer, summary_path = summary(sess)
	start_step, ckpt_path, saver = ckpt(sess)
	# Run 21 steps, resuming from the restored step count if any.
	for it in range(start_step, start_step + 21):
		#display(sess)
		sess.run(train_op, {input: data})
		# Renamed from `summary`: the original rebound the summary()
		# function's name on the first loop iteration.
		summary_str = sess.run(summary_op, {input: data})
		summary_writer.add_summary(summary_str, it)
		# Checkpoint every 10 steps into the directory ckpt() reads from.
		if it % 10 == 0:
			saver.save(sess, os.path.join(ckpt_path, 'model.ckpt'), global_step=it)
	print('--model saved in: %s, summary saved in: %s.' % (ckpt_path, summary_path))

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值