一、深度学习框架
TensorFlow 1.x
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.Variable(1,trainable=True)
tf.placeholder(tf.float32, shape=(None,3))
tf.range(10)
tf.one_hot(X,10)
tf.zeros((2,3),dtype=tf.int32)
tf.ones((2,3),dtype=tf.int32)
tf.fill((2,3),9)
tf.constant([1,2])
tf.random_uniform((2,3,5),seed=1)
tf.random_normal((2,3,5),stddev=1.5)
tf.truncated_normal((2,3,5),stddev=1.5)
tf.add(X1,X2)
tf.add_n([X1,X2,X3])
tf.subtract(X1,X2)
tf.multiply(X1,X2)
tf.square(X)
tf.matmul(X,W)
tf.transpose(X)
tf.clip_by_value(X,min_,max_)
tf.assign(x,10)
tf.reduce_sum(X)
tf.reduce_mean(X)
tf.argmax(X,1)
tf.greater(X1,X2)
tf.less(X1,X2)
tf.equal(X1,X2)
tf.where(tf.greater(X1,X2),Y1,Y2)
tf.cond(cond,func_1,func_2)
tf.while_loop(cond,body,variables)
tf.shape(X)
tf.expand_dims(X,axis=0)
tf.squeeze(X)
tf.global_variables()
tf.trainable_variables()
tf.cast(X,tf.float32)
tf.map_fn(func,elems=X)
tf.gather(X,indices)
tf.split(X,5,axis=0)
tf.concat(outputs,axis=0)
tf.sequence_mask(mask_arr,max_len)
tf.boolean_mask(X,mask)
ema = tf.train.ExponentialMovingAverage(0.97)
ema.apply([w1,w2,w3])
ema.average(w1)
dataset = tf.data.Dataset.from_tensor_slices(data)
dataset.shuffle(1000).batch(100)
iterator = tf.data.Iterator.from_structure(dataset.output_types,dataset.output_shapes)
iterator.make_initializer(dataset)
iterator.get_next()
tf.app.flags.DEFINE_integer("t",7,"")
tf.app.flags.DEFINE_float("t",7.4,"")
tf.app.flags.DEFINE_boolean("t",True,"")
tf.app.flags.DEFINE_string("t","te","")
tf.app.flags.FLAGS.t
tf.app.flags.FLAGS.__flags
tf.app.flags.mark_flag_as_required("t")
tf.app.run()
$ python code.py --t=7 --h=True
hparams = tf.contrib.training.HParams(lr=0.1)
hparams.batch_size = 32
hparams.values()
hparams.to_json()
_integers,_floats,_bytes = [1,1,1],[2,2,2],[b'3',b'3',b'3']
writer = tf.python_io.TFRecordWriter(r'/data.tfrecords')
for i in range(len(_integers)):
features = tf.train.Features(feature={
'_integers':tf.train.Feature(int64_list=tf.train.Int64List(value=[_integers[i]])),
'_floats':tf.train.Feature(float_list=tf.train.FloatList(value=[_floats[i]])),
'_bytes':tf.train.Feature(bytes_list=tf.train.BytesList(value=[_bytes[i]]))})
example = tf.train.Example(features=features)
writer.write(example.SerializeToString())
writer.close()
ex = next(tf.python_io.tf_record_iterator(r'/data.tfrecords'))
print(tf.train.Example.FromString(ex))
reader = tf.TFRecordReader()
queue = tf.train.string_input_producer([r'/data.tfrecords'])
_, serialized_example = reader.read(queue)
features = tf.parse_single_example(serialized_example,features={
'_integers':tf.FixedLenFeature([],tf.int64),
'_floats':tf.FixedLenFeature([],tf.float32)})
_integers = tf.cast(features['_integers'], tf.int64)
_floats = tf.cast(features['_floats'], tf.float32)
with tf.Session() as sess:
tf.global_variables_initializer().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(3):
print(_integers.eval())
coord.request_stop()
coord.join(threads=threads)
tf.summary.histogram(name,variable)
tf.summary.scalar(name,scalar)
tf.summary.image(name,image)
tf.summary.text(name,text)
tf.summary.audio(name,audio)
summary_op = tf.summary.merge_all()
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary,_ = sess.run((summary_op,train_op),feed_dict=feed_dict,options=options,run_metadata=run_metadata)
writer = tf.summary.FileWriter('/logs',tf.get_default_graph())
writer.add_run_metadata(run_metadata,'step %03d'%i)
writer.add_summary(summary,i)
writer.close()
$ tensorboard --logdir='D:/logs'
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
sess = tf.Session(config=config)
sess = tf.InteractiveSession(config=config)
sess.run(tf.global_variables_initializer())
print(x.eval())
sess.close()
with tf.name_scope('A'):
with tf.variable_scope('B',reuse=tf.AUTO_REUSE):
a = tf.Variable(1,name='a')
b = tf.get_variable(name='b',shape=(1),initializer=tf.zeros_initializer)
tf.zeros_initializer
tf.ones_initializer
tf.constant_initializer(2)
tf.random_uniform_initializer
tf.random_normal_initializer
tf.truncated_normal_initializer
with tf.variable_scope('',reuse=True):
x2 = tf.get_variable(name='W/X')
graph = tf.Graph()
graph.device('/gpu:0')
with graph.as_default():
x = tf.constant([1,2,3])
sess = tf.Session(graph=graph)
sess.graph.finalize()
tf.reset_default_graph()
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/MINIST_data/', one_hot=True)
X_train,y_train,X_test,y_test = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
mnist.train.next_batch(64)
config = tf.ConfigProto(log_device_placement=True,
allow_soft_placement=True)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess = tf.Session(config=config)
with tf.device('/cpu:0'):
x = tf.constant([1,2,3])
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
$ CUDA_VISIBLE_DEVICES='0,2' python code.py
cluster = tf.train.ClusterSpec({
'ps':['tf-ps0:2222',
'tf-ps1:2222'],
'worker':['tf-worker0:2222',
'tf-worker1:2222',
'tf-worker2:2222']})
server = tf.train.Server(cluster, job_name='ps', task_index=0)
server = tf.train.Server.create_local_server()
server.start()
server.join()
sess = tf.Session(target=server.target)
device_setter = tf.train.replica_device_setter(worker_device='/job:worker/task:0',cluster=cluster)
optimizer = tf.train.SyncReplicasOptimizer(
tf.train.GradientDescentOptimizer(learning_rate),
replicas_to_aggregate=n_workers,
total_num_replicas=n_workers)
sync_replicas_hook = optimizer.make_session_run_hook(is_chief)
with tf.device(device_setter):
is_chief = (TASK_ID == 0)
global_step,loss,train_op = build_model(x,y_,is_chief)
hooks = [sync_replicas_hook,
tf.train.StopAtStepHook(last_step=TRAINING_STEPS)]
with tf.train.MonitoredTrainingSession(
master=server.target,
is_chief=is_chief,
checkpoint_dir=MODEL_SAVE_PATH,
hooks=hooks,
save_checkpoint_secs=60,
config=config) as sess:
while not sess.should_stop():
x,y = data.next_batch(BATCH_SIZE)
l,step,_ = sess.run((loss,global_step,train_op),feed_dict=feed_dict(x,y))
tf.nn.tanh(Z)
tf.nn.relu(Z)
tf.nn.sigmoid(Z)
tf.nn.softmax(Z)
tf.nn.softmax_cross_entropy_with_logits(logits=y,labels=y_)
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=y_)
tf.train.GradientDescentOptimizer(learning_rate=0.01)
tf.train.AdamOptimizer(learning_rate=0.01)
tf.train.MomentumOptimizer(learning_rate=0.01)
train_op = Optimizer.minimize(loss)
grads = Optimizer.compute_gradients(loss)
for i, (g, v) in enumerate(grads):
if g is not None:
grads[i] = (tf.clip_by_norm(g, clipping_theta), v)
train_op = Optimizer.apply_gradients(grads)
tf.nn.embedding_lookup(embedding,data)
tf.nn.conv2d(X,filter=W,strides=[1,1,1,1],padding='SAME')
tf.nn.bias_add(X,bias)
tf.nn.relu(X)
tf.nn.moments(X,axes=[0,1])
tf.nn.max_pool(X,ksize=[1,3,3,1],strides=[1,2,2,1],padding='VALID')
tf.nn.avg_pool(X,ksize=[1,3,3,1],strides=[1,2,2,1],padding='VALID')
tf.nn.dropout(X, keep_prob