Create a constant
constant_v = tf.constant(value, name='constant_v')
#example
import numpy as np
import tensorflow as tf
X = tf.constant(np.random.randn(3,1), name='X')
y = tf.constant(39, name='y')
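A constant is just a node in the graph; its value only appears when you run it in a session. A minimal sketch using the y constant above:
with tf.Session() as sess:
    print(sess.run(y))   # prints 39; printing y directly only shows the Tensor object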
Create a variable
variable_v = tf.Variable(expression, name='variable_v')
#example
loss = tf.Variable((y - y_hat)**2, name='loss')
tf.global_variables_initializer()
init = tf.global_variables_initializer()
with tf.Session() as session:
    session.run(init)
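Putting the pieces together, a minimal sketch (y_hat is assumed to be the constant 36, purely for illustration):
y_hat = tf.constant(36, name='y_hat')       # assumed value
y = tf.constant(39, name='y')
loss = tf.Variable((y - y_hat)**2, name='loss')
init = tf.global_variables_initializer()
with tf.Session() as session:
    session.run(init)                        # initializes loss
    print(session.run(loss))                 # prints 9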
placeholder
# integer
x = tf.placeholder(tf.int64, name='x')
with tf.Session() as sess:
    print(sess.run(2 * x, feed_dict={x: 3}))   # prints 6
# matrix (None leaves the number of examples unspecified)
X = tf.placeholder(tf.float32, [n_x,None],name='X')
Y = tf.placeholder(tf.float32, [n_y,None],name='Y')
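A placeholder holds no data until it is fed through feed_dict at run time. A minimal sketch with an assumed n_x = 3 and a batch of 2 examples:
n_x = 3                                      # assumed size
X = tf.placeholder(tf.float32, [n_x, None], name='X')
batch = np.random.randn(n_x, 2)              # 2 examples, dummy data
with tf.Session() as sess:
    print(sess.run(2 * X, feed_dict={X: batch}))   # result has shape (3, 2)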
Session
1. Explicit session (must be closed manually)
sess = tf.Session()
result = sess.run(init)
sess.close()
2. Context manager (the session closes itself)
with tf.Session() as session:
    session.run(init)
One hot
labels = np.array([1,2,3,0,2,1])
C = 4  # number of classes
one_hot = tf.one_hot(labels, C, axis = 0) # axis=0 lays the one-hot vectors out along columns; axis=1 along rows
# run one_hot in a session here to get its value (see the sketch after the result)
print ("one_hot = " + str(one_hot))
Result:
one_hot = [[0. 0. 0. 1. 0. 0.]
[1. 0. 0. 0. 0. 1.]
[0. 1. 0. 0. 1. 0.]
[0. 0. 1. 0. 0. 0.]]
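A minimal sketch of the session step the comment above refers to (it reproduces the printed matrix):
labels = np.array([1,2,3,0,2,1])
one_hot_tensor = tf.one_hot(labels, 4, axis=0)   # C = 4 classes
with tf.Session() as sess:
    one_hot = sess.run(one_hot_tensor)
print("one_hot = " + str(one_hot))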
reduce_mean
tf.reduce_mean(input_tensor, axis=None, keep_dims=False, name=None, reduction_indices=None)
# If axis is not set, the mean is taken over all elements and a tensor containing a single element is returned.
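A small sketch of the axis behaviour (values chosen for illustration):
m = tf.constant([[1., 2.], [3., 4.]])
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(m)))           # 2.5, mean over all elements
    print(sess.run(tf.reduce_mean(m, axis=0)))   # [2. 3.], mean of each column
    print(sess.run(tf.reduce_mean(m, axis=1)))   # [1.5 3.5], mean of each row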
Cost
# use reduce_mean to average the per-example losses into a scalar cost
cost_sigmoid = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = z, labels = y))
cost_softmax = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = z, labels = y))
Flatten image
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
# -1 lets NumPy infer the remaining dimension, so all of X's elements are spread evenly over X.shape[0] rows
# X_train_orig.shape = (number of examples m, pixel_row, pixel_column, 3 RGB channels)
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# after flattening, each column holds pixel_row*pixel_column*3 elements, and there are m columns
# "one_hot" here only means converting Y to one-hot form; it is not actual code
Y_train = one_hot(Y_train_orig, depth)
Y_test = one_hot(Y_test_orig, depth)
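A minimal sketch of the shape change, with assumed sizes (m = 5 examples of 64x64 RGB images); dividing by 255 is the usual normalization that follows:
X_train_orig = np.random.randn(5, 64, 64, 3)               # dummy data, assumed shape
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
print(X_train_flatten.shape)                                # (12288, 5): 64*64*3 rows, m columns
X_train = X_train_flatten / 255.                            # scale pixel values to [0, 1]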
Initialize parameters
tf.set_random_seed(1)
# Xavier initialization for the weight matrix
W1 = tf.get_variable('W1', [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
# zero initialization for the bias term
b1 = tf.get_variable('b1', [25,1], initializer = tf.zeros_initializer())
Relu Sigmoid
A = tf.nn.relu(Z)
sigmoid_x = tf.sigmoid(x)
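These ops chain into forward propagation; a minimal sketch of one hidden layer using the W1, b1, and X defined above:
Z1 = tf.add(tf.matmul(W1, X), b1)   # (25, 12288) x (12288, m) + (25, 1), bias broadcast over columns
A1 = tf.nn.relu(Z1)                 # elementwise ReLU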
ops.reset_default_graph()
Resets the default graph so the model can be rerun without overwriting tf variables.
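A minimal sketch of where the call comes from; tf.reset_default_graph() is the same operation exposed directly on tf:
from tensorflow.python.framework import ops
ops.reset_default_graph()           # clear the current graph before rebuilding the model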
optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
_ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
# _ discards the optimizer's return value; c holds the cost for this minibatch
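A minimal sketch of the training loop this line sits in; num_epochs and minibatches are assumed to be prepared elsewhere (e.g. by a helper that splits X_train, Y_train into minibatches):
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epochs):                    # num_epochs assumed defined
        epoch_cost = 0.
        for minibatch_X, minibatch_Y in minibatches:   # minibatches assumed prepared beforehand
            _, c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
            epoch_cost += c / len(minibatches)
        print("Cost after epoch %i: %f" % (epoch, epoch_cost))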
argmax
argmax takes two arguments: a matrix and an axis. It returns the indices of the maximum values; axis defaults to 0, which returns the index of the maximum in each column.
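A short sketch of the two axis options (values chosen for illustration):
m = tf.constant([[1, 5, 2], [4, 3, 6]])
with tf.Session() as sess:
    print(sess.run(tf.argmax(m, axis=0)))   # [1 0 1]: row index of the max in each column
    print(sess.run(tf.argmax(m, axis=1)))   # [1 2]: column index of the max in each row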
Predict
correct_prediction = tf.equal(tf.argmax(ZL), tf.argmax(Y))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# tf.cast() converts the boolean tensor to floats so the mean can be taken
print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))