随心一记(没什么营养)
tf.abs
求绝对值的
tf.add()
相加的 支持broadcast
tf.negative()
取反的
例如有这样的需求 求两个tensor的相减的结果
tf.add(tensor1,tf.negative(tensor2))
机器学习中的NN 实现出处:
# 1-Nearest-Neighbor classifier on MNIST, built with TensorFlow 1.x graph ops.
# For each test image, find the training image with the smallest L1 distance
# and predict its label.
import tensorflow as tf
import numpy as np

# Import MNIST data (TF 1.x tutorial helper; deprecated in TF 2.x).
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("../mnist_data/", one_hot=True)

# In this example, we limit the MNIST data.
# NOTE: next_batch returns numpy arrays, not tensors.
Xtr, Ytr = mnist.train.next_batch(5000)  # 5000 for training (NN candidates)
Xte, Yte = mnist.test.next_batch(200)    # 200 for testing

# Graph inputs: the whole training set vs. ONE test image (784 = 28*28 pixels).
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])

# L1 (Manhattan) distance from the test image to every training image.
# tf.add(a, tf.negative(b)) is a - b; xte broadcasts across the batch axis,
# then reduce_sum over axis=1 yields one scalar distance per training image.
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), axis=1)
# Index of the training image with the smallest distance (the nearest neighbor).
nn_index = tf.argmin(distance, axis=0)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    accuracy = 0.0
    # Loop over test data
    for i in range(len(Xte)):
        # Get nearest neighbor.
        # NOTE: feed_dict must supply both placeholders on every run.
        a = sess.run(nn_index, feed_dict={xtr: Xtr, xte: Xte[i, :]})
        # Get nearest neighbor class label and compare it to its true label
        print("Test", i, "Prediction:", np.argmax(Ytr[a]),
              "True Class:", np.argmax(Yte[i]))
        # Calculate accuracy: each correct test sample contributes 1/len(Xte)
        if np.argmax(Ytr[a]) == np.argmax(Yte[i]):
            accuracy += 1./len(Xte)
    print("Done!")
    print("Accuracy:", accuracy)