Python
# Total number of training steps to run; progress is reported every 100 steps.
num_steps = 801
def accuracy(predictions, labels):
    """Return the percentage of rows whose predicted class matches the label.

    Both arguments are 2-D arrays: per-class scores in `predictions` and
    one-hot vectors in `labels`; the class is taken as the argmax of each row.
    """
    predicted_classes = np.argmax(predictions, 1)
    true_classes = np.argmax(labels, 1)
    num_correct = np.sum(predicted_classes == true_classes)
    return 100.0 * num_correct / predictions.shape[0]
with tf.Session(graph=graph) as session:
    # One-time op that initializes the parameters as described when the graph
    # was built: random weights for the matrix, zeros for the biases.
    # NOTE: tf.initialize_all_variables() is deprecated (since TF 0.12) in
    # favor of tf.global_variables_initializer().
    tf.global_variables_initializer().run()
    print('Initialized')
    for step in range(num_steps):
        # Run one optimization step; the loss value and the training
        # predictions come back as numpy arrays. The optimizer op's own
        # result is discarded, hence the `_`.
        _, l, predictions = session.run([optimizer, loss, train_prediction])
        if step % 100 == 0:
            print('Loss at step %d: %f' % (step, l))
            print('Training accuracy: %.1f%%' % accuracy(
                predictions, train_labels[:train_subset, :]))
            # Calling .eval() on valid_prediction is basically like calling
            # run(), but just to get that one numpy array. Note that it
            # recomputes all of its graph dependencies.
            print('Validation accuracy: %.1f%%' % accuracy(
                valid_prediction.eval(), valid_labels))
    print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
# Reconstructed from a whitespace-stripped paste (e.g. `defaccuracy(...)`):
# the token spacing below restores valid Python with identical semantics.
num_steps = 801


def accuracy(predictions, labels):
    """Return the percentage of rows whose argmax prediction matches the
    argmax of the corresponding one-hot label row."""
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])
# Reconstructed from a whitespace-stripped paste (`withtf.Session...assession`,
# `forstepinrange(...)`): spacing and indentation restored so the snippet is
# valid Python; tf.initialize_all_variables() is also replaced by its
# non-deprecated equivalent.
with tf.Session(graph=graph) as session:
    # One-time op that initializes the graph's parameters: random weights for
    # the matrix, zeros for the biases.
    tf.global_variables_initializer().run()
    print('Initialized')
    for step in range(num_steps):
        # Run one optimization step and fetch the loss plus the training
        # predictions as numpy arrays; the optimizer op's result itself is
        # ignored, hence the `_`.
        _, l, predictions = session.run([optimizer, loss, train_prediction])
        if step % 100 == 0:
            print('Loss at step %d: %f' % (step, l))
            print('Training accuracy: %.1f%%' % accuracy(
                predictions, train_labels[:train_subset, :]))
            # .eval() on valid_prediction is like run() for a single tensor;
            # note that it recomputes all of that tensor's graph dependencies.
            print('Validation accuracy: %.1f%%' % accuracy(
                valid_prediction.eval(), valid_labels))
    print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
注意第17行,第一个变量居然是一个下划线!
在查阅了一些资料之后,有人是这么描述单个下划线的使用场景:
在交互式的时候,保持上一次执行的结果
没看懂。原文如下:
To hold the result of the last executed statement in an interactive interpreter session. This precedent was set by the standard CPython interpreter, and other interpreters have followed suit
i18n的时候,自动加载翻译
forms.ValidationError(_("Please enter a correct username"))
可以直接忽略、丢弃的变量
我们再回原始代码看看:https://github.com/tensorflow/tensorflow/blob/3737ac321e67410bf061257d5f644eae8abbf79b/tensorflow/examples/udacity/2_fullyconnected.ipynb
Ctrl+F 搜索一下,可以看到下面的代码完全没使用这个变量。 因此在这里的作用,应该属于第三种: 你完全可以忽略。。。
今天又学习了一个小技巧...
本文为原创文章,转载请注明出处
原文链接:http://www.flyml.net/2016/11/09/purpose-of-the-single-underscore-variable-in-python/