TensorFlow placeholder error: incorrect placeholder value

I am trying to run the following code, which uses the MNIST dataset in TensorFlow, with images of shape 28 * 28 = 784 and 10 output classes (the digits 0-9), and I get the error shown below:

InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_33' with dtype float and shape [?,10]

# Import MNIST data
#import input_data
#mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

import tensorflow as tf

# Set parameters
learning_rate = 0.01
training_iteration = 30
batch_size = 100
display_step = 2

# TF graph input
x = tf.placeholder("float", [None, 784])  # mnist data image of shape 28*28=784
y = tf.placeholder("float", [None, 10])   # 0-9 digits recognition => 10 classes

# Create a model

# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

with tf.name_scope("Wx_b") as scope:
    # Construct a linear model
    model = tf.nn.softmax(tf.matmul(x, W) + b)  # Softmax

# Add summary ops to collect data
w_h = tf.summary.histogram("weights", W)
b_h = tf.summary.histogram("biases", b)

# More name scopes will clean up graph representation
with tf.name_scope("cost_function") as scope:
    # Minimize error using cross entropy
    # Cross entropy
    cost_function = -tf.reduce_sum(y * tf.log(model))
    # Create a summary to monitor the cost function
    tf.summary.scalar("cost_function", cost_function)

with tf.name_scope("train") as scope:
    # Gradient descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)

# Initializing the variables
init = tf.initialize_all_variables()

# Merge all summaries into a single operator
merged_summary_op = tf.summary.merge_all()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    summary_writer = tf.summary.FileWriter('/home/raed/Tensorflow/tensorflow_demo', graph_def=sess.graph_def)

    # Training cycle
    for iteration in range(training_iteration):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
            # Compute the average loss
            avg_cost += sess.run(cost_function, feed_dict={x: batch_xs, y: batch_ys}) / total_batch
            # Write logs for each iteration
            summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
            summary_writer.add_summary(summary_str, iteration * total_batch + i)
        # Display logs per iteration step
        if iteration % display_step == 0:
            print("Iteration:" "%04d" % (iteration + 1), "cost=", "{:.9f}".format(avg_cost))

    print("Tuning completed!")

    # Test the model
    predictions = tf.equal(tf.argmax(model, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(predictions, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
