python dataset.mnist - Reading a new dataset in TensorFlow in the same format as the MNIST dataset

I have a deep learning model that requires input images of size 100x100. The data I have is:

train images - x_train (530, 100, 100),

train labels - y_train (530, 4),

test images - x_test (89, 100, 100),

test labels - y_test (89, 4).

The MNIST dataset is read using - mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

and it produces something like this -

(output of read_data_sets omitted here)
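For reference, the object returned by input_data.read_data_sets exposes roughly the following interface (a sketch of the standard TensorFlow tutorial helper, not the exact output that was printed):

    mnist.train.images.shape    # (55000, 784) - flattened 28x28 images
    mnist.train.labels.shape    # (55000, 10)  - one-hot labels
    mnist.validation            # a similar split with 5000 examples
    mnist.test                  # a similar split with 10000 examples
    images, labels = mnist.train.next_batch(100)   # numpy arrays for one batch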

I have to convert my data into the same format so that my existing code works. Please help. The training loop is:

    epochs = 20
    batch_size = 100
    image_vector = 28*28

    for i in range(epochs):
        training_accuracy = []
        epoch_loss = []

        for ii in tqdm(range(mnist.train.num_examples // batch_size)):
            batch = mnist.train.next_batch(batch_size)
            images = batch[0].reshape((-1, 28, 28))
            targets = batch[1]

            c, _, a = session.run([model.cost, model.opt, model.accuracy],
                                  feed_dict={model.inputs: images, model.targets: targets})

            epoch_loss.append(c)
            training_accuracy.append(a)

        print("Epoch: {}/{}".format(i, epochs),
              " | Current loss: {}".format(np.mean(epoch_loss)),
              " | Training accuracy: {:.4f}%".format(np.mean(training_accuracy)))

Edit 1:

As suggested, I did the following -

    num_examples = 271
    batch_size = 10
    buffer_size = 271
    num_cpu_cores = 4

    dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    dataset = dataset.shuffle(buffer_size, reshuffle_each_iteration=True).repeat()
    #dataset = dataset.apply(tf.data.batch(batch_size=batch_size, num_parallel_batches=num_cpu_cores))
    #batch1 = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    for ii in tqdm(range(num_examples // batch_size)):
        batch = iterator.get_next()
        images = batch[0]
        targets = batch[1]

        c, _, a = sess.run([model.cost, model.opt, model.accuracy])

        epoch_loss.append(c)
        training_accuracy.append(a)

    print("Epoch: {}/{}".format(i, epochs),
          " | Current loss: {}".format(np.mean(epoch_loss)),
          " | Training accuracy: {:.4f}%".format(np.mean(training_accuracy)))

Here, images and targets are not a batch but a single image and label. Please suggest how to batch the data and feed it into sess.run.
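Since the .batch() call above is commented out, each element the iterator produces is a single (image, label) pair rather than a batch. A minimal sketch of batching the pipeline, reusing the variable names from the snippet above (this is an assumed adjustment, not the original code):

    dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    dataset = dataset.shuffle(buffer_size, reshuffle_each_iteration=True).repeat()
    dataset = dataset.batch(batch_size)          # groups elements into (10, 100, 100) images and (10, 4) labels
    iterator = dataset.make_one_shot_iterator()
    next_batch = iterator.get_next()             # symbolic tensors for one batch

    with tf.Session() as sess:
        images, targets = sess.run(next_batch)   # evaluates to numpy arrays that can be fed to placeholders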

Edit 2:

Here is the full code for the algorithm -

    def LSTM_layer(lstm_cell_units, number_of_layers, batch_size, dropout_rate=0.8):
        '''
        This method is used to create LSTM layer/s for PixelRNN

        Input(s): lstm_cell_units - used to define the number of units in a LSTM layer
                  number_of_layers - used to define how many LSTM layers we want in the network
                  batch_size - in this method this information is used to build the starting state for the network
                  dropout_rate - used to define how many cells in a layer we want to 'turn off'

        Output(s): cell - lstm layer
                   init_state - zero vectors used as a starting state for the network
        '''
        #layer = tf.contrib.rnn.BasicLSTMCell(lstm_cell_units)
        layer = tf.nn.rnn_cell.LSTMCell(lstm_cell_units, name='basic_lstm_cell')

        if dropout_rate != 0:
            layer = tf.contrib.rnn.DropoutWrapper(layer, dropout_rate)

        cell = tf.contrib.rnn.MultiRNNCell([layer]*number_of_layers)
        init_size = cell.zero_state(batch_size, tf.float32)

        return cell, init_size

    def rnn_output(lstm_outputs, input_size, output_size):
        '''
        Output layer for the lstm network

        Input(s): lstm_outputs - outputs from the RNN part of the network
                  input_size - in this case it is the RNN size (number of neurons in the RNN layer)
                  output_size - number of neurons for the output layer == number of classes

        Output(s): logits
        '''
        outputs = lstm_outputs[:, -1, :]

        weights = tf.Variable(tf.random_uniform([input_size, output_size]), name='rnn_out_weights')
        bias = tf.Variable(tf.zeros([output_size]), name='rnn_out_bias')

        output_layer = tf.matmul(outputs, weights) + bias
        return output_layer

    def loss_optimizer(rnn_out, targets, learning_rate):
        '''
        Function used to calculate loss and minimize it

        Input(s): rnn_out - logits from the fully connected layer
                  targets - targets used to train the network
                  learning_rate/step_size

        Output(s): optimizer - optimizer of choice
                   loss - calculated loss function
        '''
        loss = tf.nn.softmax_cross_entropy_with_logits(logits=rnn_out, labels=targets)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        return optimizer, loss

    class PixelRNN(object):

        def __init__(self, learning_rate=0.001, batch_size=10, classes=4, img_size=(129, 251), lstm_size=64,
                     number_of_layers=1, dropout_rate=0.6, clip_rate=None):
            '''
            PixelRNN - call this class to create the whole model

            Input(s): learning_rate - how fast we are going to move towards the global minimum
                      batch_size - how many samples we feed at once
                      classes - number of classes that we are trying to recognize
                      img_size - width and height of a single image
                      lstm_size - number of neurons in a LSTM layer
                      number_of_layers - number of RNN layers in the PixelRNN
                      dropout_rate - % of cells in a layer that we are stopping gradients to flow through
            '''
            # These placeholders are just for images
            self.inputs = tf.placeholder(tf.float32, [None, img_size[0], img_size[1]], name='inputs')
            self.targets = tf.placeholder(tf.int32, [None, classes], name='targets')

            cell, init_state = LSTM_layer(lstm_size, number_of_layers, batch_size, dropout_rate)

            outputs, states = tf.nn.dynamic_rnn(cell, self.inputs, initial_state=init_state)

            rnn_out = rnn_output(outputs, lstm_size, classes)

            self.opt, self.cost = loss_optimizer(rnn_out, self.targets, learning_rate)

            predictions = tf.nn.softmax(rnn_out)
            currect_pred = tf.equal(tf.cast(tf.round(tf.argmax(predictions, 1)), tf.int32),
                                    tf.cast(tf.argmax(self.targets, 1), tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(currect_pred, tf.float32))

            self.predictions = tf.argmax(tf.nn.softmax(rnn_out), 1)

    tf.reset_default_graph()
    model = PixelRNN()

    num_examples = 271
    batch_size = 10
    buffer_size = 271
    num_cpu_cores = 4

    dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    dataset = dataset.shuffle(buffer_size, reshuffle_each_iteration=True).repeat()
    #dataset = dataset.apply(tf.data.batch(batch_size=batch_size, num_parallel_batches=num_cpu_cores))
    dataset = dataset.batch(10)                  # apply batch to dataset
    iterator = dataset.make_one_shot_iterator()  # create iterator

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    for ii in tqdm(range(num_examples // batch_size)):
        batch = iterator.get_next()  # run iterator
        images = batch[0]
        targets = batch[1]

        c, _, a = sess.run([model.cost, model.opt, model.accuracy],
                           feed_dict={model.inputs: images, model.targets: targets})

        epoch_loss.append(c)
        training_accuracy.append(a)

    print("Epoch: {}/{}".format(i, epochs),
          " | Current loss: {}".format(np.mean(epoch_loss)),
          " | Training accuracy: {:.4f}%".format(np.mean(training_accuracy)))

When I did as suggested, I got the following error -

    TypeError: The value of a feed cannot be a tf.Tensor object. Acceptable feed values include Python scalars, strings, lists, numpy ndarrays, or TensorHandles. For reference, the tensor object was Tensor("IteratorGetNext:0", shape=(?, 129, 251), dtype=float32) which was passed to the feed with key Tensor("inputs:0", shape=(?, 129, 251), dtype=float32).

Not sure what is going wrong here.
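The error arises because iterator.get_next() returns symbolic tensors, while feed_dict only accepts concrete values such as numpy arrays. A minimal sketch of one way to adapt the loop above, assuming the rest of the setup stays the same, is to evaluate the iterator output first and feed the resulting arrays (alternatively, the model could be built directly on the iterator tensors so the placeholders are not needed at all):

    next_batch = iterator.get_next()  # create the op once, before the loop, not inside it

    for ii in tqdm(range(num_examples // batch_size)):
        images, targets = sess.run(next_batch)  # concrete numpy arrays for this batch
        c, _, a = sess.run([model.cost, model.opt, model.accuracy],
                           feed_dict={model.inputs: images, model.targets: targets})
        epoch_loss.append(c)
        training_accuracy.append(a)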
