splits_to_sizes = {
'train': 1000, # num of samples in images/training
'val': 100, # num of samples in images/validation
}
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying height and width.',
'labels_class': ('A semantic segmentation label whose size matches image.'
'Its values range from 0 (background) to num_classes.'),
}
# Mapping from tf.Example feature keys to their parsing configuration.
# Every feature is a scalar (shape ()) with a safe default so parsing does
# not fail on records that omit a field.
keys_to_features = {
    # The raw image bytes and associated metadata.
    'image/encoded':
        tf.FixedLenFeature((), tf.string, default_value=''),
    'image/filename':
        tf.FixedLenFeature((), tf.string, default_value=''),
    'image/format':
        tf.FixedLenFeature((), tf.string, default_value='png'),
    'image/height':
        tf.FixedLenFeature((), tf.int64, default_value=0),
    'image/width':
        tf.FixedLenFeature((), tf.int64, default_value=0),
    # The raw segmentation-mask bytes and their encoding.
    'image/segmentation/class/encoded':
        tf.FixedLenFeature((), tf.string, default_value=''),
    'image/segmentation/class/format':
        tf.FixedLenFeature((), tf.string, default_value='png'),
}
# Mapping from decoded item names to the handlers that produce them from
# the parsed features above.
items_to_handlers = {
    # 3-channel color image decoded from its encoded bytes.
    'image': tfexample_decoder.Image(
        image_key='image/encoded',
        format_key='image/format',
        channels=3),
    'image_name': tfexample_decoder.Tensor('image/filename'),
    'height': tfexample_decoder.Tensor('image/height'),
    'width': tfexample_decoder.Tensor('image/width'),
    # Single-channel segmentation mask decoded from its encoded bytes.
    'labels_class': tfexample_decoder.Image(
        image_key='image/segmentation/class/encoded',
        format_key='image/segmentation/class/format',
        channels=1),
}
# Decoder that turns serialized tf.Example protos into the named items above.
decoder = slim.tfexample_decoder.TFExampleDecoder(
    keys_to_features, items_to_handlers)

# NOTE(review): data_sources names only shard 0 of 4; to read the whole
# train split this should probably be the glob 'train-*-of-00004.tfrecord'
# — confirm intent before changing.
dataset = slim.dataset.Dataset(
    data_sources='/home/wangzihao/dataset/segdata2/tfrecord/train-00000-of-00004.tfrecord',
    reader=tf.TFRecordReader,
    decoder=decoder,
    # Bug fix: num_samples must be the sample count (an int), not the whole
    # split->size dict that was being passed before.
    num_samples=splits_to_sizes['train'],
    items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
    num_classes=2,
    # Bug fix: labels_to_names is a dict mapping label id -> class name;
    # the previous value `True` was the wrong type. Names below are assumed
    # for a 2-class segmentation task — TODO confirm the actual class names.
    labels_to_names={0: 'background', 1: 'foreground'})
# Queue parameters for the common example queue feeding the reader.
_QUEUE_CAPACITY = 20
_QUEUE_MIN = 10

# Single-reader data provider that serves decoded items from the dataset.
provider = slim.dataset_data_provider.DatasetDataProvider(
    dataset,
    num_readers=1,
    common_queue_capacity=_QUEUE_CAPACITY,
    common_queue_min=_QUEUE_MIN)
# Pull one decoded image tensor from the provider.
# Bug fix: provider.get() returns a LIST of tensors; the original passed that
# list straight into tf.reshape. Unpack the single element instead.
[image] = provider.get(['image'])
# Give the tensor a static shape.
# NOTE(review): this assumes every stored image is already 256x256 RGB —
# tf.reshape fails at run time for any other size. If inputs vary, use
# tf.image.resize_images instead of reshape — confirm with the data.
image = tf.reshape(image, [256, 256, 3])
with tf.Session() as sess:
    # Start the queue-runner threads that keep the provider's queue filled.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    step = 0
    try:
        # Sanity check: fetch exactly one image and dump it to disk.
        while not coord.should_stop() and step < 1:
            fetched = sess.run([image])
            arr = np.array(fetched).reshape(256, 256, 3)
            cv2.imwrite('iamge.png', arr)
            step += 1
    except tf.errors.OutOfRangeError:
        print("done")
    finally:
        # Always stop and join the queue-runner threads, even on error.
        coord.request_stop()
        coord.join(threads)
# Source: blog post "使用slim读取Deeplab里的tfrecords"
# ("Reading Deeplab's tfrecords with slim"), last published 2019-07-23 15:50:45.