Reading DeepLab's tfrecords with slim



import numpy as np
import cv2
import tensorflow as tf

slim = tf.contrib.slim
tfexample_decoder = slim.tfexample_decoder

splits_to_sizes = {
    'train': 1000,  # number of samples in images/training
    'val': 100,     # number of samples in images/validation
}

_ITEMS_TO_DESCRIPTIONS = {
    'image': 'A color image of varying height and width.',
    'labels_class': ('A semantic segmentation label whose size matches the image. '
                     'Its values range from 0 (background) to num_classes.'),
}
keys_to_features = {
    'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
    'image/filename': tf.FixedLenFeature((), tf.string, default_value=''),
    'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
    'image/height': tf.FixedLenFeature((), tf.int64, default_value=0),
    'image/width': tf.FixedLenFeature((), tf.int64, default_value=0),
    'image/segmentation/class/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
    'image/segmentation/class/format': tf.FixedLenFeature((), tf.string, default_value='png'),
}

items_to_handlers = {
    'image': tfexample_decoder.Image(
        image_key='image/encoded',
        format_key='image/format',
        channels=3),
    'image_name': tfexample_decoder.Tensor('image/filename'),
    'height': tfexample_decoder.Tensor('image/height'),
    'width': tfexample_decoder.Tensor('image/width'),
    'labels_class': tfexample_decoder.Image(
        image_key='image/segmentation/class/encoded',
        format_key='image/segmentation/class/format',
        channels=1),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
    keys_to_features, items_to_handlers)

dataset = slim.dataset.Dataset(
    # A glob such as 'train-*.tfrecord' also works here to read all shards.
    data_sources='/home/wangzihao/dataset/segdata2/tfrecord/train-00000-of-00004.tfrecord',
    reader=tf.TFRecordReader,
    decoder=decoder,
    num_samples=splits_to_sizes['train'],  # the size of this split, not the whole dict
    items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
    num_classes=2,
    labels_to_names=None)  # optionally a dict mapping label ids to class names

provider = slim.dataset_data_provider.DatasetDataProvider(
    dataset,
    num_readers=1,
    common_queue_capacity=20,
    common_queue_min=10)

# Alternatively, fetch the label together with the image
# (see the batching sketch after the script):
# [image, label] = provider.get(['image', 'labels_class'])
[image] = provider.get(['image'])

# The decoded image tensor has dynamic height/width; the reshape below
# assumes the source images are exactly 256x256x3. For varying sizes,
# resize instead, e.g.:
# image = tf.image.resize_images(tf.expand_dims(image, 0), [512, 512])
image = tf.reshape(image, tf.stack([256, 256, 3]))
# label = tf.reshape(label, tf.stack([256, 256, 1]))  # the label is single-channel
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    i = 0
    try:
        while not coord.should_stop() and i < 1:
            images2 = sess.run([image])
            res2 = np.array(images2).reshape(256, 256, 3)
            # Note: the decoder yields RGB while cv2.imwrite expects BGR, so
            # the saved file has swapped channels unless you convert with
            # cv2.cvtColor first.
            cv2.imwrite('image.png', res2)
            i += 1
    except tf.errors.OutOfRangeError:
        print("done")
    finally:
        coord.request_stop()
    coord.join(threads)
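
The commented-out lines above hint at batching. Below is a minimal sketch of how the same provider could feed image/label pairs into tf.train.batch for training; it assumes all images and labels are 256x256, and the batch size and queue capacity are illustrative values, not tuned ones.

# Minimal batching sketch (assumes 256x256 inputs; batch_size/capacity are illustrative).
[image, label] = provider.get(['image', 'labels_class'])
image = tf.reshape(image, [256, 256, 3])
label = tf.reshape(label, [256, 256, 1])   # the segmentation label is single-channel

images, labels = tf.train.batch(
    [image, label],
    batch_size=10,
    num_threads=1,
    capacity=5 * 10)

# images: [10, 256, 256, 3] uint8, labels: [10, 256, 256, 1] uint8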

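For reference, this is roughly the tf.train.Example layout that the keys_to_features above expects (the same keys DeepLab's conversion scripts write). The sketch below is hypothetical: write_example and the file paths are made up for illustration, and it simply stores the already-encoded PNG bytes as-is.

def _bytes_feature(value):
    # Wrap raw bytes in a tf.train.Feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def write_example(writer, image_path, label_path, height, width):
    # Hypothetical helper: store the encoded PNG bytes directly, exactly as
    # the decoder above expects to find them.
    image_data = tf.gfile.GFile(image_path, 'rb').read()
    label_data = tf.gfile.GFile(label_path, 'rb').read()
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': _bytes_feature(image_data),
        'image/filename': _bytes_feature(image_path.encode()),
        'image/format': _bytes_feature(b'png'),
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/segmentation/class/encoded': _bytes_feature(label_data),
        'image/segmentation/class/format': _bytes_feature(b'png'),
    }))
    writer.write(example.SerializeToString())

# Usage (paths are placeholders):
# with tf.python_io.TFRecordWriter('train-00000-of-00004.tfrecord') as writer:
#     write_example(writer, 'images/0001.png', 'labels/0001.png', 256, 256)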