Problems encountered with TensorFlow

Problem 1: the decoded bboxes come back empty

Original code:

  def __init__(self,
               load_instance_masks=False,
               instance_mask_type=input_reader_pb2.NUMERICAL_MASKS,
               label_map_proto_file=None,
               use_display_name=False,
               dct_method=''):
    """Constructor sets keys_to_features and items_to_handlers.

    Args:
      load_instance_masks: whether or not to load and handle instance masks.
      instance_mask_type: type of instance masks. Options are provided in
        input_reader.proto. This is only used if `load_instance_masks` is True.
      label_map_proto_file: a file path to an
        object_detection.protos.StringIntLabelMap proto. If provided, then the
        mapped IDs of 'image/object/class/text' will take precedence over the
        existing 'image/object/class/label' ID.  Also, if provided, it is
        assumed that 'image/object/class/text' will be in the data.
      use_display_name: whether or not to use the `display_name` for label
        mapping (instead of `name`).  Only used if label_map_proto_file is
        provided.
      dct_method: An optional string. Defaults to None. It only takes
        effect when image format is jpeg, used to specify a hint about the
        algorithm used for jpeg decompression. Currently valid values
        are ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be ignored, for
        example, if the jpeg library does not have that specific option.

    Raises:
      ValueError: If `instance_mask_type` option is not one of
        input_reader_pb2.DEFAULT, input_reader_pb2.NUMERICAL_MASKS, or
        input_reader_pb2.PNG_MASKS.
    """
    self.keys_to_features = {
        'image/encoded':  tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format':   tf.FixedLenFeature((), tf.string, default_value='png'),
        # 'image/filename': tf.FixedLenFeature((), tf.string, default_value=''),
        # 'image/key/sha256': tf.FixedLenFeature((), tf.string, default_value=''),
        # 'image/source_id':  tf.FixedLenFeature((), tf.string, default_value=''),
        'image/height':     tf.FixedLenFeature((), tf.int64, 1),
        'image/width':      tf.FixedLenFeature((), tf.int64, 1),
        # # Object boxes and classes.
        'image/object/bbox/xmin':  tf.VarLenFeature(tf.float32),
        'image/object/bbox/xmax':  tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymin':  tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymax':  tf.VarLenFeature(tf.float32),
        'image/object/class/label':  tf.VarLenFeature(tf.int64),
        'image/object/class/object_name':    tf.VarLenFeature(tf.string),
        'image/object/grasp_point/negative_points': tf.VarLenFeature(tf.float32),
        'image/object/grasp_point/negative_tan': tf.VarLenFeature(tf.float32),
        'image/object/grasp_point/positive_points': tf.VarLenFeature(tf.float32),
        'image/object/grasp_point/positive_tan': tf.VarLenFeature(tf.float32),
        # 'image/object/area':          tf.VarLenFeature(tf.float32),
        'image/object/is_crowd':      tf.VarLenFeature(tf.int64),
        # 'image/object/difficult':     tf.VarLenFeature(tf.int64),
        # 'image/object/group_of':      tf.VarLenFeature(tf.int64),
        'image/object/weight':        tf.VarLenFeature(tf.float32),
        'image/object/grasp_point/neg_griper_lengths_grasp_orientation': tf.VarLenFeature(tf.float32),
        'image/object/grasp_point/neg_griper_widths': tf.VarLenFeature(tf.float32),
        'image/object/grasp_point/post_griper_lengths_grasp_orientation': tf.VarLenFeature(tf.float32),
        'image/object/grasp_point/pos_griper_widths': tf.VarLenFeature(tf.float32),
    }
    if dct_method:
      image = slim_example_decoder.Image(
          image_key='image/encoded',
          format_key='image/format',
          channels=3,
          dct_method=dct_method)
    else:
      image = slim_example_decoder.Image(
          image_key='image/encoded', format_key='image/format', channels=3)
    self.items_to_handlers = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.source_id: ( slim_example_decoder.Tensor('image/object/class/label')),
        # fields.InputDataFields.key: ( slim_example_decoder.Tensor('image/key/sha256')),
        # fields.InputDataFields.filename: ( slim_example_decoder.Tensor('image/object/class/object_name')),
        # Object boxes and classes.
        fields.InputDataFields.groundtruth_boxes: ( slim_example_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],'image/object/bbox/')),
        fields.InputDataFields.negative_points:  ( slim_example_decoder.Tensor('image/object/grasp_point/negative_points')),
        fields.InputDataFields.negative_tan: ( slim_example_decoder.Tensor('image/object/grasp_point/negative_tan')),
        fields.InputDataFields.positive_points: ( slim_example_decoder.Tensor('image/object/grasp_point/positive_points')),
        fields.InputDataFields.positive_tan: ( slim_example_decoder.Tensor('image/object/grasp_point/positive_tan')),
        # fields.InputDataFields.groundtruth_area:  slim_example_decoder.Tensor('image/object/area'),
        fields.InputDataFields.groundtruth_is_crowd: ( slim_example_decoder.Tensor('image/object/is_crowd')),
        # fields.InputDataFields.groundtruth_difficult: ( slim_example_decoder.Tensor('image/object/difficult')),
        # fields.InputDataFields.groundtruth_group_of: ( slim_example_decoder.Tensor('image/object/group_of')),
        fields.InputDataFields.groundtruth_weights: ( slim_example_decoder.Tensor('image/object/weight')),
        fields.InputDataFields.neg_griper_lengths_grasp_orientation: (slim_example_decoder.Tensor('image/object/grasp_point/neg_griper_lengths_grasp_orientation')),
        fields.InputDataFields.neg_griper_widths: (slim_example_decoder.Tensor('image/object/grasp_point/neg_griper_widths')),
        fields.InputDataFields.post_griper_lengths_grasp_orientation: (slim_example_decoder.Tensor('image/object/grasp_point/post_griper_lengths_grasp_orientation')),
        fields.InputDataFields.pos_griper_widths: (slim_example_decoder.Tensor('image/object/grasp_point/pos_griper_widths')),
    }

    label_handler = slim_example_decoder.Tensor('image/object/class/label')
    self.items_to_handlers[fields.InputDataFields.groundtruth_classes] = label_handler  # restore groundtruth_classes here

  def decode(self, tf_example_string_tensor):
    """Decodes serialized tensorflow example and returns a tensor dictionary.

    Args:
      tf_example_string_tensor: a string tensor holding a serialized tensorflow
        example proto.

    Returns:
      A dictionary of the following tensors.
      fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, 3]
        containing image.
      fields.InputDataFields.source_id - string tensor containing original
        image id.
      fields.InputDataFields.key - string tensor with unique sha256 hash key.
      fields.InputDataFields.filename - string tensor with original dataset
        filename.
      fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape
        [None, 4] containing box corners.
      fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape
        [None] containing classes for the boxes.
      fields.InputDataFields.groundtruth_weights - 1D float32 tensor of
        shape [None] indicating the weights of groundtruth boxes.
      fields.InputDataFields.num_groundtruth_boxes - int32 scalar indicating
        the number of groundtruth_boxes.
      fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape
        [None] containing object mask area in pixels squared.
      fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape
        [None] indicating if the boxes enclose a crowd.

    Optional:
      fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape
        [None] indicating if the boxes represent `difficult` instances.
      fields.InputDataFields.groundtruth_group_of - 1D bool tensor of shape
        [None] indicating if the boxes represent `group_of` instances.
      fields.InputDataFields.groundtruth_instance_masks - 3D float32 tensor of
        shape [None, None, None] containing instance masks.
    """
    serialized_example = tf.reshape(tf_example_string_tensor, shape=[])
    decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features,
                                                    self.items_to_handlers)
    keys = decoder.list_items()
    tensors = decoder.decode(serialized_example, items=keys)
    tensor_dict = dict(zip(keys, tensors))
    is_crowd = fields.InputDataFields.groundtruth_is_crowd
    tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool)
    tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])
    tensor_dict[fields.InputDataFields.num_groundtruth_boxes] = tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]

    def default_groundtruth_weights():
      return tf.ones([tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]], dtype=tf.float32)

    tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond(
        tf.greater(
            tf.shape(
                tensor_dict[fields.InputDataFields.groundtruth_weights])[0],
            0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights],
        default_groundtruth_weights)
    return tensor_dict
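
For reference, the dump below can be produced with a minimal driver along these lines (a sketch: the record path 'train.record' and the class name TfExampleDecoder are assumptions standing in for whatever the project actually uses):

import tensorflow as tf

# Hypothetical driver: read the first record from a TFRecord file and
# print every item the decoder produces.
serialized_example = next(tf.python_io.tf_record_iterator('train.record'))

decoder = TfExampleDecoder()  # the class whose __init__/decode are shown above
tensor_dict = decoder.decode(tf.constant(serialized_example))

with tf.Session() as sess:
    for key, value in sess.run(tensor_dict).items():
        print((key, value))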

Output:

2018-04-17 01:25:52.602424: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: GeForce GTX 1060 6GB, pci bus id: 0000:01:00.0, compute capability: 6.1)
[[ 121. -350.  266. -249.]]
['positive_tan', 'groundtruth_is_crowd_runtime_shapes', 'negative_tan', 'image', 'neg_griper_widths', 'neg_griper_widths_runtime_shapes', 'filename_runtime_shapes', 'neg_griper_lengths_grasp_orientation', 'num_groundtruth_boxes', 'groundtruth_weights', 'xmin', 'ymin', 'ymax', 'post_griper_lengths_grasp_orientation', 'groundtruth_classes', 'filename', 'positive_points_runtime_shapes', 'negative_points', 'image_name_runtime_shapes', 'groundtruth_boxes_runtime_shapes', 'num_groundtruth_boxes_runtime_shapes', 'xmax', 'positive_points', 'image_name', 'negative_points_runtime_shapes', 'pos_griper_widths_runtime_shapes', 'groundtruth_boxes', 'post_griper_lengths_grasp_orientation_runtime_shapes', 'groundtruth_is_crowd', 'xmax_runtime_shapes', 'ymax_runtime_shapes', 'image_runtime_shapes', 'neg_griper_lengths_grasp_orientation_runtime_shapes', 'xmin_runtime_shapes', 'positive_tan_runtime_shapes', 'groundtruth_weights_runtime_shapes', 'negative_tan_runtime_shapes', 'source_id', 'pos_griper_widths', 'ymin_runtime_shapes', 'source_id_runtime_shapes', 'groundtruth_classes_runtime_shapes']
('positive_tan', array([-0.11666667, -0.27868852, -0.7807647 , -0.08291111,  8.204     ,
        1.07552   , -0.2944    , -0.09111628], dtype=float32))
('groundtruth_is_crowd_runtime_shapes', array([0], dtype=int32))
('negative_tan', array([5.3333335, 2.4666667], dtype=float32))
('image', array([[[[113.,  79.,  87.],
         [113.,  81.,  87.],
         [113.,  83.,  84.],
         ...,
         [ 93.,  73.,  74.],
         [ 89.,  72.,  52.],
         [ 89.,  73.,  63.]],

        [[111.,  78.,  87.],
         [111.,  80.,  87.],
         [111.,  83.,  84.],
         ...,
         [ 96.,  72.,  42.],
         [ 92.,  71.,  52.],
         [ 92.,  72.,  63.]],

        [[110.,  79.,  88.],
         [110.,  81.,  88.],
         [110.,  82.,  86.],
         ...,
         [100.,  71.,  53.],
         [ 96.,  72.,  58.],
         [ 96.,  73.,  62.]],

        ...,

        [[228., 210., 242.],
         [229., 209., 242.],
         [230., 219., 238.],
         ...,
         [232., 218., 217.],
         [231., 217., 217.],
         [231., 219., 217.]],

        [[229., 209., 238.],
         [230., 212., 238.],
         [231., 214., 236.],
         ...,
         [232., 220., 219.],
         [231., 218., 220.],
         [231., 218., 228.]],

        [[229., 207., 234.],
         [230., 212., 234.],
         [231., 217., 234.],
         ...,
         [232., 218., 221.],
         [231., 218., 224.],
         [231., 218., 228.]]]], dtype=float32))
('neg_griper_widths', array([37.628563, 38.899605], dtype=float32))
('neg_griper_widths_runtime_shapes', array([2], dtype=int32))
('filename_runtime_shapes', array([], dtype=int32))
('neg_griper_lengths_grasp_orientation', array([32.55764, 39.92493], dtype=float32))
('num_groundtruth_boxes', 1)
('groundtruth_weights', array([1.], dtype=float32))
('xmin', array([243.], dtype=float32))
('ymin', array([299.], dtype=float32))
('ymax', array([353.], dtype=float32))
('post_griper_lengths_grasp_orientation', array([60.40695 , 63.324562, 43.13571 , 45.154404, 41.323593, 36.714638,
       46.909584, 43.178127], dtype=float32))
('groundtruth_classes', array([], dtype=int64))
('filename', 'bottle')
('positive_points_runtime_shapes', array([16], dtype=int32))
('negative_points', array([281.5   , 320.4245, 287.5   , 345.8685], dtype=float32))
('image_name_runtime_shapes', array([], dtype=int32))
('groundtruth_boxes_runtime_shapes', array([0, 4], dtype=int32))
('num_groundtruth_boxes_runtime_shapes', array([], dtype=int32))
('xmax', array([294.], dtype=float32))
('positive_points', array([277.5   , 325.0085, 278.5   , 325.752 , 276.5   , 298.773 ,
       279.    , 296.8655, 276.    , 297.51  , 279.5   , 296.056 ,
       278.    , 323.124 , 280.5   , 337.959 ], dtype=float32))
('image_name', 'pcd0931r.png')
('negative_points_runtime_shapes', array([4], dtype=int32))
('pos_griper_widths_runtime_shapes', array([8], dtype=int32))
('groundtruth_boxes', array([], shape=(0, 4), dtype=float32))
('post_griper_lengths_grasp_orientation_runtime_shapes', array([8], dtype=int32))
('groundtruth_is_crowd', array([], dtype=bool))
('xmax_runtime_shapes', array([1], dtype=int32))
('ymax_runtime_shapes', array([1], dtype=int32))
('image_runtime_shapes', array([  1, 480, 640,   3], dtype=int32))
('neg_griper_lengths_grasp_orientation_runtime_shapes', array([2], dtype=int32))
('xmin_runtime_shapes', array([1], dtype=int32))
('positive_tan_runtime_shapes', array([8], dtype=int32))
('groundtruth_weights_runtime_shapes', array([1], dtype=int32))
('negative_tan_runtime_shapes', array([2], dtype=int32))
('source_id', array([3]))
('pos_griper_widths', array([27.149261 , 30.561646 , 11.401754 , 12.0415945, 17.117243 ,
       19.104973 , 17.720045 , 22.090721 ], dtype=float32))
('ymin_runtime_shapes', array([1], dtype=int32))
('source_id_runtime_shapes', array([1], dtype=int32))
('groundtruth_classes_runtime_shapes', array([0], dtype=int32))

Note that the individual coordinate items ('xmin', 'ymin', 'xmax', 'ymax') decode to non-empty values, yet the combined 'groundtruth_boxes' tensor comes back with shape (0, 4). Trying to expand 'groundtruth_boxes' in the PyCharm debugger fails with:

Unable to display children: Error resolving variables
Traceback (most recent call last):
  File "/Applications/PyCharm CE.app/Contents/helpers/pydev/_pydevd_bundle/pydevd_comm.py", line 1004, in do_it
    _typeName, valDict = pydevd_vars.resolve_compound_variable(self.thread_id, self.frame_id, self.scope, self.attributes)
TypeError: 'NoneType' object is not iterable
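
Since the debugger cannot expand the value, one workaround is to bypass items_to_handlers and parse the raw bbox features by hand (a sketch; serialized_example is the same bytes string as in the driver above). If the per-coordinate values come back non-empty here, the coordinates are present in the record and the problem lies in how the BoundingBox handler is wired up:

import tensorflow as tf

# Hypothetical check: parse only the bbox features, without the slim handlers.
bbox_keys = {
    'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
    'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
    'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
    'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
}
parsed = tf.parse_single_example(tf.constant(serialized_example), bbox_keys)

with tf.Session() as sess:
    for name in sorted(bbox_keys):
        # VarLenFeature yields SparseTensors; densify them for printing.
        print(name, sess.run(tf.sparse_tensor_to_dense(parsed[name])))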

Problem 4: FailedPreconditionError from an uninitialized epoch counter

FailedPreconditionError: Attempting to use uninitialized value dataset_data_provider/parallel_read/filenames/limit_epochs/epochs
	 [[Node: dataset_data_provider/parallel_read/filenames/limit_epochs/CountUpTo = CountUpTo[T=DT_INT64, limit=1, _device="/job:localhost/replica:0/task:0/device:CPU:0"](dataset_data_provider/parallel_read/filenames/limit_epochs/epochs)]]
	 [[Node: dataset_data_provider/parallel_read/filenames/Const/_3 = _HostRecv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device_incarnation=1, tensor_name="edge_5_dataset_data_provider/parallel_read/filenames/Const", tensor_type=DT_STRING, _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]

Solution:

init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init)
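
The limit_epochs counter named in the error is a local variable, so tf.global_variables_initializer() alone leaves it uninitialized; slim's parallel_read also feeds the data provider through input queues, which have to be serviced by queue runners. In context, a minimal session setup looks roughly like this (a sketch: the tensors being fetched are placeholders for whatever your pipeline defines):

import tensorflow as tf

with tf.Session() as sess:
    # limit_epochs creates a LOCAL variable (the epoch counter), hence
    # local_variables_initializer() is required in addition to the global one.
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    sess.run(init)

    # parallel_read uses input queues; without queue runners, reads block forever.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        pass  # sess.run(...) your tensors here
    finally:
        coord.request_stop()
        coord.join(threads)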
