Methods for generating image formats for TensorFlow input and output in Python

Image files that TensorFlow can consume may be loaded as numpy arrays and fed into the graph through tf.Variable or tf.placeholder; they can also be read with TensorFlow's built-in functions (tf.read_file). When there are many image files, a queue-based input pipeline is usually used instead. Below we introduce two methods for producing image data in a format TensorFlow accepts, to serve as the input and output of a TensorFlow graph.

1 Reading with numpy

Read images stored in a .mat file via h5py:

import numpy as np
import h5py

height = 460
width = 345

with h5py.File('make3d_dataset_f460.mat', 'r') as f:
    images = f['images'][:]

image_num = len(images)
# preallocate a uint8 array and copy the transposed images into it,
# so the result keeps the (num, height, width, 3) uint8 layout
data = np.zeros((image_num, height, width, 3), np.uint8)
data[:] = images.transpose((0, 3, 2, 1))
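As mentioned in the introduction, a numpy array like data can then be fed into a TensorFlow graph through a tf.placeholder. A minimal sketch under that assumption (the graph here is just a dtype conversion, for illustration only):

import tensorflow as tf

# placeholder matching the shape and dtype of `data` above
image_input = tf.placeholder(tf.uint8, shape=(None, height, width, 3))
# illustrative graph: convert the images to float32 in [0, 1]
output = tf.image.convert_image_dtype(image_input, tf.float32)

with tf.Session() as sess:
    result = sess.run(output, feed_dict={image_input: data})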

First generate a text file listing the image paths (ls *.jpg > list.txt), then read the images with OpenCV:

import cv2
import numpy as np

image_path = './'
list_file = 'list.txt'
height = 48
width = 48

# read the image file names
with open(image_path + list_file) as fid:
    image_name_list = [x.strip() for x in fid.readlines()]
image_num = len(image_name_list)

data = np.zeros((image_num, height, width, 3), np.uint8)
for idx in range(image_num):
    img = cv2.imread(image_name_list[idx])
    # note: cv2.resize takes dsize as (width, height), not (height, width)
    img = cv2.resize(img, (width, height))
    data[idx, :, :, :] = img
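Note that cv2.imread returns pixels in BGR channel order, while images decoded by TensorFlow are RGB. If this array will later be mixed with TensorFlow-decoded images, it is safer to convert inside the loop; a minimal sketch:

# convert OpenCV's BGR layout to RGB before storing
data[idx, :, :, :] = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)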

2 Reading with TensorFlow's built-in functions

import tensorflow as tf

def get_image(image_path):
    """Reads the jpg image from image_path.

    Args:
        image_path: tf.string tensor

    Returns:
        the decoded jpeg image cast to float32 (values scaled to [0, 1])
    """
    return tf.image.convert_image_dtype(
        tf.image.decode_jpeg(
            tf.read_file(image_path), channels=3),
        dtype=tf.float32)
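A short usage sketch (the file name sample.jpg is a hypothetical placeholder): the function accepts a string tensor and can be evaluated in a session:

image = get_image(tf.constant('sample.jpg'))  # 'sample.jpg' is a hypothetical file
with tf.Session() as sess:
    img_array = sess.run(image)
    print(img_array.shape, img_array.dtype)  # (H, W, 3), float32 in [0, 1]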

Reading with an input pipeline

# Example of how to use the TensorFlow input pipeline.
# The explanation can be found at ischlag.github.io.
import tensorflow as tf
import random
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes

dataset_path = "/path/to/your/dataset/mnist/"
test_labels_file = "test-labels.csv"
train_labels_file = "train-labels.csv"

test_set_size = 5

IMAGE_HEIGHT = 28
IMAGE_WIDTH = 28
NUM_CHANNELS = 3
BATCH_SIZE = 5

def encode_label(label):
    return int(label)

def read_label_file(file):
    filepaths = []
    labels = []
    with open(file, "r") as f:
        for line in f:
            filepath, label = line.split(",")
            filepaths.append(filepath)
            labels.append(encode_label(label))
    return filepaths, labels

# read labels and file paths
train_filepaths, train_labels = read_label_file(dataset_path + train_labels_file)
test_filepaths, test_labels = read_label_file(dataset_path + test_labels_file)

# transform relative paths into full paths
train_filepaths = [dataset_path + fp for fp in train_filepaths]
test_filepaths = [dataset_path + fp for fp in test_filepaths]

# for this example we will create our own test partition
all_filepaths = train_filepaths + test_filepaths
all_labels = train_labels + test_labels
all_filepaths = all_filepaths[:20]
all_labels = all_labels[:20]

# convert the lists into tensors
all_images = ops.convert_to_tensor(all_filepaths, dtype=dtypes.string)
all_labels = ops.convert_to_tensor(all_labels, dtype=dtypes.int32)

# create a partition vector
partitions = [0] * len(all_filepaths)
partitions[:test_set_size] = [1] * test_set_size
random.shuffle(partitions)

# partition the data into a test and train set according to the partition vector
train_images, test_images = tf.dynamic_partition(all_images, partitions, 2)
train_labels, test_labels = tf.dynamic_partition(all_labels, partitions, 2)

# create input queues
train_input_queue = tf.train.slice_input_producer(
    [train_images, train_labels],
    shuffle=False)
test_input_queue = tf.train.slice_input_producer(
    [test_images, test_labels],
    shuffle=False)

# process path and string tensor into an image and a label
file_content = tf.read_file(train_input_queue[0])
train_image = tf.image.decode_jpeg(file_content, channels=NUM_CHANNELS)
train_label = train_input_queue[1]

file_content = tf.read_file(test_input_queue[0])
test_image = tf.image.decode_jpeg(file_content, channels=NUM_CHANNELS)
test_label = test_input_queue[1]

# define the static tensor shapes
train_image.set_shape([IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS])
test_image.set_shape([IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS])

# collect batches of images before processing
train_image_batch, train_label_batch = tf.train.batch(
    [train_image, train_label],
    batch_size=BATCH_SIZE
    # ,num_threads=1
)
test_image_batch, test_label_batch = tf.train.batch(
    [test_image, test_label],
    batch_size=BATCH_SIZE
    # ,num_threads=1
)

print("input pipeline ready")

with tf.Session() as sess:
    # initialize the variables (initialize_all_variables is deprecated)
    sess.run(tf.global_variables_initializer())

    # start the queue threads that shovel data into the pipeline
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    print("from the train set:")
    for i in range(20):
        print(sess.run(train_label_batch))

    print("from the test set:")
    for i in range(10):
        print(sess.run(test_label_batch))

    # stop the queue threads; the with-block closes the session
    coord.request_stop()
    coord.join(threads)
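For reference, TensorFlow 1.4 and later recommend the tf.data API over queue runners. A minimal sketch of an equivalent pipeline, reusing the all_filepaths / all_labels and constants from the script above (an assumption-laden sketch, not part of the original example):

def _parse(filename, label):
    # read and decode one image, then pin its static shape
    image = tf.image.decode_jpeg(tf.read_file(filename), channels=NUM_CHANNELS)
    image.set_shape([IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS])
    return image, label

dataset = tf.data.Dataset.from_tensor_slices((all_filepaths, all_labels))
dataset = dataset.map(_parse).batch(BATCH_SIZE)
image_batch, label_batch = dataset.make_one_shot_iterator().get_next()

with tf.Session() as sess:
    print(sess.run(label_batch))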

That's all for this article. We hope it is helpful to your study, and we hope you will continue to support 聚米学院.
