In multithreaded code, a lock keeps threads from stepping on each other when they share a resource. For example, create several threads, each of which appends a number to the shared list l and prints its current contents; test1 below acquires the lock first, while test is an unlocked version for comparison:
import threading

lock = threading.Lock()
l = []

def test1(n):
    # Locked version: only one thread at a time may append and print
    lock.acquire()
    l.append(n)
    print(l)
    lock.release()

def test(n):
    # Unlocked version: append and print with no synchronization
    l.append(n)
    print(l)

def main():
    for i in range(10):
        thread1 = threading.Thread(target=test1, args=(i,))
        thread1.start()
        thread2 = threading.Thread(target=test, args=(i,))
        thread2.start()

if __name__ == '__main__':
    main()
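Because list.append runs atomically under CPython's GIL, the example above rarely shows visible corruption. A read-modify-write such as counter += 1, which compiles to several bytecodes, is where a lock genuinely matters. Here is a minimal sketch (unsafe_inc and safe_inc are illustrative names, not from the code above; how often the unlocked version actually loses updates depends on the interpreter version):

import threading

counter = 0
lock = threading.Lock()

def unsafe_inc():
    # counter += 1 is load, add, store: another thread can interleave between them
    global counter
    for _ in range(100000):
        counter += 1

def safe_inc():
    # "with lock" pairs acquire() and release() automatically
    global counter
    for _ in range(100000):
        with lock:
            counter += 1

for target in (unsafe_inc, safe_inc):
    counter = 0
    workers = [threading.Thread(target=target) for _ in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    # safe_inc always ends at 400000; unsafe_inc may end lower
    print(target.__name__, counter)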
Multithreading in TensorFlow works much like a generator: once the program below is running, it keeps producing data without stopping.
import tensorflow as tf
import numpy as np

def generate_data():
    # Simulate image and label data
    num = 25
    label = np.asarray(range(0, num))
    images = np.random.random([num, 5, 5, 3])
    return label, images

def get_batch_data():
    label, images = generate_data()
    images = tf.cast(images, tf.float32)
    label = tf.cast(label, tf.int32)
    # With shuffle=False and no num_epochs, the producer cycles through the data forever
    input_queue = tf.train.slice_input_producer([images, label], shuffle=False)
    image_batch, label_batch = tf.train.batch(input_queue, batch_size=10, num_threads=1, capacity=64)
    return image_batch, label_batch
image_batch, label_batch = get_batch_data()
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    try:
        while not coord.should_stop():
            # As long as the queue-runner threads are not stopped, batches keep coming
            image_batch_v, label_batch_v = sess.run([image_batch, label_batch])
            print(image_batch_v.shape, label_batch_v[0])
    except tf.errors.OutOfRangeError:
        print("done")
    finally:
        coord.request_stop()
        coord.join(threads)
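As written, slice_input_producer has no num_epochs, so the queue cycles forever and the OutOfRangeError branch never actually fires; the loop only ends if the coordinator is stopped from outside. A minimal variant (a sketch, assuming TF 1.x; num_epochs=2 is an arbitrary choice) bounds the producer so the queue eventually closes and "done" is printed. Note that num_epochs is tracked in a TensorFlow local variable, so tf.local_variables_initializer() must be run first:

import tensorflow as tf
import numpy as np

num = 25
labels = np.arange(num, dtype=np.int32)
images = np.random.random([num, 5, 5, 3]).astype(np.float32)

# After 2 passes over the 25 samples (50 elements, 5 batches of 10),
# the queue closes and sess.run raises tf.errors.OutOfRangeError.
input_queue = tf.train.slice_input_producer([images, labels], shuffle=False, num_epochs=2)
image_batch, label_batch = tf.train.batch(input_queue, batch_size=10, num_threads=1, capacity=64)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # the epoch counter is a local variable
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)
    try:
        while not coord.should_stop():
            image_batch_v, label_batch_v = sess.run([image_batch, label_batch])
            print(image_batch_v.shape, label_batch_v)
    except tf.errors.OutOfRangeError:
        print("done")  # producer exhausted after num_epochs passes
    finally:
        coord.request_stop()
        coord.join(threads)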