TensorFlow multi-process inference across multiple GPUs (repost)

Reference: https://github.com/tensorflow/tensorflow/issues/8220
Note: this example uses two GPUs. Each worker process sets CUDA_VISIBLE_DEVICES to its own GPU id before importing TensorFlow, so every process sees only the GPU it was assigned.

tensorflow 1.15

import os
import multiprocessing


class Predictor(multiprocessing.Process):
    def __init__(self, input_queue, output_queue, gpu_id):
        multiprocessing.Process.__init__(self)
        self.input_queue = input_queue
        self.output_queue = output_queue
        self.gpu_id = gpu_id

    def run(self):
        # Set the GPU id *before* importing TensorFlow, so this process only sees its own GPU.
        os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(self.gpu_id)
        # Import TensorFlow only after CUDA_VISIBLE_DEVICES has been set.
        import tensorflow as tf
        sess = tf.Session()
        print('Using device #%s' % self.gpu_id)
        a = tf.placeholder(tf.int16, name='a')
        y = tf.identity(a, name='y')
        while True:
            item = self.input_queue.get()
            if item is None:
                # None is the sentinel telling this worker to shut down.
                self.input_queue.task_done()
                print("Exiting Process %d" % self.gpu_id)
                break
            else:
                res = sess.run(y, feed_dict={a: item})
                self.input_queue.task_done()
                self.output_queue.put(res)
        sess.close()
        return

if __name__ == "__main__":
    jobs = list(range(10000))
    num_gpus = 2
    p_list = []
    input_queue = multiprocessing.JoinableQueue()
    output_queue = multiprocessing.Queue()
    for i in range(num_gpus):
        p = Predictor(input_queue, output_queue, i)
        p_list.append(p)

    for p in p_list:
        p.start()

    for job in jobs:
        input_queue.put(job)

    # One None sentinel per worker so every process exits its loop.
    for i in range(num_gpus):
        input_queue.put(None)

    for i in range(len(jobs)):
        print(output_queue.get())

    input_queue.join()
    
    for p in p_list:
        p.join()
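
Note that because two workers drain the same input_queue, results land on output_queue in completion order rather than submission order. Inside each worker the assigned GPU shows up as device 0, since CUDA_VISIBLE_DEVICES hides the other card.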

tensorflow 2.x

import os
import multiprocessing


class Predictor(multiprocessing.Process):
    def __init__(self, input_queue, output_queue, gpu_id):
        multiprocessing.Process.__init__(self)
        self.input_queue = input_queue
        self.output_queue = output_queue
        self.gpu_id = gpu_id

    def run(self):
        # Set the GPU id *before* importing TensorFlow, so this process only sees its own GPU.
        os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(self.gpu_id)
        # Import TensorFlow only after CUDA_VISIBLE_DEVICES has been set.
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()
        sess = tf.Session()
        print('Using device #%s' % self.gpu_id)
        a = tf.placeholder(tf.int16, name='a')
        y = tf.identity(a, name='y')
        while True:
            item = self.input_queue.get()
            if item is None:
                # None is the sentinel telling this worker to shut down.
                self.input_queue.task_done()
                print("Exiting Process %d" % self.gpu_id)
                break
            else:
                res = sess.run(y, feed_dict={a: item})
                self.input_queue.task_done()
                self.output_queue.put(res)
        sess.close()
        return

if __name__ == "__main__":
    jobs = list(range(10000))
    num_gpus = 2
    p_list = []
    input_queue = multiprocessing.JoinableQueue()
    output_queue = multiprocessing.Queue()
    for i in range(num_gpus):
        p = Predictor(input_queue, output_queue, i)
        p_list.append(p)

    for p in p_list:
        p.start()

    for job in jobs:
        input_queue.put(job)

    # One None sentinel per worker so every process exits its loop.
    for i in range(num_gpus):
        input_queue.put(None)

    for i in range(len(jobs)):
        print(output_queue.get())

    input_queue.join()
    
    for p in p_list:
        p.join()
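
If the compat.v1 session layer is not wanted under TF 2.x, the worker can also run eagerly. Below is a minimal sketch of run() only, assuming the Predictor constructor and the driver code stay exactly as above; tf.identity stands in for a real model call such as model(item):

    def run(self):
        # Pin this process to its GPU before TensorFlow is imported.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpu_id)
        import tensorflow as tf
        print('Using device #%s' % self.gpu_id)
        while True:
            item = self.input_queue.get()
            if item is None:
                self.input_queue.task_done()
                print("Exiting Process %d" % self.gpu_id)
                break
            # Eager execution: no Session or placeholder needed.
            res = tf.identity(tf.constant(item, dtype=tf.int16)).numpy()
            self.input_queue.task_done()
            self.output_queue.put(res)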