one argument process
Note that `args` must be iterable (like a tuple or list), but we can use a little trick to handle non-iterable single arguments.
def func(resolver):
    """Worker executed in a child process: extract the vocaloid data dict.

    Args:
        resolver: project object exposing ``getFormatedVocaloidDataInDict()``;
            presumably picklable so it can be shipped to the worker — TODO confirm.

    Returns:
        Whatever ``getFormatedVocaloidDataInDict()`` returns (a dict, per its name).
    """
    # fix: the body must be indented under the def (the paste lost indentation)
    return resolver.getFormatedVocaloidDataInDict()
We put a “,” after a single non-iterable arg to turn it into a one-element tuple.
results = [pool.apply_async(func, args=(resolverX,)) for resolverX in [resolver for i in range(num_cores)]]
template
# Template: fan work out to a process pool and collect the results.
# import datetime
import multiprocessing as mp
# # initialise the pool
num_cores = int(mp.cpu_count())
# print("本地计算机有: " + str(num_cores) + " 核心")
pool = mp.Pool(num_cores)
# # preparation processes: apply_async only queues work, nothing runs yet
processes = [pool.apply_async(func0, args=(arg0,)) for arg0 in argList]
processes.append(pool.apply_async(func1, args=(argX,)))
# start_t = datetime.datetime.now()
# .get() blocks until each task finishes and returns its result
results = [p.get() for p in processes]
# fix: close and join the pool so the worker processes are reaped
# (the original template leaked them)
pool.close()
pool.join()
# end_t = datetime.datetime.now()
# elapsed_sec = (end_t - start_t).total_seconds()
# print("多进程计算 共消耗: " + "{:.2f}".format(elapsed_sec) + " 秒")
- func0 and func1 can be lambda functions.
- Preparing the processes does not start any of them, so the cost is very small.
- If func has a return value, call .get() on each AsyncResult; otherwise submitting the task with apply_async is enough.
example
import math
import datetime
import multiprocessing as mp
def func(name, resolver):
    """Worker executed in a child process: map *name* to the vocaloid data dict.

    Args:
        name: key under which the result is stored (an int index in the example).
        resolver: project object exposing ``getFormatedVocaloidDataInDict()``.

    Returns:
        A one-entry dict ``{name: <resolver data>}``.
    """
    # fix: body indented under the def (paste lost indentation); space after `return`
    return {name: resolver.getFormatedVocaloidDataInDict()}
if __name__ == '__main__':
    # The guard is required on spawn-based platforms (Windows/macOS) so child
    # processes do not re-execute this block on import.
    # fix: the whole body must be indented under the `if` (paste lost indentation).
    start_t = datetime.datetime.now()
    num_cores = int(mp.cpu_count())
    print("本地计算机有: " + str(num_cores) + " 核心")
    pool = mp.Pool(num_cores)
    # NOTE(review): `resolver` is assumed to be defined earlier in the real
    # script — it is not shown in these notes.
    results = [pool.apply_async(func, args=(name, resolverX)) for name, resolverX in [(i, resolver) for i in range(num_cores)]]
    # .get() blocks until each worker finishes
    results = [p.get() for p in results]
    # fix: close and join the pool so worker processes are cleaned up
    pool.close()
    pool.join()
    end_t = datetime.datetime.now()
    elapsed_sec = (end_t - start_t).total_seconds()
    print("多进程计算 共消耗: " + "{:.2f}".format(elapsed_sec) + " 秒")
questions
Jupyter notebook does not support the multiprocessing package -> try multiprocess instead of multiprocessing https://stackoverflow.com/questions/48846085/python-multiprocessing-within-jupyter-notebook
progress bar -> https://www.jianshu.com/p/1d6e0d07eb4a
gpu acceleration -> https://www.cnblogs.com/noluye/p/11517489.html