from concurrent.futures import ThreadPoolExecutor
from concurrent import futures
import time
import random
"""线程死锁 """
# def wait_on_future():
#     f = executor.submit(pow, 5, 2)
#     # This will never complete because there is only one worker thread and
#     # it is executing this function.
#     print(f.result())
#
#
# executor = ThreadPoolExecutor(max_workers=1)
# executor.submit(wait_on_future)
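"""How to avoid the deadlock (a sketch added for illustration, not from the
original post): give the nested task its own pool, so the outer task never
waits on the single worker it is itself occupying."""
# def wait_on_future_safe():
#     with ThreadPoolExecutor(max_workers=1) as inner:
#         return inner.submit(pow, 5, 2).result()  # completes: inner has a free worker
#
# with ThreadPoolExecutor(max_workers=1) as outer:
#     print(outer.submit(wait_on_future_safe).result())  # prints 25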
import concurrent.futures
import urllib.request
URLS = ['http://www.foxnews.com/',
        'http://www.cnn.com/',
        'http://europe.wsj.com/',
        'http://www.bbc.co.uk/',
        'http://some-made-up-domain.com/']
"""线程池官方文档示例"""
# Retrieve a single page and report the URL and contents
def load_url(url, timeout):
    with urllib.request.urlopen(url, timeout=timeout) as conn:
        return conn.read()
# We can use a with statement to ensure threads are cleaned up promptly
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    # Start the load operations and mark each future with its URL
    future_to_url = {executor.submit(load_url, url, 60): url for url in URLS}
    for future in concurrent.futures.as_completed(future_to_url):
        url = future_to_url[future]
        try:
            data = future.result()
        except Exception as exc:
            print('%r generated an exception: %s' % (url, exc))
        else:
            print('%r page is %d bytes' % (url, len(data)))
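"""Alternative with Executor.map (a sketch added for comparison, not from the
official example): map() yields results in the order of URLS rather than in
completion order, and it re-raises the first exception while iterating, so the
made-up domain stops this loop early."""
import functools
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    try:
        fetch = functools.partial(load_url, timeout=60)
        for url, data in zip(URLS, executor.map(fetch, URLS)):
            print('%r page is %d bytes' % (url, len(data)))
    except Exception as exc:
        print('map() stopped on an exception: %s' % exc)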
"""demo"""
def add(num, timeout=10):  # timeout is accepted but never used in this demo
    s = num * random.randint(1, 10)
    print("start %d %d" % (num, s))
    time.sleep(s)
    print("end %d %d" % (num, s))
    return num
with ThreadPoolExecutor(max_workers=10) as executor:  # create the thread pool
    # Submit the tasks; they start running as workers become available
    future_list = [executor.submit(add, num, num) for num in range(5)]
    # no = [print(fu) for fu in future_list]  # inspect the pending futures
    # as_completed yields each future as soon as it finishes, fastest first
    for fu in futures.as_completed(future_list):
        print(fu.result(), fu)  # result of the completed task
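"""Callback variant (a sketch added for illustration, not part of the original
demo): instead of blocking in as_completed, attach a callback that is invoked
as soon as each future finishes."""
def report_done(fu):
    # Called with an already-finished future, so result() does not block here
    print("done %d" % fu.result())

with ThreadPoolExecutor(max_workers=10) as executor:
    for num in range(5):
        executor.submit(add, num).add_done_callback(report_done)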