import time
start_time = time.time()

import queue
import random
import threading

import requests
from bs4 import BeautifulSoup

urls = [
    f"https://www.soshuw.com/GuiMiZhiZhu/25708{page}.html"
    for page in range(59, 99)
]
def craw(url):
    # Download one page and return its HTML text.
    r = requests.get(url)
    return r.text
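# Note (an assumption, not from the original post): requests.get() without a
# timeout can block indefinitely on a stalled connection, silently hanging a
# crawler thread. A hedged variant of craw() with a time budget:
#
#     def craw(url):
#         r = requests.get(url, timeout=10)  # 10s is an illustrative value
#         r.raise_for_status()  # surface HTTP errors instead of parsing error pages
#         return r.text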
def parse(html):
    soup = BeautifulSoup(html, "html.parser")
    links = soup.find_all("a")
    # results = [(link.get_text()) for link in links]  # the list version is written
    # out normally, even with lines 37, 58 and 59 commented out
    results = soup.h1.string  # just the chapter title from the <h1> tag
    return results
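# Note (an assumption about edge cases, not something the post verifies):
# soup.h1.string is None when the page has no <h1>, or when the <h1> contains
# nested tags, and str(None) is then written to the file as the literal "None".
# A defensive variant:
#
#     h1 = soup.find("h1")
#     results = h1.get_text(strip=True) if h1 else ""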
def do_craw(url_queue: queue.Queue, html_queue: queue.Queue):
    # Producer: take a URL off the queue, fetch it, hand the HTML downstream.
    while True:
        url = url_queue.get()
        html = craw(url)
        html_queue.put(html)
        print(threading.current_thread().name, f"craw {url}",
              "url_queue.size=", url_queue.qsize())
def do_parse(html_queue: queue.Queue, fout):
    # Consumer: take HTML off the queue, parse it, append the result to the shared file.
    while True:
        html = html_queue.get()
        results = parse(html)
        # fout = open("k:/zhusc/1.txt", "a")  # if lines 37, 58 and 59 are commented
        # out at the same time, results is written out empty
        fout.write(str(results) + "\n")
        print(results, threading.current_thread().name, "results.size=", len(results),
              "html_queue.size=", html_queue.qsize())
if __name__ == "__main__":
    url_queue = queue.Queue()
    html_queue = queue.Queue()
    for url in urls:
        url_queue.put(url)
    for idx in range(3):
        # daemon=True lets the process exit even though the workers loop forever
        t = threading.Thread(target=do_craw, args=(url_queue, html_queue),
                             name=f"craw{idx}", daemon=True)
        t.start()
    fout = open("k:/zhusc/1.txt", "w")  # without this line it raises an error
    for idx in range(2):
        t = threading.Thread(target=do_parse, args=(html_queue, fout),
                             name=f"parse{idx}", daemon=True)
        t.start()
    time.sleep(15)  # "goes on strike" after running for 15 seconds
    fout.close()  # closing also flushes the buffered results to disk
    print(time.time() - start_time)
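A fixed sleep is a fragile way to stop this pipeline: after fout.close() a parse thread can still try to write to the closed file, and with the original non-daemon threads the process never exits at all. A minimal sketch of a cleaner shutdown, using queue.join() plus sentinel values (the names STOP, worker and out.txt are illustrative, not from the original post):

import queue
import threading

STOP = object()  # sentinel object telling a worker to exit

def worker(q: queue.Queue, fout, lock: threading.Lock):
    while True:
        item = q.get()
        if item is STOP:
            q.task_done()
            break
        with lock:  # serialize writes from multiple workers
            fout.write(str(item) + "\n")
        q.task_done()

q = queue.Queue()
lock = threading.Lock()
with open("out.txt", "w") as fout:
    threads = [threading.Thread(target=worker, args=(q, fout, lock))
               for _ in range(2)]
    for t in threads:
        t.start()
    for item in ["result1", "result2", "result3"]:
        q.put(item)
    for _ in threads:
        q.put(STOP)  # one sentinel per worker
    q.join()         # block until every item (and sentinel) is task_done()
    for t in threads:
        t.join()     # all workers have exited; only now is the file closed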
The mystery of multithreaded writes