The Mystery of Multithreaded File Writes

import time
start_time = time.time()

import queue
import requests
import random
import threading
from bs4 import BeautifulSoup

urls = [
    f"https://www.soshuw.com/GuiMiZhiZhu/25708{page}.html"
    for page in range(59, 99)
]

def craw(url):
    r = requests.get(url)
    return r.text

def parse(html):
    soup = BeautifulSoup(html, "html.parser")
    links = soup.find_all("a")
    # results = [link.get_text() for link in links]  # the list version writes to the file fine, even with lines 37, 58 and 59 commented out
    results = soup.h1.string
    return results

def do_craw(url_queue: queue.Queue, html_queue: queue.Queue):
    while True:
        url = url_queue.get()
        html = craw(url)
        html_queue.put(html)
        print(threading.current_thread().name, f"craw {url}",
              "url_queue.size=", url_queue.qsize())

def do_parse(html_queue: queue.Queue, fout):
    while True:
        html = html_queue.get()
        results = parse(html)
        # fout = open("k:/zhusc/1.txt", "a")  # if lines 37, 58 and 59 are all commented out at the same time, results is written out empty
        fout.write(str(results) + "\n")
        print(results, threading.current_thread().name, "results.size", len(results),
              "html_queue.size=", html_queue.qsize())

if __name__ == "__main__":
    url_queue = queue.Queue()
    html_queue = queue.Queue()
    for url in urls:
        url_queue.put(url)

    for idx in range(3):
        t = threading.Thread(target=do_craw, args=(url_queue, html_queue),
                            name=f"craw{idx}")
        t.start()

    fout = open("k:/zhusc/1.txt", "w")  # an error is raised without this line
    for idx in range(2):
        t = threading.Thread(target=do_parse, args=(html_queue, fout),
                            name=f"parse{idx}")
        t.start()
    time.sleep(15)  # everything goes on strike after 15 seconds of running
    fout.close()  # close the file
    print(time.time() - start_time)
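
A plausible reading of the symptoms: the parse threads loop forever in while True, while the main thread sleeps for a fixed 15 seconds and then calls fout.close(), so any write attempted after that point raises ValueError: I/O operation on closed file (the "strike"); and when the file is never properly flushed or closed, buffered output may not reach disk, which is why the results can appear empty unless the file is reopened in append mode inside the loop. Below is a minimal sketch of a more deterministic version under those assumptions, reusing the same URLs, parsing logic and output path. The STOP sentinel, the explicit threading.Lock, and the join-based shutdown are additions for illustration, not part of the original post: workers exit when they see the sentinel, writes are serialized and flushed, and the file is closed only after every thread has finished, so no arbitrary time.sleep(15) is needed.

import queue
import threading
import time

import requests
from bs4 import BeautifulSoup

urls = [
    f"https://www.soshuw.com/GuiMiZhiZhu/25708{page}.html"
    for page in range(59, 99)
]

STOP = object()  # sentinel object that tells a worker thread to exit its loop


def craw(url):
    # Fetch the raw HTML of one chapter page.
    return requests.get(url).text


def parse(html):
    # Extract the chapter title from the <h1> tag, as in the original parse().
    soup = BeautifulSoup(html, "html.parser")
    return soup.h1.string


def do_craw(url_queue, html_queue):
    while True:
        url = url_queue.get()
        if url is STOP:
            break
        html_queue.put(craw(url))


def do_parse(html_queue, fout, lock):
    while True:
        html = html_queue.get()
        if html is STOP:
            break
        results = parse(html)
        with lock:             # only one thread touches the file at a time
            fout.write(str(results) + "\n")
            fout.flush()       # make each line visible on disk immediately


if __name__ == "__main__":
    start_time = time.time()

    url_queue = queue.Queue()
    html_queue = queue.Queue()
    for url in urls:
        url_queue.put(url)

    craw_threads = [
        threading.Thread(target=do_craw, args=(url_queue, html_queue),
                         name=f"craw{idx}")
        for idx in range(3)
    ]
    for _ in craw_threads:
        url_queue.put(STOP)    # one sentinel per craw thread

    lock = threading.Lock()
    # encoding="utf-8" so Chinese chapter titles are written safely on Windows
    with open("k:/zhusc/1.txt", "w", encoding="utf-8") as fout:
        parse_threads = [
            threading.Thread(target=do_parse, args=(html_queue, fout, lock),
                             name=f"parse{idx}")
            for idx in range(2)
        ]
        for t in craw_threads + parse_threads:
            t.start()
        for t in craw_threads:
            t.join()               # every page has been fetched and queued
        for _ in parse_threads:
            html_queue.put(STOP)   # only now tell the parsers to stop
        for t in parse_threads:
            t.join()               # all results are written before the file closes

    print(time.time() - start_time)

Joining the craw threads before enqueuing the parser sentinels guarantees that every fetched page is already in html_queue when a parser sees STOP, so nothing is dropped, and the with block only closes the file after both parsers have joined.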

