Python Crawler -- Multithreading [Advanced Edition] -- Blocking -- Queues -- Shared Buffer

This article takes a deeper look at how multithreading raises a Python crawler's throughput, how a queue schedules the tasks, and how a shared buffer streamlines the hand-off of data between threads. Together, blocking, queues, and a shared buffer improve the crawler's concurrency performance.

Blocking

import time

from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import threading
from queue import Queue, Empty
class Tencent(threading.Thread):
    def __init__(self, q, name):
        super().__init__()
        self.q = q
        self.name = name
        # self.page = page
    def save_to_html(self, html_str, filename):
        dirname = os.path.dirname(filename)
        # exist_ok avoids a race when several threads create the directory at once
        os.makedirs(dirname, exist_ok=True)
        with open(filename, 'w', encoding='utf-8') as fp:
            fp.write(html_str)
        print('Download finished!', filename)

    def get_content_by_selenium(self, url):
        # PhantomJS support was removed in Selenium 4; on newer versions
        # swap in a headless Chrome or Firefox driver instead
        driver = webdriver.PhantomJS()
        wait = WebDriverWait(driver, 20)
        try:
            # request
            driver.get(url)
            # wait until the job list has rendered
            wait.until(EC.presence_of_all_elements_located((By.XPATH, '//div[@class="recruit-list"]')))
            # grab the rendered page source
            html_str = driver.page_source
        finally:
            driver.quit()  # always release the browser process
        return html_str

    def download(self, i):
        base_url = 'https://careers.tencent.com/search.html?index=%s'
        html_str = self.get_content_by_selenium(base_url % i)
        self.save_to_html(html_str, './tencent/{}.html'.format(i))
    def run(self):
        while True:
            # if the task queue is empty there is nothing left to fetch;
            # a non-blocking get() avoids the race between a separate
            # empty() check and get() when several threads share the queue
            try:
                i = self.q.get(block=False)
            except Empty:
                break
            print('============page {}==================@{}'.format(i, self.name))
            self.download(i)
if __name__ == '__main__':
    start = time.time()
    # start multithreading with the thread-subclass approach
    crawl_list = []
    # 1. the thread count is controllable
        # define a list; its length is the number of threads
    # 2. tasks execute in order
        # put the tasks into a queue
    # steps:
    # 1. create a task queue
    q = Queue()
    # 2. initialize the task queue
    for i in range(1, 20):
        q.put(i)  # put the page numbers into the task queue
        # download(i)
        # starting threads this way has serious drawbacks:
        # the task count equals the thread count, so a large workload
        # can easily crash the machine, and tasks cannot run in order
        # t = threading.Thread(target=download, args=(i,))
        # t = Tencent(i)
        # t.start()
        # crawl_list.append(t)
    # 3. define a list; its length is the number of threads
    crawl_list = ['aa', 'bb', 'cc', 'dd']
    # 4. iterate over the list and start the threads
    join_list = []
    for crawl in crawl_list:
        t = Tencent(q, crawl)
        t.start()
        join_list.append(t)
    # block the main thread with join()
    for t in join_list:
        t.join()  # block the current thread until thread t finishes
    print(time.time() - start)  # 24.451398611068726
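
The ~24s timing above is only meaningful because join() blocks: the main thread parks on each worker until it finishes, so time.time() is read again only after every download is done. A minimal sketch of that blocking behavior, independent of the crawler:

import threading
import time

def worker(n):
    time.sleep(n)  # simulate n seconds of work
    print('worker', n, 'done')

threads = [threading.Thread(target=worker, args=(i,)) for i in (1, 2)]
for t in threads:
    t.start()
for t in threads:
    t.join()  # the main thread blocks here until each worker exits
print('all workers finished')  # always printed last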

Queue

from queue import Queue
## queue
q = Queue()

## FIFO: items come back out in the order they were put in
for i in range(100):
    q.put(i)
while True:
    if q.empty():
        break
    print(q.get())
# the queue is now empty: a non-blocking get() raises queue.Empty
# immediately instead of hanging forever
print(q.get(block=False))
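
Beyond get(block=False), Queue also ships the task_done()/join() pair, which is the standard-library way to wait for all queued work to finish without polling empty(). A minimal sketch:

import threading
from queue import Queue

q = Queue()

def worker():
    while True:
        item = q.get()  # blocks until an item is available
        print('processing', item)
        q.task_done()   # mark this item as fully handled

threading.Thread(target=worker, daemon=True).start()  # daemon: exits with main
for i in range(5):
    q.put(i)
q.join()  # blocks until every put item has been task_done()'d
print('all tasks processed')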

Shared Buffer

# what does the producer produce?
# the HTML string of each page
import time

from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from queue import Queue, Empty
import threading
import os
class Producer(threading.Thread):
    def __init__(self, name, q):
        super().__init__()
        self.q_page = q
        self.name = name

    def get_content_by_selenium(self, url):
        # PhantomJS support was removed in Selenium 4; on newer versions
        # swap in a headless Chrome or Firefox driver instead
        driver = webdriver.PhantomJS()
        wait = WebDriverWait(driver, 20)
        try:
            # request
            driver.get(url)
            # wait until the job list has rendered
            wait.until(EC.presence_of_all_elements_located((By.XPATH, '//div[@class="recruit-list"]')))
            # grab the rendered page source
            html_str = driver.page_source
        finally:
            driver.quit()  # always release the browser process
        return html_str
    def download(self, page):
        base_url = 'https://careers.tencent.com/search.html?index=%s'
        html_str = self.get_content_by_selenium(base_url % page)
        return html_str
    def run(self):
        while True:
            # fetch a page number (a task) non-blockingly; this avoids the
            # race between empty() and get() when several producers share
            # the task queue
            try:
                page = self.q_page.get(block=False)
            except Empty:
                break
            html_str = self.download(page)
            print('========producer page {}=========@{}'.format(page, self.name))
            q_html.put(html_str)  # push the result into the shared buffer
# the consumer saves the pages
class Consumer(threading.Thread):
    def __init__(self, crawl):
        super().__init__()
        self.name = crawl

    def save_to_html(self, html_str, filename):
        dirname = os.path.dirname(filename)
        # exist_ok avoids a race when several threads create the directory at once
        os.makedirs(dirname, exist_ok=True)
        with open(filename, 'w', encoding='utf-8') as fp:
            fp.write(html_str)
        print('Download finished!', filename)
    def run(self):
        while True:
            # stop only once the producers are done AND the buffer is drained
            if q_html.empty() and flag:
                break
            try:
                # when q_html is empty, the non-blocking get() below does not
                # wait; it raises queue.Empty instead
                html_str = q_html.get(block=False)
                print('====consumer========@{}'.format(self.name))
                # millisecond timestamp as the filename; note that two saves
                # in the same millisecond would collide
                self.save_to_html(html_str, './tencent/{}.html'.format(int(time.time() * 1000)))
            except Empty:  # catch only Empty so real save errors are not swallowed
                continue

if __name__ == '__main__':
    # 1. create a queue: this is the shared buffer
    q_html = Queue()
    # polling flag: tells the consumers when the producers are done
    flag = False
    # 2. create the producer threads, driving the downloads from a task queue
    # 2.1 initialize the task queue
    q_page = Queue()
    for i in range(1, 20):
        q_page.put(i)
    # 2.2 define a list and start that many producer threads
    crawl_P = ['aa', 'bb', 'cc', 'dd']
    join_p = []
    for crawl in crawl_P:
        t = Producer(crawl, q_page)
        t.start()
        join_p.append(t)
    # 3. define a list of consumer names; its length is the number of consumer threads
    crawl_C = ['11', '22', '33', '44']
    join_C = []
    for crawl in crawl_C:
        t = Consumer(crawl)
        t.start()
        join_C.append(t)

    # make sure the producers have all finished producing
    for p in join_p:
        p.join()
    flag = True
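
The global flag plus the consumers' busy-wait loop works, but a common alternative is to shut consumers down with sentinel values: once the producers are joined, push one None per consumer into the buffer, and each consumer exits when it pulls one. A minimal sketch of that variant, with plain strings standing in for the Selenium downloads:

import threading
from queue import Queue

buffer = Queue()  # the shared buffer
NUM_CONSUMERS = 4

def consumer():
    while True:
        item = buffer.get()  # blocking get: no flag polling, no busy-wait
        if item is None:     # sentinel: producers are done, shut down
            break
        print('saving', item)

consumers = [threading.Thread(target=consumer) for _ in range(NUM_CONSUMERS)]
for c in consumers:
    c.start()

for page in range(1, 20):  # stand-in for the real producer output
    buffer.put('html of page %s' % page)
for _ in range(NUM_CONSUMERS):
    buffer.put(None)  # one sentinel per consumer

for c in consumers:
    c.join()
print('all consumers exited cleanly')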
