Python Crawler: Multi-threaded Crawling with a Task Queue (Queue)

demo.py (multi-threaded crawler):

# coding=utf-8
import threading
from queue import Queue

import requests
from lxml import etree


class QiubaiSpider:

    def __init__(self):
        self.url_temp = "https://www.qiushibaike.com/8hr/page/{}/"
        self.headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"}
        self.url_queue = Queue()      # task queue of page URLs
        self.html_queue = Queue()     # queue of fetched HTML strings
        self.content_queue = Queue()  # queue of extracted item lists

    def get_url_list(self):
        for i in range(1, 14):
            self.url_queue.put(self.url_temp.format(i))

    def parse_url(self):
        while True:
            url = self.url_queue.get()  # take one task from the task queue
            print(url)
            response = requests.get(url, headers=self.headers)
            self.html_queue.put(response.content.decode())
            self.url_queue.task_done()  # mark this task as finished

    def get_content_list(self):  # extract the data
        while True:
            html_str = self.html_queue.get()
            html = etree.HTML(html_str)
            div_list = html.xpath("//div[@id='content-left']/div")  # split into per-post groups
            content_list = []
            for div in div_list:
                item = {}
                item["content"] = div.xpath(".//div[@class='content']/span/text()")
                item["content"] = [i.replace("\n", "") for i in item["content"]]
                item["content_img"] = div.xpath(".//div[@class='thumb']/a/img/@src")
                item["content_img"] = "https:" + item["content_img"][0] if len(item["content_img"]) > 0 else None
                content_list.append(item)
            self.content_queue.put(content_list)
            self.html_queue.task_done()

    def save_content_list(self):  # save the results
        while True:
            content_list = self.content_queue.get()
            for i in content_list:
                # print(i)
                pass
            self.content_queue.task_done()

    def run(self):  # main logic
        thread_list = []
        # 1. build the URL task list
        t_url = threading.Thread(target=self.get_url_list)
        thread_list.append(t_url)
        # 2. iterate: send requests, collect responses
        for i in range(20):
            t_parse = threading.Thread(target=self.parse_url)
            thread_list.append(t_parse)
        # 3. extract data
        for i in range(2):
            t_html = threading.Thread(target=self.get_content_list)
            thread_list.append(t_html)
        # 4. save
        t_save = threading.Thread(target=self.save_content_list)
        thread_list.append(t_save)
        for t in thread_list:
            t.daemon = True  # make workers daemon threads: they exit automatically when the main thread ends
            t.start()
        for q in [self.url_queue, self.html_queue, self.content_queue]:
            q.join()  # block the main thread until every task in this queue has been marked done
        print("main thread finished")


if __name__ == '__main__':
    qiubai = QiubaiSpider()
    qiubai.run()
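The shutdown logic is the heart of this pattern: `q.join()` blocks until `task_done()` has been called once for every `put()` on that queue, and because the workers are daemon threads, they are killed automatically when the main thread exits afterwards. Note that the queues must be joined in pipeline order, since downstream queues only fill up as upstream tasks complete. The following standalone sketch isolates the same coordination pattern in miniature (the squaring "task" is a hypothetical stand-in for real work):

# coding=utf-8
import threading
from queue import Queue

task_queue = Queue()

def worker():
    while True:
        n = task_queue.get()           # blocks until a task is available
        print("processed", n * n)      # stand-in for real work
        task_queue.task_done()         # decrement the count of unfinished tasks

# Daemon workers die with the main thread, so no explicit shutdown is needed.
for _ in range(4):
    t = threading.Thread(target=worker, daemon=True)
    t.start()

for n in range(10):
    task_queue.put(n)

task_queue.join()  # returns only after task_done() has been called once per put()
print("all tasks done")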
