Crawling Tieba images with queue + Thread + XPath

The spider is a three-stage producer/consumer pipeline connected by three queues: getUrl produces the list-page params, homePageUrl turns each list page into post links, getImageUrl turns each post into image URLs, and writeImage downloads and saves the images.

import os

import requests
from lxml import etree
from threading import Thread
from queue import Queue


class BaiduImageSpider:
    def __init__(self, tieba_name, stop_page):
        self.headers = {"User-Agent": "Mozilla/5.0"}
        self.baseurl = "http://tieba.baidu.com"
        self.pageurl = "http://tieba.baidu.com/f?"
        self.search = tieba_name
        self.stop_page = stop_page
        self.params_queue = Queue()
        self.t_link_queue = Queue()
        self.t_image_queue = Queue()
        # Make sure the output directory exists; a missing ./photo
        # directory would crash every writer thread on open()
        os.makedirs('./photo', exist_ok=True)

    # Produce the query params for each list page (Tieba paginates by 50 posts)
    def getUrl(self):
        for i in range(self.stop_page):
            params = {
                "kw": self.search,
                "pn": i * 50
            }
            self.params_queue.put(params)

    # Consume page params and collect the link of every post on the page
    def homePageUrl(self):
        while True:
            params = self.params_queue.get()
            try:
                res = requests.get(self.pageurl, params=params,
                                   headers=self.headers, timeout=10)
                res.encoding = "utf-8"
                html = res.text
                # Build the parse tree
                parseHtml = etree.HTML(html)
                # Post link list; "cleafix" (sic) is the class name as it
                # actually appears in Tieba's markup
                t_list = parseHtml.xpath('//div[@class="t_con cleafix"]/div/div/div/a/@href')
                # t_list : ['/p/233432', '/p/2039820', ...]
                for t_link in t_list:
                    # Join into a full post URL
                    t_link = self.baseurl + t_link
                    self.t_link_queue.put(t_link)
            finally:
                # Mark the task done even on error, otherwise
                # params_queue.join() would block forever
                self.params_queue.task_done()
            print('homePage')

    # Consume post links and collect the URL of every image in the post
    def getImageUrl(self):
        while True:
            t_link = self.t_link_queue.get()
            try:
                res = requests.get(t_link, headers=self.headers, timeout=10)
                res.encoding = "utf-8"
                html = res.text
                # Build the parse tree
                parseHtml = etree.HTML(html)
                img_list = parseHtml.xpath('//img[@class="BDE_Image"]/@src')
                for img_link in img_list:
                    print(img_link)
                    self.t_image_queue.put(img_link)
            finally:
                self.t_link_queue.task_done()
            print('getImage')

    # Consume image URLs, download each image and save it locally
    def writeImage(self):
        while True:
            img_link = self.t_image_queue.get()
            try:
                res = requests.get(img_link, headers=self.headers, timeout=10)
                # res.content is raw bytes; images need no text decoding
                data = res.content
                filename = './photo/' + img_link[-10:]
                with open(filename, "wb") as f:
                    f.write(data)
                    print("%s downloaded successfully" % filename)
            finally:
                self.t_image_queue.task_done()
            print('writeimage')

    def run(self):
        t_list = []
        t_url = Thread(target=self.getUrl)
        t_list.append(t_url)

        t_homePageUrl = Thread(target=self.homePageUrl)
        t_list.append(t_homePageUrl)
        for i in range(10):
            t_getImageUrl = Thread(target=self.getImageUrl)
            t_list.append(t_getImageUrl)
        for i in range(10):
            t_writeImage = Thread(target=self.writeImage)
            t_list.append(t_writeImage)

        for t in t_list:
            # Daemon threads die with the main thread, so the endless
            # while-True workers do not keep the process alive
            t.daemon = True  # setDaemon() is deprecated since Python 3.10
            t.start()

        # Block until every put() has been matched by a task_done()
        for q in [self.params_queue, self.t_link_queue, self.t_image_queue]:
            q.join()

        print('over')



if __name__ == '__main__':
    spider = BaiduImageSpider('美女', 2)  # args: the Tieba forum name and the number of pages to crawl
    spider.run()
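
The two XPath expressions do the real extraction work. As a quick self-contained illustration of the etree.HTML + xpath pattern, here is the post-link expression run against a hand-written fragment that mimics the relevant Tieba structure (the fragment is invented for illustration; the live page is far messier):

from lxml import etree

# Invented fragment mimicking one post entry on a Tieba list page
html = '''
<div class="t_con cleafix">
  <div><div><div>
    <a href="/p/233432">a post title</a>
  </div></div></div>
</div>
'''

tree = etree.HTML(html)
# Same expression as in homePageUrl()
links = tree.xpath('//div[@class="t_con cleafix"]/div/div/div/a/@href')
print(links)  # ['/p/233432']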

I tried a multi-threaded crawler today, scraping images from Tieba. The problem I hit: the program never terminates. The most likely cause: queue.join() only returns once every put() has been matched by a task_done(), so if a worker thread dies on an unhandled exception (open() failing because ./photo does not exist, say, or a request hanging with no timeout), the matching task_done() is never reached and the join blocks forever. The os.makedirs call, the timeout= arguments and the try/finally blocks above guard against exactly this.
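
To see the failure mode in isolation, here is a minimal sketch, independent of the spider above, with a deliberately failing item. The except keeps the worker alive after a bad item and the finally guarantees task_done(); drop either one and q.join() blocks forever:

import threading
from queue import Queue

q = Queue()

def worker():
    while True:
        item = q.get()
        try:
            if item < 0:
                raise ValueError("bad item %d" % item)  # simulated failure
            print("processed", item)
        except ValueError as e:
            print("skipped:", e)  # keep the worker thread alive
        finally:
            q.task_done()  # runs even on error, so join() can return

threading.Thread(target=worker, daemon=True).start()

for item in [1, -2, 3]:
    q.put(item)

q.join()       # returns only because every get() was paired with task_done()
print("over")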
