Days of Learning Python: Web Scraping (5)

Multithreaded Qiushibaike scraper example

from queue import Queue
from threading import Thread, Lock
import time
import requests
import json
from lxml import etree

# Exit flags for the worker threads: True means exit, False means keep running
crawl_exit = False
parse_exit = False


# Crawl thread: downloads the pages
class ThreadCrawl(Thread):
    def __init__(self, thread_name, page_queue, data_queue):
        super(ThreadCrawl, self).__init__()
        self.thread_name = thread_name
        self.page_queue = page_queue
        self.data_queue = data_queue
        self.headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/6.0)"}

    def run(self):
        while not crawl_exit:
            try:
                # Take a page number from the queue without blocking;
                # this raises queue.Empty once all pages have been claimed
                page = self.page_queue.get(block=False)
                url = "https://www.qiushibaike.com/8hr/page/" + str(page) + "/"
                print("%s started working, page: %d, url=%s" % (self.thread_name, page, url))
                request = requests.get(url, headers=self.headers)
                html = request.text
                # print(html)
                # Put the downloaded HTML into data_queue
                self.data_queue.put(html)
                time.sleep(1)
            except Exception:
                # Queue was empty or the request failed; keep looping until crawl_exit is set
                pass


# Parse thread: extracts data from the downloaded HTML
class ThreadParse(Thread):
    def __init__(self, thread_name, data_queue, file_name, lock):
        super(ThreadParse, self).__init__()
        self.thread_name = thread_name
        self.file_name = file_name
        self.data_queue = data_queue
        self.lock = lock

    def run(self):
        while not parse_exit:
            try:
                # print("%s started working" % self.thread_name)
                # Take one page of HTML from the queue; raises queue.Empty when it is empty
                html = self.data_queue.get(block=False)
                print("%s is parsing data: %s" % (self.thread_name, html[:10]))
                self.parse(html)
            except Exception:
                # Queue was empty; keep looping until parse_exit is set
                pass

    # Parse one page of HTML
    def parse(self, html):
        content = etree.HTML(html)
        # Use XPath to select every div that contains a joke
        node_lists = content.xpath("//div[contains(@id, 'qiushi_tag_')]")
        # print(node_lists)
        # items = []

        for node in node_lists:
            item = {}
            user_image = node.xpath('.//div/a/img[@class="illustration"]/@src')
            user_name = node.xpath(".//div//h2/text()")
            text = node.xpath(".//a/div/span/text()")
            zan = node.xpath(".//div/span/i/text()")
            comments = node.xpath(".//div/span/a/i/text()")
            # print(user_name,user_image,text,zan,comments)
            if len(user_image) > 0:
                item["user_image"] = user_image[0]
            if len(user_name) > 0:
                item["user_name"] = user_name[0]
            if len(text) > 0:
                item["text"] = text[0]

            if len(zan) > 0:
                item["zan"] = zan[0]
            if len(comments) > 0:
                item["comments"] = comments[0]
            print(item)

            # Append to a list (kept for reference, not used here)
            # items.append(item)
            # Acquire the lock so no other thread can write to the file at the same time
            with self.lock:
                # Write the item to the JSON file; ensure_ascii=False keeps the Chinese text readable
                json.dump(item, self.file_name, ensure_ascii=False)
                # Newline so each item ends up on its own line (JSON Lines)
                self.file_name.write("\n")


# Entry point
def main():
    global crawl_exit
    global parse_exit
    # Create a mutex to guard writes to the output file
    lock = Lock()
    # Queue of page numbers; crawl at most ten pages
    page_queue = Queue(10)
    for page in range(1, 11):
        page_queue.put(page)

    # Queue that holds the raw HTML of each downloaded page
    data_queue = Queue()

    # Create three crawl threads that request the pages over the network,
    # and keep them in a list so we can join() them later
    thread_crawls = []
    thread_names = ["Crawl thread 1", "Crawl thread 2", "Crawl thread 3"]
    for thread_name in thread_names:
        crawl = ThreadCrawl(thread_name, page_queue, data_queue)
        # Start the thread
        crawl.start()
        thread_crawls.append(crawl)
    # File that stores the JSON data
    file_name = open("qiushibaike.json", "a", encoding="utf-8")
    # Create three parse threads that extract data from the HTML pages

    thread_parses = []
    thread_names = ["解析线程1", "解析线程2", "解析线程3"]
    for thread_name in thread_names:
        parse = ThreadParse(thread_name, data_queue, file_name, lock)
        # Start the thread
        parse.start()
        thread_parses.append(parse)

    # Crawl threads ------
    # Busy-wait until every page number has been taken from the queue
    while not page_queue.empty():
        pass
    # All pages have been claimed; tell the crawl threads to exit
    crawl_exit = True

    # Wait for the crawl threads to finish
    for crawl in thread_crawls:
        crawl.join()
        print("%s线程结束" % str(crawl))

    # Parse threads ------
    # Busy-wait until the data queue has been drained
    while not data_queue.empty():
        pass

    # All pages have been parsed; tell the parse threads to exit
    parse_exit = True

    # Wait for the parse threads to finish
    for parse in thread_parses:
        parse.join()
        print("%s线程结束" % str(parse))

    # Acquire the lock so no thread is still writing, then close the file --------------
    with lock:
        file_name.close()

    print("主线程执行结束了------------")


if __name__ == "__main__":
    main()
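
The global exit flags plus the busy-wait loops above do work, but Python's queue.Queue also provides task_done()/join() for exactly this producer-consumer handoff, which avoids both the globals and the spinning. A minimal sketch of that alternative (the worker function and the placeholder HTML string are made up for illustration, not part of the original script):

from queue import Queue, Empty
from threading import Thread

def worker(pages, results):
    # Consume page numbers until the queue stays empty
    while True:
        try:
            page = pages.get(timeout=1)           # raises Empty if nothing arrives in time
        except Empty:
            break
        results.put("html for page %d" % page)    # placeholder for the real requests.get() call
        pages.task_done()                         # mark this page as fully processed

pages = Queue()
results = Queue()
for page in range(1, 11):
    pages.put(page)

threads = [Thread(target=worker, args=(pages, results)) for _ in range(3)]
for t in threads:
    t.start()
pages.join()                                      # blocks until every page was marked task_done()
for t in threads:
    t.join()
print("collected %d pages" % results.qsize())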

Simulated Douban login example

from selenium import webdriver
import time

# Create a browser client and configure it
# If PhantomJS is used as the browser client, the executable path must also be set:
# driver = webdriver.PhantomJS(executable_path="/usr/local/bin/phantomjs")
driver = webdriver.Chrome()

driver.get("https://www.douban.com/")
time.sleep(1)
driver.save_screenshot("豆瓣首页.png")

# Enter the account name
driver.find_element_by_id("form_email").send_keys("account")
# Enter the password
driver.find_element_by_name("form_password").send_keys("password")
# Save a screenshot that contains the captcha image
driver.save_screenshot("captcha.png")
# Enter the captcha
# check_code = input("Please enter the captcha: ")
# print("Captcha entered: %s" % check_code)
#
# driver.find_element_by_id("captcha_field").send_keys(check_code)

# Click the login button
driver.find_element_by_xpath("//input[@class='bn-submit']").click()

# Sleep briefly to give the login time to complete
time.sleep(3)
# Save a screenshot of the logged-in page
driver.save_screenshot("login_success.png")


# Save the HTML of the logged-in page locally
with open("douban.html", "w", encoding="utf-8") as f:
    f.write(driver.page_source)

# Quit the browser
driver.quit()
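
Note that the find_element_by_* helpers used above were removed in Selenium 4. On a current Selenium install the same steps look roughly like the sketch below; the element IDs and class names are simply the ones from the script above and may no longer match the live Douban page:

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get("https://www.douban.com/")
# Locate the same fields via By locators instead of the removed helpers
driver.find_element(By.ID, "form_email").send_keys("account")
driver.find_element(By.NAME, "form_password").send_keys("password")
driver.find_element(By.XPATH, "//input[@class='bn-submit']").click()
driver.quit()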

Type the string "尚硅谷" into the Baidu search box, save a screenshot, and save the page source.

from selenium import webdriver
import time

driver = webdriver.Chrome()

driver.get("https://www.baidu.com/")

driver.find_element_by_id("kw").send_keys("尚硅谷")

time.sleep(3)

driver.save_screenshot("尚硅谷.png")

with open("尚硅谷.html", "w", encoding="utf-8") as f:
    f.write(driver.page_source)

driver.quit()
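
The fixed time.sleep(3) works, but an explicit wait takes the screenshot as soon as the element is actually ready. A minimal sketch of the same flow with WebDriverWait, assuming the Baidu search box keeps the id "kw":

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("https://www.baidu.com/")
# Wait up to 10 seconds for the search box to become clickable, then type into it
box = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, "kw")))
box.send_keys("尚硅谷")
driver.save_screenshot("尚硅谷.png")
with open("尚硅谷.html", "w", encoding="utf-8") as f:
    f.write(driver.page_source)
driver.quit()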


