Python Crawler (4): a new Zongheng Chinese Web crawler demo, plus scraping novels from 136book and saving them to local text files, comparing single-process and multi-process efficiency (using 三生三世十里桃花 as the example)


Runtime environment: Python 3.6


Updated 2019-05-24: the original pages were redesigned, so a new crawler code demo for Zongheng Chinese Web [book.zongheng.com] has been added.
  • The site has anti-crawling measures that can make the crawler error out; both of the workarounds below have been verified to work (see the sketch after this list):
    • Add proxy IPs; I wrote a proxy-IP extraction API -> link
    • Add the Cookie generated by visiting the site in a browser to the request headers.
  • The crawler cannot correctly fetch content that requires VIP access.
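A minimal sketch of those two workarounds applied to a requests session; PROXY and COOKIE are hypothetical placeholder values, not working credentials:

import requests

# Hypothetical placeholders -- substitute a working proxy and the Cookie string copied from your browser
PROXY = 'http://127.0.0.1:8888'
COOKIE = 'name1=value1; name2=value2'

session = requests.session()
# Workaround 2: reuse the browser-generated Cookie
session.headers.update({'Cookie': COOKIE})
# Workaround 1: route all requests through a proxy IP
session.proxies.update({'http': PROXY, 'https': PROXY})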
# -*- coding: utf-8 -*-
# @Author : Leo

import re
import os
import logging
import requests
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter

logging.basicConfig(level=logging.INFO,  # minimum level that gets emitted
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S')


class ZonghengSpider:
    """
    纵横中文网爬虫
    - http://book.zongheng.com/
    """
    # 小说保存主路径
    novel_save_dir = 'novels'
    session = requests.session()
    # 设置重试次数
    session.mount('http://', HTTPAdapter(max_retries=3))
    session.mount('https://', HTTPAdapter(max_retries=3))

    def __init__(self):
        self.session.headers.update(
            {'Host': 'book.zongheng.com',
             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) '
                           'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36'})
        self.chapter_url = 'http://book.zongheng.com/api/chapter/chapterinfo?bookId={book_id}&chapterId={chapter_id}'

    def crawl(self, target_url: str):
        """
        开始爬取当前指定url
        :param target_url: 为需要爬取的书籍页面URL
        :return:
        """

        def request_url(url):
            resp = self.session.get(url=url)
            if resp.status_code == 200:
                return resp.json()
            else:
                return None

        book_name, book_id, chapter_id = self.get_page_info(target_url)
        logging.info(f'Book name: {book_name}, book ID: {book_id}, first chapter ID: {chapter_id}')
        if all([book_name, book_id, chapter_id]):
            # Build the save path
            novel_save_path = os.path.join(self.novel_save_dir, book_name)
            if not os.path.exists(novel_save_path):
                os.makedirs(novel_save_path)
            logging.info(f'Novel save path: {novel_save_path}')
            index = 0
            while True:
                index += 1
                chapter_url = self._get_chapter_url(book_id, chapter_id)
                logging.info(f'Requesting chapter URL: {chapter_url}')
                chapter_json = request_url(url=chapter_url)
                if chapter_json is not None:
                    chapter_data = chapter_json.get('data')
                    if not chapter_data:
                        break
                    # Chapter title
                    chapter_name = chapter_data.get('chapterName')
                    content_raw = chapter_data.get('content', '')
                    # Chapter body text
                    clear_content = '\n'.join(
                        [repr(p).strip('\'') for p in BeautifulSoup(content_raw, 'html.parser').strings])
                    # TODO author, update time, chapter word count...
                    with open(os.path.join(novel_save_path, str(index) + '-' + chapter_name + '.txt'), 'w',
                              encoding='utf8') as f:
                        f.write(clear_content)
                        logging.info('Chapter saved > %s' % os.path.join(novel_save_path, str(index) + '-' + chapter_name))
                    # Move on to the next chapter's chapter_id
                    chapter_id = chapter_data.get('nexCid')
                else:
                    logging.error(f'Error requesting chapter URL, failing URL: {chapter_url}')
                    break
            logging.info('Crawl finished')

    def get_page_info(self, homepage_url):
        """
        获取本书的book-id,以及首章的章节ID
        :param homepage_url: 书籍主页url
        :return:
        """
        resp = self.session.get(url=homepage_url)
        if resp.status_code == 200:
            soup = BeautifulSoup(resp.text, 'html.parser')
            book_name = soup.find('div', {'class': 'book-name'}).get_text().strip()
            first_chapter_tag = soup.find('a', {'class': 'btn read-btn', 'href': True})
            if first_chapter_tag is not None:
                first_chapter_url = first_chapter_tag.get('href')
                result = re.findall(r'chapter/(\d+)/(\d+)\.html', first_chapter_url)
                book_id, chapter_id = result[0] if result else (None, None)
                return book_name, book_id, chapter_id
        else:
            logging.error('Unexpected status code when requesting the book homepage!')
        return None, None, None

    def _get_chapter_url(self, book_id, chapter_id):
        """
        Build the chapter API URL
        :param book_id:
        :param chapter_id:
        :return:
        """
        return self.chapter_url.format(book_id=book_id, chapter_id=chapter_id)


if __name__ == '__main__':
    spider = ZonghengSpider()
    spider.crawl(target_url='http://book.zongheng.com/book/840152.html')
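One detail worth noting: the chapter title is written straight into the file name, and a title containing characters such as ? * : or / will fail on some file systems. A minimal, optional helper for that (sanitize_filename is a hypothetical name, not part of the original code):

import re

def sanitize_filename(name: str) -> str:
    # Replace characters that are illegal in Windows file names with underscores
    return re.sub(r'[\\/:*?"<>|]', '_', name).strip()

# Hypothetical usage when building the chapter file path:
# file_name = str(index) + '-' + sanitize_filename(chapter_name) + '.txt'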

Overview

  • Novel site: http://www.136book.com/
  • By changing the novel-specific URL on 136book, the crawler can batch-download the chapters of any novel on the site (a usage sketch follows the single-process script below).
  • This demo uses 三生三世十里桃花 as the example
    (link -> http://www.136book.com/sanshengsanshimenglitaohua/)

Sample run output
(screenshot omitted)


book136_singleprocess.py

Saving the novel's chapters with a single process

#!/usr/bin/env python 
# -*- coding: utf-8 -*- 
# @Author : Woolei
# @File : book136_singleprocess.py

import requests
import time
import os
import re
from bs4 import BeautifulSoup


headers = {
    'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
}


# Fetch the content of one chapter and append it to a text file
def getChapterContent(each_chapter_dict):
    content_html = requests.get(each_chapter_dict['chapter_url'], headers=headers).text
    soup = BeautifulSoup(content_html, 'lxml')
    content_tag = soup.find('div', {'id': 'content'})
    p_tag = content_tag.find_all('p')
    print('Saving chapter --> ' + each_chapter_dict['name'])
    # Open the chapter file once and append every paragraph to it
    with open(each_chapter_dict['name'] + '.txt', 'a', encoding='utf8') as f:
        for each in p_tag:
            paragraph = each.get_text().strip()
            f.write('  ' + paragraph + '\n\n')


# Collect the name and url of every chapter of the novel
def getChapterInfo(novel_url):
    chapter_html = requests.get(novel_url, headers=headers).text
    soup = BeautifulSoup(chapter_html, 'lxml')
    chapter_list = soup.find_all('li')
    chapter_all_dict = {}
    for each in chapter_list:
        link = each.find('a')
        if link is None:  # skip list items that are not chapter links
            continue
        chapter_each = {}
        chapter_each['name'] = link.get_text()  # chapter title
        chapter_each['chapter_url'] = link['href']  # chapter url
        digits = re.findall(r'\d+', each.get_text())  # extract the chapter number
        if not digits:
            continue
        chapter_num = int(digits[0])
        chapter_all_dict[chapter_num] = chapter_each  # store it in the dict of all chapters
    return chapter_all_dict


if __name__ == '__main__':
    start = time.perf_counter()  # record the start time
    novel_url = 'http://www.136book.com/sanshengsanshimenglitaohua/'  # 三生三世十里桃花 as the example
    novel_info = getChapterInfo(novel_url)  # collect the chapter records of the novel
    dir_name = '保存的小说路径'  # directory the chapters are saved into
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    os.chdir(dir_name)  # switch into the save directory
    for each in novel_info:
        getChapterContent(novel_info[each])
        # time.sleep(1)
    end = time.perf_counter()  # record the end time
    print('Done saving the novel: saved %d chapters in %f s' % (len(novel_info), (end - start)))
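To grab a different novel, only the table-of-contents URL needs to change; the rest of the script stays the same. A short usage sketch (the URL below is an assumed example following the same pattern, not verified):

# Hypothetical example: point the crawler at another novel's table-of-contents page
another_novel_url = 'http://www.136book.com/huaqiangu/'  # assumed URL, same pattern as above
another_novel_info = getChapterInfo(another_novel_url)
for num in sorted(another_novel_info):
    getChapterContent(another_novel_info[num])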
**While downloading and saving it became clear that the run takes quite a long time, so the plan is to use multiple processes to speed it up.**

book136_multiprocess.py

Saving the novel's chapters with multiple processes

#!/usr/bin/env python 
# -*- coding: utf-8 -*- 
# @Author : Woolei
# @File : book136_2.py 


import requests
import time
import os
import re
from bs4 import BeautifulSoup
from multiprocessing import Pool

headers = {
    'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
}


# Fetch the content of one chapter and append it to a text file
def getChapterContent(each_chapter_dict):
    content_html = requests.get(each_chapter_dict['chapter_url'], headers=headers).text
    soup = BeautifulSoup(content_html, 'lxml')
    content_tag = soup.find('div', {'id': 'content'})
    p_tag = content_tag.find_all('p')
    print('Saving chapter --> ' + each_chapter_dict['name'])
    # Open the chapter file once and append every paragraph to it
    with open(each_chapter_dict['name'] + '.txt', 'a', encoding='utf8') as f:
        for each in p_tag:
            paragraph = each.get_text().strip()
            f.write('  ' + paragraph + '\n\n')


# Collect the name and url of every chapter of the novel
def getChapterInfo(novel_url):
    chapter_html = requests.get(novel_url, headers=headers).text
    soup = BeautifulSoup(chapter_html, 'lxml')
    chapter_list = soup.find_all('li')
    chapter_all_dict = {}
    for each in chapter_list:
        link = each.find('a')
        if link is None:  # skip list items that are not chapter links
            continue
        chapter_each = {}
        chapter_each['name'] = link.get_text()  # chapter title
        chapter_each['chapter_url'] = link['href']  # chapter url
        digits = re.findall(r'\d+', each.get_text())  # extract the chapter number
        if not digits:
            continue
        chapter_num = int(digits[0])
        chapter_all_dict[chapter_num] = chapter_each  # store it in the dict of all chapters
    return chapter_all_dict


if __name__ == '__main__':
    start = time.perf_counter()
    novel_url = 'http://www.136book.com/sanshengsanshimenglitaohua/'
    novel_info = getChapterInfo(novel_url)
    dir_name = '保存的小说路径'  # directory the chapters are saved into
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    os.chdir(dir_name)
    pool = Pool(processes=10)   # create a pool of 10 worker processes
    pool.map(getChapterContent, [novel_info[each] for each in novel_info])
    pool.close()
    pool.join()
    end = time.perf_counter()
    print('Done saving the novel with multiple processes: saved %d chapters in %f s' % (len(novel_info), (end - start)))
  • While it runs, the task manager shows the 10 child processes (processes=10). More processes can improve throughput, but creating far more processes than the machine can handle will drag overall performance down badly; a sizing sketch follows below.
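A minimal sketch of sizing the pool from the machine instead of hard-coding 10 (os.cpu_count() can return None, hence the fallback; this is a suggestion, not part of the original script):

import os
from multiprocessing import Pool

# Cap the pool at the number of CPU cores, falling back to 4 if the count is unknown
n_workers = min(10, os.cpu_count() or 4)
pool = Pool(processes=n_workers)

Since the work here is dominated by network I/O, a thread pool (concurrent.futures.ThreadPoolExecutor) would also do the job with less startup overhead.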