Using a web crawler to download a web novel

1. Fetch the content of each chapter

from multiprocessing.dummy import Pool
import requests
from bs4 import BeautifulSoup
import re
import os


def combine_name(snum, title):
    """
    Build a sortable file name from the chapter URL and the chapter title.
    :param snum: chapter URL (its embedded numeric id determines chapter order)
    :param title: chapter title
    :return: Chinese title, ***** (five-digit, zero-padded chapter number)
    """
    st = 396871  # base id subtracted from the URL's numeric part to get a small chapter index
    num = int(snum[31:-5]) - st
    title = title.split()[-1]
    if num < 10:
        snum = '0000' + str(num)
    elif num < 100:
        snum = '000' + str(num)
    elif num < 1000:
        snum = '00' + str(num)
    elif num < 10000:
        snum = '0' + str(num)
    else:
        snum = str(num)
    return title, snum


def get_html(url):
    """
    Fetch the given page.
    :param url: page URL
    :return: the requests Response object for that page (use .text for the source)
    """
    html = requests.get(url)  # utf-8
    return html


def get_chapter_url(source_url, html):
    """
    :param source_url: URL of the chapter index page
    :param html: Response object for that page
    :return: URL of every chapter (as a list)
    """
    url_list = []

    soup = BeautifulSoup(html.text, 'lxml')
    chapters = soup.find_all('a')

    for chapter in chapters[100:-10]:  # skip the non-chapter links at the top and bottom of the page
        temp = chapter.attrs['href']
        url_list.append(source_url + temp[7:])
        # print(url_list[-1])  # print each chapter URL

    return url_list


def get_article(url):
    """
    Fetch the body of one chapter and save it to disk.
    :param url: chapter URL
    :return: None
    """
    try:  # some chapters are special pages (e.g. an author's-leave notice); their URLs are printed instead of saved
        # print(url)
        html = get_html(url)  # fetch the page source

        # extract the chapter title
        soup = BeautifulSoup(html.text, 'lxml')
        head_text = soup.find_all('h1')[0].text
        # print(head_text)

        # build the sortable file name
        title, chapter_name = combine_name(url, head_text)
        # print(chapter_name)

        # extract the body: strip the wrapping <div id="content">...</div> and turn <br><br> into indented newlines
        text = re.search('<div id="content">(.*?)</div>', html.text, re.S).group(0)
        text = '    ' + text[18:-6].replace("<br><br>", '\n    ').replace('&nbsp;', '')
        text = title + '\n' + text[:-2]
        # print(text)

        with open(os.path.join('****', chapter_name + '.txt'), 'w', encoding='utf8') as f:
            f.write(text)
    except Exception:
        print(url)


def multiprocess_get_article(url_list):
    # get_article(url_list[129])  # single-chapter test call
    pool = Pool(10)  # multiprocessing.dummy.Pool is a thread pool; use 10 workers
    pool.map(get_article, url_list)


def main():
    book_name = '****'  # book title withheld
    os.makedirs(book_name, exist_ok=True)
    url = 'https://www.ibooktxt.com/0_646/'   # URL of the chapter index page
    html = get_html(url)
    chapter_list = get_chapter_url(url, html)
    multiprocess_get_article(chapter_list)


if __name__ == '__main__':
    main()
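
As an aside, the body extraction in get_article can also be done with BeautifulSoup instead of a regular expression, under the same assumption the regex already makes: the chapter body sits in a <div id="content"> with <br> tags between paragraphs. A minimal sketch; extract_body is a hypothetical helper, not part of the script above:

from bs4 import BeautifulSoup

def extract_body(html_text):
    """Return the chapter body as plain text, or None if the page has no content div."""
    soup = BeautifulSoup(html_text, 'lxml')
    content = soup.find('div', id='content')  # same element the regex above targets
    if content is None:  # e.g. an author's-leave notice or other special page
        return None
    # get_text('\n') joins the text fragments around the <br> tags with newlines;
    # bs4 decodes &nbsp; to '\xa0', which we strip to match the regex-based cleanup.
    return content.get_text('\n').replace('\xa0', '')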

2. Combine the chapters into a single file

import os


def combine_name(num):
    """
    Zero-pad the chapter number into a sortable five-digit form.
    :param num: chapter number
    :return: ***** (five-digit, zero-padded chapter number)
    """
    if num < 10:
        snum = '0000' + str(num)
    elif num < 100:
        snum = '000' + str(num)
    elif num < 1000:
        snum = '00' + str(num)
    elif num < 10000:
        snum = '0' + str(num)
    else:
        snum = str(num)
    return snum


def main():
    TEXT = ''
    chapter_num = 74  # first chapter number to write
    for i in range(74, 1512):
        path = combine_name(i)
        try:
            with open(os.path.join('****', path + '.txt'), 'r', encoding='utf8') as f:
                text = f.read()
        except Exception:  # skip chapters that were never downloaded
            continue
        TEXT += '第' + str(chapter_num) + '章 ' + text + '\f'  # prepend a "第N章" heading and end with a form feed
        chapter_num += 1

    with open(os.path.join('****', '整篇' + '.txt'), 'a', encoding='utf8') as f:
        f.write(TEXT)


if __name__ == '__main__':
    main()
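
Since the chapter files are saved under zero-padded five-digit names, a plain lexicographic sort already yields chapter order, so the merge can also walk the directory instead of reconstructing every file name. A minimal sketch, assuming the same '****' folder as above; merge_chapters is a hypothetical helper, not the author's script:

import os

def merge_chapters(folder, out_name='整篇.txt', first_chapter=74):
    """Concatenate every chapter file in folder into one text file, in name order."""
    chapter_files = sorted(
        name for name in os.listdir(folder)
        if name.endswith('.txt') and name != out_name
    )
    parts = []
    for num, name in enumerate(chapter_files, start=first_chapter):
        with open(os.path.join(folder, name), 'r', encoding='utf8') as f:
            # Same heading format as the loop above: "第<num>章 " followed by the chapter text.
            parts.append('第' + str(num) + '章 ' + f.read() + '\f')
    with open(os.path.join(folder, out_name), 'w', encoding='utf8') as f:
        f.write(''.join(parts))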