Scraping Novels from a Website with Python

1. Runtime Environment

PyCharm 2020 Community Edition

Python 3.7

requests, bs4 (BeautifulSoup4) and html5lib — the script below imports requests and parses every page with the html5lib parser
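If any of these packages are missing, the imports in the script below will fail. A minimal sanity check (not part of the original project) is to print the installed versions:

import requests
import bs4
import html5lib

# Print the versions of the three third-party dependencies the scraper relies on.
print("requests", requests.__version__)
print("beautifulsoup4", bs4.__version__)
print("html5lib", html5lib.__version__)

If this prints three version numbers the environment is ready; otherwise install the missing package with pip.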

2. Full Code

import os
import re

import requests
from bs4 import BeautifulSoup

from access.sprider.SpriderAccess import SpriderAccess
from base.BaseConfig import BaseConfig
from base.BaseFrame import BaseFrame
from business.sprider.DownLoadFile import DownLoadFile
from business.sprider.UserAgent import UserAgent
from object.entity.SpriderEntity import SpriderEntity
from plugin.Tools import Tools


class QuLa_WanBen:
    base_url = "https://www.qu.la/"
    save_path = BaseConfig().CORPUS_ROOT + os.sep + "QuLa"
    dict_column_list = {'tabData_3': '都市言情', 'tabData_1': '玄幻奇幻', 'tabData_2': '武侠仙侠', 'tabData_4': '历史军事',
                        'tabData_5': '科幻灵异', 'tabData_6': '网游竞技'}

    def __init__(self):
        Tools.judge_diskpath_exits_create(self.save_path)
        pass

    def sprider_story(self):
        BaseFrame.__log__("开始采集笔趣阁排行榜小说...")
        self.story_url = self.base_url + "wanbenxiaoshuo"
        try:
            response = requests.get(self.story_url, timeout=30, headers=UserAgent().get_random_header(self.story_url))
            response.encoding = 'UTF-8'
            soup = BeautifulSoup(response.text, "html5lib")
            for column in self.dict_column_list:
                div_tabData_1 = soup.find('div', attrs={"id": column})
                div_list = div_tabData_1.findAll('div', attrs={"class": 'topbooks'})
                for div in div_list:
                    # Each link looks like <a href="/book/4140/" title="太古神王" target="_blank">太古神王</a>
                    a_list = div.find_all('a', attrs={"target": '_blank'})
                    for a in a_list:
                        content_url = self.base_url + a.get("href")  # URL of the novel's index page
                        txt_title = a.get("title")  # Title of the novel
                        main_id = Tools.get_guid()  # ID used for the master record
                        try:
                            response = requests.get(content_url, timeout=30,
                                                    headers=UserAgent().get_random_header(content_url))
                            response.encoding = 'UTF-8'
                            # Use a separate soup object so the listing-page soup above is not overwritten
                            detail_soup = BeautifulSoup(response.text, "html5lib")
                            # Cover image of the novel
                            image_parent_div = detail_soup.find("div", attrs={"id": "fmimg"})
                            story_image_url = self.base_url + image_parent_div.find("img").get("src")
                            second_path = self.dict_column_list[column]
                            DownLoadFile(self.save_path).__down_load_image__(story_image_url, txt_title, second_path, 0)
                            # Author of the novel
                            author_parent_div = detail_soup.find("div", attrs={"id": "info"})
                            author = str(author_parent_div.find("p").text).split(":")[1]
                            # Chapter list of the novel (the <dl> holds the links to every chapter)
                            dl_tag = detail_soup.find('dl')
                            chapter_links = dl_tag.find_all('a')
                            txt_index = 0  # Chapter index counter
                            for a_tag in chapter_links:

                                href = a_tag.get("href")
                                if "book" in href:  # Skip the introduction and other irrelevant links
                                    url = self.base_url + href
                                    title = a_tag.text
                                    detail_id = Tools.get_guid()
                                    txt_index = txt_index + 1

                                    mmEntity = SpriderEntity()
                                    mmEntity.sprider_base_url = self.base_url
                                    mmEntity.create_datetime = Tools.get_current_datetime()
                                    mmEntity.sprider_url = url
                                    mmEntity.sprider_pic_title = title
                                    mmEntity.sprider_pic_index = str(1)
                                    if SpriderAccess().query_sprider_entity_by_urlandtitle(url, title) is None:
                                        SpriderAccess().save_sprider(mmEntity)
                                        image_relative_path = "QuLa" + os.sep + str(
                                            self.dict_column_list[
                                                column]) + os.sep + txt_title + os.sep + txt_title + ".jpg"
                                        self.get_content(url, title, txt_title, self.dict_column_list[column],
                                                         image_relative_path,
                                                         author, txt_index, main_id, detail_id)
                        except Exception as e:
                            BaseFrame.__err__("采集" + content_url + "出现错误" + str(e))
                            pass
                pass
        except Exception as e:
            BaseFrame.__err__("采集出现错误" + str(e))
            pass

    def get_content(self, url, chapter, txt_title, txt_coloum, image_path, author, txt_index, main_id, detail_id):
        """
        写文件至文本中
        :param url:采集的URL
        :param chapter: 章节名称
        :param txt_title: 小说名称
        :return:
        """
        txt_path = self.save_path + os.sep + txt_coloum + os.sep + str(txt_title)
        Tools.judge_diskpath_exits_create(txt_path)
        try:
            BaseFrame.__log__("正在采集 " + chapter + " " + url + " 上的小说...")
            response = requests.get(url, timeout=60, headers=UserAgent().get_random_header(url))
            response.encoding = 'UTF-8'
            soup = BeautifulSoup(response.text, "html5lib")
            content = soup.find('div', attrs={"id": 'content'})
            new_content = chapter + "\n" + str(content). \
                replace('<br/>', '\n'). \
                replace("<script>chaptererror();</script>", ""). \
                replace("<div id=\"content\">", ""). \
                replace("</div>", "")
            rightName = re.sub(r'[\\/:*?"<>|]', '', str(chapter))  # strip characters not allowed in file names
            chapter_full_path = txt_path + os.sep + rightName + ".txt"
            Tools.write_string_to_txt(chapter_full_path, new_content)
            execl_path = self.save_path + os.sep + "StoryTemplate.txt"
            chapter_path = "QuLa" + os.sep + txt_coloum + os.sep + txt_title + os.sep + rightName + ".txt"

            story_data = [txt_title, author, image_path, chapter, chapter_path, txt_index, txt_coloum, main_id,
                          detail_id]
            self.write_execl(execl_path, story_data)
        except Exception as e:
            BaseFrame.__err__("采集" + chapter + "出现错误" + str(e) + "尝试重新采集.")
            # Retry the same chapter; note that this recursive retry has no depth limit
            self.get_content(url, chapter, txt_title, txt_coloum, image_path, author, txt_index, main_id, detail_id)
            pass

    # region Write the "@"-separated summary record ("Execl")
    def write_execl(self, execl_path, story_data):
        split_sign = "@"
        content = split_sign.join(str(item) for item in story_data) + "\n"
        Tools.write_string_to_txt(execl_path, content)
    # endregion


if __name__ == '__main__':
    QuLa_WanBen().sprider_story()
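The script above depends on several project-internal modules (BaseConfig, BaseFrame, SpriderAccess, DownLoadFile, UserAgent, SpriderEntity, Tools) that are not shown here. As a rough, self-contained sketch of the same fetch-and-parse flow only — the selectors mirror the ones used above, the output folder and User-Agent are assumptions, and the site layout may have changed — the core pattern can be expressed without those dependencies:

import os
import re

import requests
from bs4 import BeautifulSoup

BASE_URL = "https://www.qu.la/"
SAVE_PATH = "QuLa"  # local output folder (assumption; the original path comes from BaseConfig)
HEADERS = {"User-Agent": "Mozilla/5.0"}  # fixed UA instead of the project's UserAgent helper


def fetch_soup(url):
    # Download a page and parse it with html5lib, mirroring the encoding handling above.
    response = requests.get(url, timeout=30, headers=HEADERS)
    response.encoding = "UTF-8"
    return BeautifulSoup(response.text, "html5lib")


def save_chapter(novel_title, chapter_title, chapter_url):
    # Fetch one chapter and write its text into SAVE_PATH/<novel>/<chapter>.txt.
    soup = fetch_soup(chapter_url)
    content = soup.find("div", attrs={"id": "content"})
    if content is None:
        return
    text = chapter_title + "\n" + content.get_text("\n")
    safe_name = re.sub(r'[\\/:*?"<>|]', "", chapter_title)  # strip illegal filename characters
    folder = os.path.join(SAVE_PATH, re.sub(r'[\\/:*?"<>|]', "", novel_title))
    os.makedirs(folder, exist_ok=True)
    with open(os.path.join(folder, safe_name + ".txt"), "w", encoding="utf-8") as f:
        f.write(text)


def sprider_story():
    # Walk the "完本小说" listing page, then every novel in each column, then every chapter.
    listing = fetch_soup(BASE_URL + "wanbenxiaoshuo")
    for column_id in ("tabData_1", "tabData_2", "tabData_3", "tabData_4", "tabData_5", "tabData_6"):
        column_div = listing.find("div", attrs={"id": column_id})
        if column_div is None:
            continue
        for book_a in column_div.find_all("a", attrs={"target": "_blank"}):
            novel_title = book_a.get("title")
            novel_soup = fetch_soup(BASE_URL + book_a.get("href").lstrip("/"))
            dl_tag = novel_soup.find("dl")
            if dl_tag is None:
                continue
            for chapter_a in dl_tag.find_all("a"):
                href = chapter_a.get("href")
                if href and "book" in href:
                    save_chapter(novel_title, chapter_a.text, BASE_URL + href.lstrip("/"))


if __name__ == "__main__":
    sprider_story()

This sketch drops the database bookkeeping and cover-image download and simply writes each chapter to disk; it is meant to illustrate the structure of the crawl, not to replace the original project code.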
