Scraping the Demon Slayer (鬼灭之刃) manga from mkzhan.com

import requests
import pandas as pd
from bs4 import BeautifulSoup
import os
import time



headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
}


def getHTMLText(url):
    try:
        r = requests.get(url, headers=headers, timeout=20)
        r.raise_for_status()
        # 'unicode' is not a valid codec name; let requests guess from the body.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        print("Request failed: " + url)
        return None


def getChapterList(url, chapter_name_list, chapter_link_list):
    html = getHTMLText(url)
    if html is None:
        return chapter_name_list, chapter_link_list
    soup = BeautifulSoup(html, "html.parser")
    for tag in soup.find_all(class_='j-chapter-link'):
        if tag.string is not None:
            # Keep only the last path segment of 'data-hreflink'.
            chapter_link = url + tag['data-hreflink'].split("/")[-1]
            chapter_name = tag.string.strip()
            chapter_link_list.append(chapter_link)
            chapter_name_list.append(chapter_name)
    # The page lists chapters newest-first. Note that list.reverse()
    # reverses in place and returns None, so reverse first, then return.
    chapter_name_list.reverse()
    chapter_link_list.reverse()
    return chapter_name_list, chapter_link_list
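
To see what the chapter selector extracts in isolation, here is a minimal sketch run against hand-written markup. The class name j-chapter-link and the data-hreflink attribute come from the code above, but the exact structure of the real page is an assumption.

from bs4 import BeautifulSoup

sample = """
<a class="j-chapter-link" data-hreflink="/209827/101.html">Chapter 1</a>
<a class="j-chapter-link" data-hreflink="/209827/102.html">Chapter 2</a>
"""
soup = BeautifulSoup(sample, "html.parser")
for tag in soup.find_all(class_="j-chapter-link"):
    print(tag.string.strip(), tag["data-hreflink"].split("/")[-1])
# Chapter 1 101.html
# Chapter 2 102.html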


def saveChapterName(chapter_name_list, chapter_link_list):
    DataSet = list(zip(chapter_name_list, chapter_link_list))
    df = pd.DataFrame(data=DataSet, columns=['name', 'link'])
    print(df)
    try:
        # gb18030 keeps the Chinese chapter names readable in Excel on Windows.
        df.to_csv("mk_chapter_name.csv", mode="a+", index=False, encoding="gb18030")
    except OSError:
        print("Failed to write data")


def getImage(chapter_name_list, chapter_link_list, root):
    # Pair each chapter name with its link so every chapter's pages
    # land in their own sub-directory under root. (The original built
    # the directories in a separate loop and saved everything into the
    # last one.)
    for chapter_name, chapter_link in zip(chapter_name_list, chapter_link_list):
        chapter_dir = os.path.join(root, chapter_name)
        os.makedirs(chapter_dir, exist_ok=True)
        html = getHTMLText(chapter_link)
        if html is None:
            continue
        soup = BeautifulSoup(html, "html.parser")
        images = [tag['data-src'] for tag in soup.find_all(class_="lazy-read")]
        for i, image in enumerate(images, start=1):
            # Strip the CDN's '!...' style suffix to get a clean file name.
            filename = image.split("/")[-1].split("!")[0]
            path = os.path.join(chapter_dir, filename)
            if not os.path.exists(path):
                with open(path, "wb") as f:
                    f.write(requests.get(image, headers=headers).content)
                print(filename + " saved")
                print("progress: " + str(i) + "/" + str(len(images)))
            time.sleep(0.5)  # be polite to the server


def main():
    url = "https://www.mkzhan.com/209827/"
    chapter_names = []
    chapter_links = []
    root = "D:/guimiezhiren"
    getChapterList(url, chapter_names, chapter_links)
    saveChapterName(chapter_names, chapter_links)
    getImage(chapter_names, chapter_links, root)


if __name__ == "__main__":
    main()