Python crawler: scraping novels

# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from urllib import request
import re
import os,time

# Fetch a URL and return the raw HTML page
def get_html(url):
    req = request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0')
    response = request.urlopen(req)  # pass the Request object so the User-Agent header is actually sent
    html = response.read()
    return html

# Get novel titles and links from a listing page
def get_books(url):  # given a listing page, return a {title: link} dict for that page
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    books = soup.find_all('div', attrs={'class': 'bbox'})
    book_dict = {}
    for book in books:
        book_name = book.h3.a.string
        book_url = book.h3.a.get('href')
        book_dict[book_name] = book_url
    return book_dict

# Given a book's link, return a {chapter name: link} dict
def get_parts(url):
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    part_urls = soup.find_all('a')
    host = "http://www.xiaoshuotxt.org"
    part_dict = {}
    for p in part_urls:
        p_url = str(p.get('href'))
        # keep relative links whose page name contains five digits + ".html"; links that already contain the host are skipped
        if re.search(r'\d{5}\.html', p_url) and ("xiaoshuotxt" not in p_url):
            part_dict[p.string] = host + p_url
    return part_dict

# Given a chapter URL, return the chapter's text content
def get_txt(url):
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    title = soup.h1.string  # chapter title
    content = soup.find('div', attrs={'class': 'zw'})
    txt = content.get_text()  # body text
    return txt

if __name__ == "__main__":
    root_dir = r'e:\books'
    #url = 'http://www.xiaoshuotxt.org/mingzhu/index_2.html'  # page 2 of the novel list
    url = "http://www.xiaoshuotxt.org/writer/58"  # novels by Jin Yong
    books = get_books(url)
    for book_name, book_url in books.items():
        os.makedirs(os.path.join(root_dir, book_name), exist_ok=True)  # don't fail if the folder already exists
        part_dict = get_parts(book_url)
        print(book_name, "total:", len(part_dict), "chapters")
        for part_name, part_url in part_dict.items():
            print("Saving:", part_name)
            part_txt = get_txt(part_url)
            # write the chapter file under root_dir with utf-8 encoding
            with open(os.path.join(root_dir, book_name, '%s.txt' % part_name), 'w', encoding='utf-8') as f1:
                f1.write(str(part_txt))
            time.sleep(2)  # pause between chapters to be polite to the server

Sample run:
(screenshot of the console output omitted)
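
One caveat: chapter titles scraped from the page may contain characters that Windows does not allow in filenames (such as ?, *, or :), which would make the open() call fail. A minimal sketch of a sanitizing helper, assuming an underscore is an acceptable substitute for illegal characters (safe_filename is a hypothetical helper, not part of the script above):

import re

def safe_filename(name):
    # Replace characters that Windows forbids in filenames with an underscore.
    return re.sub(r'[\\/:*?"<>|]', '_', name).strip()

Calling safe_filename(part_name) (and safe_filename(book_name)) before building the .txt path would keep the saving loop from crashing on such titles.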
