A multithreaded crawler for 书包网 (bookbao8) novels

书包网 (bookbao8.com) is a decent novel site: it offers plain txt downloads of its novels, and its backend handles high concurrency well, so a bit of casual crawling is not going to bring the site down.

That makes it a good target for a practice crawler project.

 

Straight to the code. This multithreaded crawler also works for sites with a similar layout; the key requirement is that the site can handle high concurrency, otherwise it will fall over in no time. The overall flow: fetch the book's table of contents, split the chapter list into batches of 20, have one worker thread download and write each batch to its own numbered file, then merge the pieces into a single txt.

After all, roughly five minutes for an 18 MB novel is about as fast as it gets.
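Before running the full script it is worth a quick sanity check that the book page is reachable and that the chapter-list XPath still matches the site's layout. A minimal sketch, using the same URL and XPath as the code below (check_toc is not part of the original script):

from lxml import etree
import requests

def check_toc(url="https://www.bookbao8.com/book/201506/04/id_XNDMyMjA1.html"):
    # Bare-bones user-agent; the full script below sends a complete browser header set
    r = requests.get(url, headers={"user-agent": "Mozilla/5.0"}, timeout=10)
    r.encoding = r.apparent_encoding
    page = etree.HTML(r.text)
    titles = page.xpath('/html/body/div[7]/ul/li/a/text()')
    print(f"{len(titles)} chapters found")  # 0 means the layout changed and the XPath needs updating

If this prints 0, fix the selector first; multithreading only makes a broken XPath fail faster.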

from lxml import etree
import requests
from threading import Thread, enumerate  # threading.enumerate() lists live Thread objects; note it shadows the builtin enumerate
import os
from time import sleep,time

# Headers copied from a browser session; the cookie and referer are session-specific
# and may need refreshing if the site starts rejecting requests.
headers={
# ':authority':'www.bookbao8.com',
# ':method': 'GET',
# ':path': '/book/201506/04/id_XNDMyMjA1.html',
# ':scheme': 'https',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'cache-control': 'max-age=0',
'cookie': 'Hm_lvt_79d6c18dfed73a9524dc37b056df45ec=1577182135; Hm_lpvt_79d6c18dfed73a9524dc37b056df45ec=1577182135; Hm_lvt_9e424f40a62d01a6b9036c7d25ce9a05=1577182142; trustedsite_visit=1; bk_ad=2; __cm_warden_uid=840a745a752905060cd14982b4bbc922coo; __cm_warden_upi=MTE5LjQuMjI4LjE1Nw%3D%3D; Hm_lpvt_9e424f40a62d01a6b9036c7d25ce9a05=1577185720',
'referer': 'https://www.bookbao8.com/book/201506/04/id_XNDMyMjA1.html',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'same-origin',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}

def thread_it(func, *args):
    # Run func(*args) in a daemon thread so it will not block interpreter exit
    t = Thread(target=func, args=args)
    t.daemon = True
    t.start()

def getAll(url="https://www.bookbao8.com/book/201506/04/id_XNDMyMjA1.html"):
    # Fetch the book's info page and return (name, author, genre, [(chapter title, chapter url), ...])
    r = requests.get(url, headers=headers)
    # print(r.text)  # debug: dump the raw page
    if r.status_code == 200:
        r.encoding = r.apparent_encoding
        ret = r.text
        page_source = etree.HTML(ret)
        name = page_source.xpath('//*[@id="info"]/h1/text()')
        author = page_source.xpath('//*[@id="info"]/p[1]/a/text()')
        novel_type = page_source.xpath('//*[@id="info"]/p[2]/a/text()')
        title = page_source.xpath('/html/body/div[7]/ul/li/a/text()')
        link = page_source.xpath('/html/body/div[7]/ul/li/a/@href')
        link = map(lambda x: 'https://www.bookbao8.com'+x, link)  # prefix each relative chapter link with the site domain
        novel_list = list(zip(title,link))  # pair each chapter title with its absolute URL
        if len(novel_list) > 0:
            return name[0], author[0], novel_type[0], novel_list
        else:
            return None,None,None,None

def getOne(link=('第0001章 绝地中走出的少年', 'https://www.bookbao8.com/views/201506/04/id_XNDMyMjA1_1.html')):
    # Download a single chapter page and return (chapter title, chapter text)
    r = requests.get(link[1], headers=headers)
    if r.status_code == 200:
        r.encoding = r.apparent_encoding
        ret = r.text
        page_source = etree.HTML(ret)
        node_title = link[0]
        node_content = page_source.xpath('//*[@id="contents"]/text()')
        node_content = "".join(node_content).replace("\n \xa0 \xa0","")
        if len(node_title) > 0:
            return node_title, node_content
        else:
            return None, None

def writeOne(title, content):
    # Format one chapter as an indented title followed by its text
    txt = "\t\t" + title + "\n" + content + "\n\n"
    return txt

def runApp(novel_list, name, t1, cwd=''):
    # Split the chapter list into batches of 20 and start one worker thread per batch
    article_num = len(novel_list)
    xc_num = article_num // 20 + 1  # number of worker threads
    print(f"Threads to start: {xc_num}")

    def inter(link, f, i):
        try:
            title, content = getOne(link)
            txt = writeOne(title, content)
            f.write(txt)
            print(f"\rThread {i} writing {title}", end="")
        except Exception:
            # The site refused the connection (crawling too fast); wait 1 s and retry recursively
            print("\nConnection refused (crawling too fast); sleeping 1 s and retrying")
            sleep(1)
            inter(link, f, i)

    def inner(name, i, begin, end, cwd):
        # Each worker writes its batch of chapters to its own numbered file
        f = open(f"{cwd}downloads/{name}/{i}.txt", mode='w+', encoding='utf-8')
        for link in novel_list[begin:end]:
            inter(link, f, i)
            if link == novel_list[end - 1]:
                print(f"\nThread {i} finished")
                print(f"\nLive threads remaining: {len(enumerate())}")
                # Threshold for "only bookkeeping threads left": the main thread plus this
                # last worker (2); 4 when a cwd prefix is passed in
                base_xc = 2 if not cwd else 4
                if len(enumerate()) <= base_xc:
                    print(enumerate())
                    print("\nWhole book downloaded")
                    t2 = time()
                    print(f"\nTotal download time: {round(t2 - t1)}s")
                    hebing(f"{cwd}downloads/{name}")

        f.close()

    for i in range(1, xc_num + 1):
        begin = 20 * (i - 1)
        end = 20 * i if i != xc_num else article_num
        if i == xc_num:
            print("\nAll threads started")
        thread_it(inner, name, i, begin, end, cwd)
        sleep(0.5)  # stagger thread start-up to avoid an initial burst of requests

def paixuRule(elem):
    # Sort key: the numeric prefix of "<n>.txt" (paixu = "sort")
    return int(elem.split(".")[0])

def hebing(path):
    # Merge the per-thread files back into one txt in chapter order (hebing = "merge")
    dirs = os.listdir(path)
    dirs.sort(key=paixuRule, reverse=False)
    f = open(path + ".txt", mode='w+', encoding='utf-8')
    for file in dirs:
        with open(path + "/" + file, mode="r", encoding="utf-8") as f1:
            f.write(f1.read())
    f.close()
    print("Novel merge complete")

if __name__ == '__main__':
    t1 = time()
    name, _, _, novel_list = getAll(url="https://www.bookbao8.com/book/201506/04/id_XNDMyMjA1.html")
    print(name)
    if not os.path.exists("downloads/" + name):
        os.makedirs("downloads/" + name)  # makedirs also creates the parent "downloads" folder if it is missing
    runApp(novel_list, name, t1)
    while True:
        sleep(1)  # keep the main thread alive so the daemon workers can keep running
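
If you want the concurrency capped explicitly instead of managing thread counts and per-thread files by hand, the standard library's concurrent.futures is a compact alternative. The following is a minimal sketch, not part of the original post: run_with_pool is a hypothetical helper that reuses getAll, getOne and writeOne from above and writes one merged file directly, skipping the per-thread files and the hebing step.

from concurrent.futures import ThreadPoolExecutor

def run_with_pool(url, max_workers=8):
    # Hypothetical helper: download all chapters through a capped thread pool
    name, _, _, novel_list = getAll(url)
    os.makedirs("downloads", exist_ok=True)
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # map() keeps the input order, so chapters come back in reading order
        chapters = pool.map(getOne, novel_list)
        with open(f"downloads/{name}.txt", mode="w", encoding="utf-8") as f:
            for pair in chapters:
                if pair and pair[0]:  # skip chapters where getOne returned None
                    f.write(writeOne(*pair))

# run_with_pool("https://www.bookbao8.com/book/201506/04/id_XNDMyMjA1.html")

Here max_workers caps how many requests hit the site at once, which is an easier knob to tune than the batch size of 20 used above; unlike inter, this sketch does not retry failed requests.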

 
