A Python crawler, before and after switching to coroutines

Before using coroutines:

#coding:utf-8
import requests
from lxml import etree
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
# thread pool (not actually used in this version; see the sketch after it)
import time
header={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/98.0.1108.56'
}
def demand(url):
    # fetch the catalogue page and hand the HTML to the parser
    r=requests.get(url,headers=header)
    code=r.status_code
    if code==200:
        jiexin(r.text)
    else:
        return "request failed"
def jiexin(html):
    # parse the catalogue and download each chapter sequentially
    tree=etree.HTML(html)
    hrefs=tree.xpath("//div[@class='box_con']//dl/dd/a/@href")
    for href in hrefs[:100]:
        url='https://www.biquge.biz'+href
        wenben(url)
        # time.sleep(1.5)
def wenben(url):
    # fetch one chapter, extract its title and body, and append it to a file
    r=requests.get(url,headers=header).text
    tree=etree.HTML(r)
    title=tree.xpath("//div[@class='bookname']/h1/text()")[0]
    print(title)
    soup=BeautifulSoup(r,'lxml')
    content=soup.find(id="content").text
    # open the file once; the original reopened it on every loop
    # iteration and never closed the handles
    with open(r'D:\图片爬虫练习\笔趣阁\测试1\\'+title+'.txt','a',encoding='utf-8') as fp:
        for line in content.split():
            fp.write(line+'\n')


if __name__ == '__main__':
    t1=time.time()
    url=""#就不带url
    demand(url)
    t2=time.time()
    print(t2-t1)
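Side note: the version above imports ThreadPoolExecutor but never uses it. Before reaching for coroutines, the same loop could already be parallelized with a thread pool. A minimal sketch of that intermediate step (the jiexin_threaded name and the worker count of 10 are my own; everything else reuses the functions above):

def jiexin_threaded(html):
    tree=etree.HTML(html)
    hrefs=tree.xpath("//div[@class='box_con']//dl/dd/a/@href")
    # dispatch each chapter download to the pool instead of calling wenben inline;
    # the pool shuts down and waits for all downloads when the with-block exits
    with ThreadPoolExecutor(max_workers=10) as pool:
        for href in hrefs[:100]:
            pool.submit(wenben,'https://www.biquge.biz'+href)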

After switching to coroutines (the per-chapter requests now go through aiohttp, file writes through aiofiles, and asyncio runs all 100 downloads concurrently):

import requests
from lxml import etree
import asyncio
import aiofiles
import aiohttp
from bs4 import BeautifulSoup
# from concurrent.futures import ThreadPoolExecutor  # thread pool, no longer needed
import time
header={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/98.0.1108.56'
}
def demand(url):
    # fetch the catalogue page synchronously, then start the event loop once
    r=requests.get(url,headers=header)
    code=r.status_code
    if code==200:
        asyncio.run(jiexin(r.text))
    else:
        return "request failed"

async def jiexin(html):
    # parse the catalogue and schedule all chapter downloads concurrently
    tree=etree.HTML(html)
    hrefs=tree.xpath("//div[@class='box_con']//dl/dd/a/@href")
    tasks=[]
    for href in hrefs[:100]:
        url='https://www.biquge.biz'+href
        tasks.append(wenben(url))
    # gather runs every download at once; passing bare coroutines to
    # asyncio.wait is deprecated and rejected since Python 3.11
    await asyncio.gather(*tasks)
async def wenben(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url,headers=header) as resp:  # don't shadow `session`
            r=await resp.text()
            tree=etree.HTML(r)
            title=tree.xpath("//div[@class='bookname']/h1/text()")[0]
            print(title)
            soup=BeautifulSoup(r,'lxml')
            content=soup.find(id="content").text
            # open the chapter file once instead of once per paragraph
            async with aiofiles.open(r'D:\图片爬虫练习\笔趣阁\测试1\\'+title+'.txt','a',encoding='utf-8') as f:
                for line in content.split():
                    await f.write(line+'\n')


if __name__ == '__main__':
    t1=time.time()
    url=""
    demand(url)
    t2=time.time()
    print(t2-t1)

The speedup is dramatic: the synchronous version took over 100 seconds, the coroutine version finishes in under 10.
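One caveat on the coroutine version: it fires all 100 requests at essentially the same instant, which some sites will throttle or block. A common refinement (not in the original post) is to cap the number of in-flight downloads with asyncio.Semaphore; a sketch, where jiexin_limited and the limit of 10 are illustrative names and values:

async def jiexin_limited(html,limit=10):
    sem=asyncio.Semaphore(limit)  # create inside the running event loop
    async def bounded(url):
        async with sem:           # at most `limit` downloads at once
            await wenben(url)
    tree=etree.HTML(html)
    hrefs=tree.xpath("//div[@class='box_con']//dl/dd/a/@href")
    await asyncio.gather(*(bounded('https://www.biquge.biz'+h) for h in hrefs[:100]))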

Any infringing content will be removed immediately upon request.
