"""
1.爬取所有页面书的信息
2.取出id,构造url,爬取详情页信息
3.motor异步存储
"""
import aiohttp
import logging
import asyncio
from motor.motor_asyncio import AsyncIOMotorClient
# ---- Module-level configuration and shared state ----
concurrency = 10  # intended cap on concurrent requests (consumed by scrape_api below)
session = None  # aiohttp.ClientSession; presumably created in an async main() not visible here — confirm
logging.basicConfig(level=logging.INFO,format='%(levelname)s-%(message)s')
# Database setup: MongoDB via motor's asyncio driver
motor_connect_string = 'mongodb://localhost:27017'
momgodb_name = 'books'  # NOTE(review): variable name has a typo ("momgodb" -> "mongodb"); kept as-is since later, unseen code may reference it
mongo_collection_name = 'books'
client = AsyncIOMotorClient(motor_connect_string )
db = client[momgodb_name]
collection =db[mongo_collection_name]
page_num = 10  # number of index pages to crawl; presumably used by a driver loop below — not visible in this chunk
async def index_url(page, limit=18):
    """Fetch one page of the book index API.

    Args:
        page: 1-based index page number.
        limit: Books per page; defaults to 18, matching the site's API
            (previously hard-coded twice in the URL — now parameterized).

    Returns:
        Whatever ``scrape_api`` yields for the page URL (scrape_api is
        defined below; its body is truncated in this view, so the exact
        return shape — presumably parsed JSON — should be confirmed there).
    """
    # The API offset is zero-based: page 1 -> 0, page 2 -> limit, ...
    url = f'https://spa5.scrape.center/api/book/?limit={limit}&offset={limit * (page - 1)}'
    return await scrape_api(url)
async def scrape_api(url):
async with asyncio.Semaphore(concurrency) :
try:
logging.info('scraping %s',url)
async with session.get
# NOTE(review): the lines below were non-code residue from the web page this
# script was copied from (CSDN article "motor async storage hands-on",
# first published 2022-03-25 22:59:14, plus a site image link). They are kept
# here as a comment so the file stays syntactically valid; the scrape_api
# function above is truncated mid-statement and the rest of the script
# (detail-page scraping, motor save, main loop) is missing.