Scraping dynamic page content and saving it to JSON files

Without further ado, straight to the code:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020-03-26 20:52
# @Author  : 蓝狼
# @File    : scrapy3.py
# @Desc    : scrape a dynamically rendered page and save the results as JSON files
import json
import logging
import multiprocessing
import threading
import time
from os import makedirs

import requests


logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')

URL = 'https://dynamic1.scrape.cuiqingcai.com/api/movie?limit={limit}&offset={offset}'
LIMIT = 10
TOTAL_PAGE = 10


DETAIL_URL = 'https://dynamic1.scrape.cuiqingcai.com/api/movie/{id}'
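
# URL pages through the movie list; DETAIL_URL fetches one movie by id
# (page N of the list corresponds to offset=(N-1)*LIMIT, see scrapy_index below)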

def scrapy_api(url):
    logging.info(f'scrapy api starting. url:{url}')

    try:
        response = requests.get(url)
        if response.status_code == requests.codes.ok:
            return response.json()

        logging.error(f'scrapy api invalid code. status_code:{response.status_code} url:{url}')

    except requests.RequestException:
        # the request itself failed (connection error, timeout, ...)
        logging.error(f'scrapy api request failed. url:{url}', exc_info=True)



def scrapy_index(page):
    scrapy_url = URL.format(limit=LIMIT, offset=(page-1)*LIMIT)
    return scrapy_api(scrapy_url)


def scrapy_detail(movie_id):
    detail_url = DETAIL_URL.format(id=movie_id)
    return scrapy_api(detail_url)


RESULT_DIR = 'results'
makedirs(RESULT_DIR, exist_ok=True)

def save_data(data):
    name = data.get('name')
    data_path = f'{RESULT_DIR}/{name}.json'
    # use a context manager so the file is flushed and closed after writing
    with open(data_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)


def run(data):
    detail_id = data.get('id')
    if not detail_id:
        return

    detail = scrapy_detail(detail_id)
    if not detail:
        # the detail request failed; scrapy_api already logged why
        return

    logging.info(f'scrapy detail. detail:{detail}')
    save_data(detail)


def main(page):
    index = scrapy_index(page)
    if not index:
        return

    # one thread per movie on this page; the sequential version
    # (calling run(item) directly in the loop) works too, just slower
    threads = []
    for item in index['results']:
        t = threading.Thread(target=run, args=(item,))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()



if __name__ == '__main__':

    start = time.time()
    # one process per page via multiprocessing.Pool,
    # plus one thread per detail request inside each process
    pool = multiprocessing.Pool()
    pages = range(1, TOTAL_PAGE + 1)
    pool.map(main, pages)
    pool.close()
    pool.join()

    print('time usage:%f' % float(time.time() - start))

    # multiprocessing only: 30.943944215774536
    # multiprocessing + multithreading: a bounded variant is sketched below
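
The last comment hints at a "multiprocessing + multithreading" timing that was never filled in. As a minimal sketch (not from the original post), the per-process fan-out can also be bounded with concurrent.futures.ThreadPoolExecutor instead of spawning one bare thread per movie; the name main_bounded and max_workers=5 are arbitrary choices:

from concurrent.futures import ThreadPoolExecutor

def main_bounded(page):
    index = scrapy_index(page)
    if not index:
        return
    # cap concurrent detail requests per worker process at 5
    with ThreadPoolExecutor(max_workers=5) as executor:
        # consume the iterator so exceptions from run() surface here
        list(executor.map(run, index['results']))

Passing main_bounded instead of main to pool.map gives the combined variant with a capped number of threads per process.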

Key techniques:

  • requests
  • multithreading
  • multiprocessing
  • saving results as JSON files (a quick read-back check follows this list)
  • logging output
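
To sanity-check the output, here is a minimal read-back sketch (not part of the original script; it only assumes the results directory created by save_data above):

import json
from pathlib import Path

# each movie was written as results/<name>.json by save_data()
for path in Path('results').glob('*.json'):
    with open(path, encoding='utf-8') as f:
        movie = json.load(f)
    print(path.name, '->', movie.get('name'))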
