2.5 Basic Crawler Case Study in Practice
Scrape movie information from a website and save it locally.
import requests  # fetch pages
import logging  # output log information
import re  # parse with regular expressions
from urllib.parse import urljoin  # join URLs
import json
from os import makedirs
from os.path import exists
import multiprocessing  # speed up the crawler with multiple processes
logging.basicConfig(level=logging.INFO, format='%(asctime)s-%(levelname)s:%(message)s')
# level: log output level; INFO confirms everything is running as expected
# format: log output format
# %(asctime)s log time, %(levelname)s log level name, %(message)s log message
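# Example log line under this format (timestamp illustrative):
# 2024-01-01 12:00:00,000-INFO:scraping https://ssr1.scrape.center/page/1...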
BASE_URL = 'https://ssr1.scrape.center'  # root URL of the target site
TOTAL_PAGE = 10  # total number of list pages
RESULTS_DIR = 'results'  # folder the scraped data is saved into
exists(RESULTS_DIR) or makedirs(RESULTS_DIR)  # `or` evaluates its second operand only when the first is falsy,
# so the results folder is created only if it does not exist yet
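# Equivalent one-liner on Python 3.2+: makedirs(RESULTS_DIR, exist_ok=True)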
# Generic page-fetching method
def scrape_page(url):
    logging.info('scraping %s...', url)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        # the function returns at the first `return` it reaches
        logging.error('get invalid status code %s while scraping %s', response.status_code, url)
    except requests.RequestException:
        logging.error('error occurred while scraping %s', url, exc_info=True)
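# Usage sketch: scrape_page(f'{BASE_URL}/page/1') returns the page HTML as text,
# or implicitly returns None when the status code is not 200 or the request raises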
# Fetch a list page
def scrape_index(page):
    index_url = f'{BASE_URL}/page/{page}'
    # f-string: interpolates the variable into the string
    return scrape_page(index_url)
# Parse a list page into the detail-page URL of each movie
def parse_index(html):
    pattern = re.compile('<a.*?href="(.*?)".*?class="name">')
    items = re.findall(pattern, html)
    if not items:
        return []
    for item in items:
        detail_url = urljoin(BASE_URL, item)  # build the absolute detail-page URL
        logging.info('get detail url %s', detail_url)
        yield detail_url
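# e.g. an href of '/detail/1' is joined with BASE_URL into https://ssr1.scrape.center/detail/1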
# Fetch each movie's detail page
def scrape_detail(url):
    return scrape_page(url)
# Parse the data on a detail page
def parse_detail(html):
    # cover: poster image; name: movie title; category: genre
    # published_at: release date; drama: synopsis; score: rating
    cover_pattern = re.compile('class="item.*?<img.*?src="(.*?)".*?class="cover">', re.S)
    name_pattern = re.compile('<h2.*?>(.*?)</h2>', re.S)
    category_pattern = re.compile('<button.*?category.*?<span>(.*?)</span>.*?</button>', re.S)
    published_at_pattern = re.compile(r'(\d{4}-\d{2}-\d{2})\s?上映', re.S)
    drama_pattern = re.compile('<div.*?drama.*?<p.*?>(.*?)</p>', re.S)
    score_pattern = re.compile('<p.*?score.*?>(.*?)</p>')
    # re.S lets '.' also match newlines, so a pattern can span multiple lines of HTML
    cover = re.search(cover_pattern, html).group(1).strip() if re.search(cover_pattern, html) else None
    name = re.search(name_pattern, html).group(1).strip() if re.search(name_pattern, html) else None
    categories = re.findall(category_pattern, html) if re.findall(category_pattern, html) else []
    published_at = re.search(published_at_pattern, html).group(1) if re.search(published_at_pattern, html) else None
    drama = re.search(drama_pattern, html).group(1).strip() if re.search(drama_pattern, html) else None
    score = re.search(score_pattern, html).group(1).strip() if re.search(score_pattern, html) else None
    return {
        'cover': cover,
        'name': name,
        'categories': categories,
        'published_at': published_at,
        'drama': drama,
        'score': score
    }
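# Shape of the returned dict (field values purely illustrative):
# {'cover': 'https://.../poster.jpg', 'name': '霸王别姬 - Farewell My Concubine',
#  'categories': ['剧情', '爱情'], 'published_at': '1993-07-26', 'drama': '...', 'score': '9.5'}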
# Save the data
def save_data(data):
    name = data.get('name')
    data_path = f'{RESULTS_DIR}/{name}.json'
    with open(data_path, 'w', encoding='utf-8') as f:  # `with` closes the file even if dumping fails
        json.dump(data, f, ensure_ascii=False, indent=2)
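# ensure_ascii=False writes non-ASCII characters (e.g. Chinese titles) as-is rather than as \uXXXX escapes;
# indent=2 pretty-prints the JSON with two-space indentation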
# Chain the methods above together
def main(page):
    index_html = scrape_index(page)  # fetch the list page
    detail_urls = parse_index(index_html)  # parse it into each movie's detail-page URL
    # logging.info('detail urls %s', list(detail_urls))
    # note: uncommenting the line above would exhaust the generator, leaving the loop below nothing to iterate
    for detail_url in detail_urls:
        detail_html = scrape_detail(detail_url)  # fetch the detail page
        data = parse_detail(detail_html)  # parse the detail-page data
        logging.info('get detail data %s', data)
        logging.info('saving data to json file')
        save_data(data)
        logging.info('data saved successfully')
if __name__ == '__main__':
    pool = multiprocessing.Pool()
    pages = range(1, TOTAL_PAGE + 1)
    pool.map(main, pages)
    pool.close()
    pool.join()
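For debugging, a minimal sequential sketch (an assumption: the same functions as above, just without multiprocessing) could replace the entry point:
if __name__ == '__main__':
    for page in range(1, TOTAL_PAGE + 1):
        main(page)
Running the pages one by one makes the log output easier to follow and stack traces easier to attribute than with a process pool.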