打开豆瓣电影url:https://movie.douban.com/
找到真正存储信息的url:https://movie.douban.com/j/search_subjects?type=movie&tag=热门&page_limit=50&page_start=0
在上篇文章中已经说了怎么去找动态网站的真实存储数据的url就不多说了
打开网址
我们这次就需要这些信息
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/6/10 22:35
# @Site    :
# @File    : doubantest.py
# @Software: PyCharm
import re
import requests
from requests import RequestException
from demo01.util import buid_proxy
from urllib.parse import urlencode
import json
import os
# Proxy pool built by the project helper; shared by every request below.
proxies = buid_proxy()

# Browser-like headers so Douban does not reject the crawler outright.
# NOTE: the original used curly typographic quotes (blog mangling), which
# is a SyntaxError in Python; restored to straight quotes.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
    'Referer': 'https://movie.douban.com/',
}
def get_one_page():
    """Fetch the first page of 'hot' movies from Douban's JSON search API.

    Returns:
        dict | None: the decoded JSON payload on HTTP 200; None on any
        other status code or on a request error.
    """
    # Build the real data URL — the visible page is rendered from this
    # JSON endpoint, not from the HTML itself.
    paramete = {
        'type': 'movie',
        'tag': '热门',
        'page_limit': '50',
        'page_start': 0,
    }
    url = 'https://movie.douban.com/j/search_subjects?' + urlencode(paramete)
    # print(url)  # worth printing once to confirm the constructed URL
    try:
        # timeout added so a stalled proxy cannot hang the crawler forever
        res = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        if res.status_code == 200:
            # res.json() decodes the body directly; equivalent to the
            # original json.loads(res.text) but handles encoding for us.
            return res.json()
        return None
    except RequestException:
        return None
def parse_one_page(data):
    """Extract per-movie info dicts from the API payload.

    Bug fix: the original returned from inside the loop, so only the
    first of the 50 movies was ever parsed. All movies are now collected.
    (The sole caller discards the return value, so this is backward
    compatible.)

    Args:
        data: decoded JSON dict from get_one_page(), or None.

    Returns:
        list[dict]: one dict per movie with keys movie_id, movie_name,
        movie_url and img_url; an empty list when data is None or has
        no 'subjects' key (the API sometimes returns nothing iterable).
    """
    movies = []
    # 'subjects' is the key whose value holds the list of movie records.
    if data and 'subjects' in data:
        for item in data['subjects']:
            movies.append({
                'movie_id': item.get('id'),
                'movie_name': item.get('title'),
                'movie_url': item.get('url'),
                'img_url': item.get('cover'),
            })
    return movies
def save_to_file(content):
    """Placeholder for persisting parsed movie info; never implemented.

    The original source left this ``def`` with no body, which is a
    SyntaxError; a no-op body keeps the module importable.
    """
    # TODO: write `content` (movie-info dict/list) to disk, e.g. as JSON.
    pass
def save_img_to_file(data):
    """Download each movie's cover image into the local photo directory.

    Bug fix: the original fetched the image via ``response.text`` and
    re-encoded it as UTF-8 before writing — that decodes/re-encodes
    binary JPEG data and corrupts it. ``response.content`` is the raw
    bytes and is what must be written.

    Args:
        data: decoded JSON dict from get_one_page(), or None.

    Side effects: writes one ``<title>.jpg`` per movie under ``path``.
    Returns None (unchanged from the original).
    """
    path = 'E:/test001/photo/douban/'
    if data and 'subjects' in data:
        for item in data['subjects']:
            title = item.get('title')
            img_url = item.get('cover')
            print(title, img_url)
            img_path = path + title + '.jpg'  # target path for this cover
            # timeout so one dead image URL cannot stall the whole run
            response = requests.get(url=img_url, proxies=proxies, timeout=10)
            # 'wb' + raw bytes; the with-block closes the file, so no
            # explicit f.close() is needed.
            with open(img_path, 'wb') as f:
                f.write(response.content)
def main():
    """Crawl Douban's hot-movie list: fetch JSON, parse it, save covers."""
    data = get_one_page()
    # print(data)
    parse_one_page(data)
    # save_img_to_file returns None; kept the print as the original had it.
    cont = save_img_to_file(data)
    print(cont)
# Script entry point. The original read ``if name==‘main’:`` — the blog
# platform's markdown swallowed the double underscores and straightened
# quotes into curly ones; restored to the standard guard.
if __name__ == '__main__':
    main()