还在思考该看什么电影吗?还在纠结这个电影值不值得看吗?还在苦恼找不到对口味的电影吗?福利来了!豆瓣网电影信息爬取,专业分析电影价值,有了网友们的切身体验,你还用担心看到烂片吗!
废话不多说,直接上代码!
开发环境:Windows10
开发语言:Python3.6
开发工具:pycharm
抓包工具:Charles
import random
import json
import jsonpath as jsonpath
import requests
import time
class DouBan_Spider():
    """Scrape Douban's top movie chart (category type=11, top 100%-90% interval)
    page by page via the JSON chart API and append each movie record as one
    JSON object per line to ``Doubanmovies.json``.

    Pagination is driven iteratively in :meth:`start_work` (the original code
    used mutual recursion between ``json_data`` and ``start_work``, which is
    fragile and grows the call stack by one frame per page).
    """

    def __init__(self):
        # Pagination offset for the chart API; advanced by 20 (one page) per request.
        self.start = 0
        # Pool of User-Agent strings; one is picked at random per request to
        # reduce the chance of the crawler being blocked.
        self.User_Agents = [
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
            "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
            "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
            "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
            "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
            "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3192.0 Safari/537.36Name"
        ]

    def start_work(self):
        """Fetch chart pages until the offset passes 566, sleeping 1-2 seconds
        between requests to be polite to the server.

        Iterative replacement for the original start_work/json_data mutual
        recursion; observable behavior (URLs requested, records written,
        final message printed) is unchanged.
        """
        while True:
            self.base_url = 'https://movie.douban.com/j/chart/top_list?type=11&interval_id=100%3A90&action=&start=' + str(
                self.start) + '&limit=20'
            r = requests.get(url=self.base_url, headers={'User-Agent': random.choice(self.User_Agents)})
            self.json_data(r.text)
            self.start += 20
            # 566 is the (hard-coded) size of this chart; past it the API
            # returns no more movies. NOTE(review): confirm against the live API.
            if self.start > 566:
                print('爬完喽~')
                return
            time.sleep(random.randint(1, 2))

    def json_data(self, data):
        """Parse one page of chart JSON (``data``, a JSON text string) and
        write every movie record it contains via :meth:`write_data`."""
        json_data = json.loads(data)
        # jsonpath returns a flat list of all matches for each field; the
        # lists are parallel because every movie object carries every key.
        movie_id_list = jsonpath.jsonpath(json_data, '$..id')
        movie_name_list = jsonpath.jsonpath(json_data, '$..title')
        movie_url_list = jsonpath.jsonpath(json_data, '$..url')
        movie_release_date_list = jsonpath.jsonpath(json_data, '$..release_date')
        movie_regions_list = jsonpath.jsonpath(json_data, '$..regions')
        movie_types_list = jsonpath.jsonpath(json_data, '$..types')
        movie_score_list = jsonpath.jsonpath(json_data, '$..score')
        for id, name, url, release_date, regions, types, score in zip(
                movie_id_list, movie_name_list, movie_url_list, movie_release_date_list,
                movie_regions_list, movie_types_list, movie_score_list):
            # Build a fresh dict per movie. The original reused a single dict
            # across iterations, which would silently alias every record if
            # the items were ever collected instead of written immediately.
            item = {
                'id': id,
                'name': name,
                'url': url,
                'release_date': release_date,
                'regions': regions,
                'types': types,
                'score': score,
            }
            self.write_data(item)

    def write_data(self, item):
        """Append one movie record to ``Doubanmovies.json`` as a JSON object
        followed by ``,\\n`` (the file is a comma-separated list of objects,
        not strictly valid JSON as a whole)."""
        print('正在爬取电影:%s,URL为:%s' % (item['name'], item['url']))
        # ensure_ascii=False keeps Chinese titles human-readable in the file;
        # only json.dumps/json.dump accept this parameter.
        content = json.dumps(item, ensure_ascii=False) + ',\n'
        with open('Doubanmovies.json', 'a', encoding='utf-8') as f:
            f.write(content)
if __name__ == '__main__':
    # Instantiate the spider and kick off the crawl.
    spider = DouBan_Spider()
    spider.start_work()