# coding: utf-8
import requests
import random
import time
from fake_useragent import UserAgent
from lxml import etree
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import re
# ua = UserAgent()
# print(ua.random)
class DbSpider:
    """Spider for Douban's top-rated movie rankings.

    Workflow: fetch the chart page, let the user pick a category from a
    numbered menu, then page through the JSON ranking API and collect one
    record per movie into ``self.item`` (keyed by title).
    """

    # Pre-compiled (raw string, hoisted out of the menu loop) pattern that
    # pulls the numeric ``type`` id out of a category link's query string.
    _TYPE_RE = re.compile(r".*type=(\d+).*")

    def __init__(self):
        # JSON ranking endpoint; formatted with (type id, start offset).
        self.url = ("https://movie.douban.com/j/chart/top_list"
                    "?type={}&interval_id=100%3A90&action=&start={}&limit=20")
        self.type_list = []     # category type ids, indexed by menu position
        self.headers_list = []  # pool of random User-Agent strings
        self.item = {}          # title -> movie record dict
        self.dy_count = 0       # movie count for the chosen category

    def get_headers_list(self):
        """Fill ``self.headers_list`` with 100 random User-Agent strings."""
        ua = UserAgent()
        while len(self.headers_list) < 100:
            try:
                self.headers_list.append(ua.random)
            except Exception:
                # fake_useragent occasionally fails to serve a UA string;
                # just retry until the pool is full.
                continue

    def get_html(self, url):
        """GET *url* with a random User-Agent from the pool.

        Returns the ``requests.Response`` on HTTP 200, otherwise ``None``.
        Sleeps 2 seconds after every request to throttle the crawl.
        """
        # TLS verification is disabled on purpose (warnings silenced at
        # import time).  A ``proxies={...}`` argument can be added here if
        # Douban starts blocking the local IP.
        data = requests.get(
            url=url,
            verify=False,
            headers={"User-Agent": random.choice(self.headers_list)},
        )
        print(data)
        time.sleep(2)
        if data.status_code == 200:
            return data
        return None

    def re_parse(self, compile, html):
        """Run the XPath expression *compile* against *html* text.

        Returns the list of matching nodes/strings.
        """
        res = etree.HTML(html)
        parse_list = res.xpath(compile)
        return parse_list

    def spider_first(self):
        """Show the category menu and return the chosen type id.

        Returns ``None`` when the chart page cannot be fetched or the
        user's choice is not a valid menu index.
        """
        self.get_headers_list()
        url = "https://movie.douban.com/chart"
        data = self.get_html(url=url)
        if not data:
            print("抓取失败")
            return None
        res = etree.HTML(data.text)
        for index, span in enumerate(res.xpath('//div[@class="types"]/span')):
            print(index, span.xpath('./a/text()')[0])
            href = span.xpath('./a/@href')[0]
            self.type_list.append(self._TYPE_RE.findall(href)[0])
        user_input = input("请选择你要查看的类型,请选择数字:")
        try:
            return self.type_list[int(user_input)]
        except (ValueError, IndexError):
            # non-numeric input or index outside the menu range
            print("您的输入有误")
            return None

    def get_count(self, db_type):
        """Store the movie count for category *db_type* in ``self.dy_count``."""
        url = ("https://movie.douban.com/j/chart/top_list_count"
               "?type={}&interval_id=100%3A90").format(db_type)
        data = self.get_html(url).json()
        # NOTE(review): the API exposes several counters; "unwatched_count"
        # is what the original author used -- confirm "total" is not the
        # intended field.
        self.dy_count = int(data.get("unwatched_count"))

    def run(self):
        """Entry point: pick a category, then page through its ranking."""
        db_type = self.spider_first()
        if db_type is None:
            # failed fetch or invalid menu choice -- nothing to crawl
            return
        self.get_count(db_type)
        for start in range(0, 1000000, 20):  # the API serves 20 movies/page
            data = self.get_html(self.url.format(db_type, start))
            if not data:
                print("没有获取到数据2")
                return
            for record in data.json():
                movie = {
                    "title": record.get("title"),
                    "score": record.get("score"),
                    "actors": record.get("actors"),
                    "release_date": record.get("release_date"),
                    "url": record.get("url"),
                    "vote_count": record.get("vote_count"),
                }
                self.item[movie["title"]] = movie
                print(movie)
            if len(self.item) >= self.dy_count:
                # all movies collected -- stop before requesting an
                # extra empty page (original used ``>``: off by one page)
                return
if __name__ == '__main__':
    db = DbSpider()
    db.run()
    # Dump every collected movie record once the crawl finishes
    # (only the values are needed -- keys are just the titles again).
    for value in db.item.values():
        print(value)
# 运行结果展示 (sample run output shown below):