[Python] A Simple Scraper for the Douban TOP250
The URL of the Douban TOP250 movie list is
https://movie.douban.com/top250?start=%d&filter=
where the number after start= is the offset of the first item on the current page. There are 25 items per page, so setting start to 25 shows the second page of 25 entries (see the short sketch below).
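As a quick illustration of the pagination math used later in the script (the helper name page_url is made up here, not part of the original code), page n starts at offset (n - 1) * 25:

def page_url(page):
    # Page n of the TOP250 list starts at item offset (n - 1) * 25.
    return 'https://movie.douban.com/top250?start=%d&filter=' % ((page - 1) * 25)

for page in range(1, 4):
    print(page, page_url(page))
# 1 https://movie.douban.com/top250?start=0&filter=
# 2 https://movie.douban.com/top250?start=25&filter=
# 3 https://movie.douban.com/top250?start=50&filter=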
Using Python's requests library to fetch each page and BeautifulSoup to parse the returned HTML, the desired data can be scraped very quickly. The results are then saved to a .txt file.
The Python code is as follows:
import requests
from bs4 import BeautifulSoup

# Film maps the ranking number (as a string) to a dict with name, grade and link.
Film = {}
No = 1

def GrabOnePage(page):
    """Scrape one page (25 movies) of the Douban TOP250 list."""
    global No
    global Film
    url = 'https://movie.douban.com/top250?start=%d&filter=' % ((page - 1) * 25)
    headers = {
        'Host': 'movie.douban.com',
        'Referer': 'https://movie.douban.com/chart',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
    }
    result = requests.get(url, headers=headers)
    result.encoding = 'utf-8'
    soup = BeautifulSoup(result.text, 'lxml')
    all_list = soup.find('ol', class_='grid_view')  # the <ol> holding the 25 entries
    item_list = all_list.find_all('li')             # one <li> per movie
    for x in item_list:
        pic = x.find('div', class_='pic')
        link = pic.find('a').get('href')            # detail-page URL
        name = pic.find('img').get('alt')           # movie title
        grade = x.find('div', class_='star').find('span', class_='rating_num').text
        Film[str(No)] = dict(name=name, grade=grade, link=link)
        No += 1

def main():
    # 10 pages x 25 movies = the full TOP250.
    for i in range(1, 11):
        GrabOnePage(i)
    # Write the results out; utf-8 keeps the Chinese titles readable.
    f = open('Douban.txt', 'w', encoding='utf-8')
    for i in Film:
        f.write(str(i) + ":" + str(Film[str(i)]) + "\n")
    f.close()

if __name__ == '__main__':
    main()
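Because the scraped results are kept in a dict, they could also be written out as JSON instead of plain text. The sketch below is one possible variant (the file name Douban.json is arbitrary), assuming the Film dict has already been filled by the script above:

import json

# Hypothetical alternative to the .txt output: dump the Film dict as JSON.
# Assumes GrabOnePage() has already populated Film.
with open('Douban.json', 'w', encoding='utf-8') as f:
    json.dump(Film, f, ensure_ascii=False, indent=2)

ensure_ascii=False keeps the Chinese movie titles readable in the output file instead of escaping them.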