# -*- coding: utf-8 -*-
import bs4
import requests
def open_url(url):
    # e.g. url = 'https://movie.douban.com/top250'
    # Fetch the page with a browser User-Agent so Douban doesn't reject the request
    hd = {}
    hd['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'
    r = requests.get(url, headers=hd, timeout=10)
    return r
def find_movie(r):
    soup = bs4.BeautifulSoup(r.text, 'html.parser')
    # Book titles (the names still say "movie" -- carried over from the
    # movie Top 250 script this was adapted from)
    movies = []
    targets = soup.find_all('div', class_="pl2")
    for each in targets:
        # The link text spans several lines; the title sits on the second
        movies.append(each.a.text.split('\n')[1].strip())
    # Ratings
    ranks = []
    targets = soup.find_all('span', class_='rating_nums')
    for each in targets:
        ranks.append('评分: %s' % each.text)  # '评分' means 'rating'
    # Book info (author / publisher / date / price), kept as one raw line
    messages = []
    pfgets = soup.find_all('p', class_="pl")
    for each in pfgets:
        # Other elements can get in the way, so use try to skip them
        try:
            messages.append(each.text)
        except:
            continue
    # Stitch the three lists together, one line per book
    result = []
    length = len(movies)
    for i in range(length):
        result.append(movies[i] + ' ' + ranks[i] + ' ' + messages[i] + '\n')
    return result
# Find how many pages there are in total (draft, left disabled --
# main() below simply hard-codes 10 pages)
#def find_depth(r):
#    soup = bs4.BeautifulSoup(r.text, 'html.parser')
#    depth = soup.find('span', class_="next").previous_sibling.previous_sibling.text
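# A runnable sketch of the draft above. It assumes the Douban paginator
# renders the last page number in the element just before 'span.next',
# separated by a whitespace text node (hence the two previous_sibling
# hops) -- an assumption about the page markup, not verified against
# the live site.
def find_depth(r):
    soup = bs4.BeautifulSoup(r.text, 'html.parser')
    next_tag = soup.find('span', class_="next")
    return int(next_tag.previous_sibling.previous_sibling.text)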
def main():
    host = 'https://book.douban.com/top250'
    r = open_url(host)
    # depth = find_depth(r)
    # depth = 10
    result = []
    for i in range(10):
        # Each page lists 25 books, so the offset goes 0, 25, 50, ...
        url = host + '?start=' + str(25 * i)
        r = open_url(url)
        result.extend(find_movie(r))
    with open('booktop250.txt', 'w', encoding='utf-8') as f:
        for each in result:
            f.write(each)

if __name__ == "__main__":
    main()
Modeled on the movie Top 250 scraper, following the same pattern; the naming inside wasn't changed either, and locating the right elements took some time.
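For reference, each line kept in `messages` is the raw text of a `p.pl` tag. If the individual fields are ever needed, here is a minimal sketch of splitting them out, assuming Douban's usual 'author / publisher / date / price' layout (the helper name and the example string are mine, purely illustrative):

def split_info(line):
    # Split the raw info line on '/' and strip whitespace; the field
    # count varies, e.g. translated books also list a translator.
    return [part.strip() for part in line.split('/')]

# e.g. split_info('Author / Publisher / 2006-5 / 29.00元')
#      -> ['Author', 'Publisher', '2006-5', '29.00元']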