豆瓣top250排行1.1
import requests
import bs4

# Douban rejects the default requests User-Agent, so send a browser-like one.
# (The original script sent no headers at all and had its loop body unindented,
# which is a SyntaxError.)
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/57.0.2987.98 Safari/537.36'
}
res = requests.get("https://movie.douban.com/top250", headers=headers)
soup = bs4.BeautifulSoup(res.text, "html.parser")
# Each div.hd wraps one movie entry; its first <a><span> holds the title.
targets = soup.find_all("div", class_="hd")
for each in targets:
    print(each.a.span.text)
导入requests 和BeautifulSoup4
获取目标网址
将 res 获取的 HTML 文本交给 BeautifulSoup 解析,使用 Python 自带的 "html.parser" 解析器,得到 soup 对象
将所有查找到的“div”(class_="hd")放进targets中
利用 for 循环遍历 targets,输出每个条目中 <a> 标签下第一个 <span> 的文本,即电影名。
豆瓣top250排行1.2
import requests
import bs4
import re
def open_url(url):
    """GET *url* with a browser-like User-Agent and return the response.

    Douban blocks requests that carry the default requests User-Agent,
    so a fake browser header is required for the page to load.
    """
    # BUG FIX: the header key was misspelled 'uer-agent', so the real
    # User-Agent header was never sent and Douban could still block us.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/57.0.2987.98 Safari/537.36'
    }
    res = requests.get(url, headers=headers)
    return res
def find_movies(res):
    """Parse one Top-250 result page and return formatted movie entries.

    res -- a response object whose .text attribute holds the page HTML.
    Returns a list of strings, one per movie: title + rating + info line,
    each terminated with a newline.
    """
    soup = bs4.BeautifulSoup(res.text, 'html.parser')

    # 电影名 — each div.hd's first <a><span> is the title.
    movies = []
    targets = soup.find_all("div", class_="hd")
    for each in targets:
        movies.append(each.a.span.text)

    # 评分 — the numeric rating spans.
    ranks = []
    targets = soup.find_all("span", class_="rating_num")
    for each in targets:
        ranks.append('评分:%s' % each.text)

    # 资料 — director/year/genre lines inside each div.bd's first <p>.
    messages = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            messages.append(each.p.text.split('\n')[1].strip()
                            + each.p.text.split('\n')[2].strip())
        # BUG FIX: was a bare `except:` — narrow it to the two failures that
        # actually occur: a div.bd with no <p> (pager block) or too few lines.
        except (AttributeError, IndexError):
            continue

    result = []
    length = len(movies)
    # BUG FIX: original was `for i in range(movies)` — passing a list to
    # range() raises TypeError; iterate over the index range instead.
    for i in range(length):
        result.append(movies[i] + ranks[i] + messages[i] + '\n')
    return result
# Find how many result pages there are in total.
def find_depth(res):
    """Return the page count, read from the pager's last page-number link.

    res -- a response object whose .text attribute holds the page HTML.
    """
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # The element two siblings before the "next" link is the last page number.
    # BUG FIX: original wrote `.previous.sibling` (a typo) for the second
    # hop, which raises AttributeError; it must be `.previous_sibling`.
    depth = soup.find('span', class_='next').previous_sibling.previous_sibling.text
    return int(depth)
def main():
    """Scrape every Top-250 page and write all entries to a UTF-8 text file."""
    # BUG FIX: host was "https://moive.douban.com/top250" — 'moive' is a
    # misspelling of 'movie', so every request went to a nonexistent domain.
    host = "https://movie.douban.com/top250"
    res = open_url(host)
    depth = find_depth(res)

    result = []
    for i in range(depth):
        # Pages are addressed by a 25-entry offset: start=0, 25, 50, ...
        url = host + '/?start=' + str(25 * i)
        res = open_url(url)
        result.extend(find_movies(res))

    with open("豆瓣TOP250电影.txt", "w", encoding="utf-8") as f:
        for each in result:
            f.write(each)


if __name__ == "__main__":
    main()