import os
import random
import time
from urllib import request
def Douban_data_wash(text1):
    """Parse one Douban Top 250 listing page, download each poster image and
    append one record per movie to ./豆瓣数据/豆瓣数据.txt.

    Args:
        text1: decoded HTML of a https://movie.douban.com/top250 page.

    Side effects: downloads 25 poster JPEGs and appends 25 lines to the data
    file; prints each record (and a notice when a movie has no quote).
    """
    text1 = text1.split('<h1>豆瓣电影 Top 250</h1>')[1]  # cut off the page header
    entries = text1.split('</li>')  # split once, outside the loop; one chunk per movie
    # Append all 25 records in one file session instead of reopening per movie.
    with open("./豆瓣数据/豆瓣数据.txt", "a", encoding="utf-8") as file:
        for i in range(0, 25):
            text = entries[i]
            # e.g. <img width="100" alt="肖申克的救赎" src="https://img2.doubanio.com/view/photo/s_ratio_poster/public/p480747492.webp" class="">
            rank = text.split('<em class="">')[1].split("</em>")[0]  # ranking number
            title = text.split('</span>')[0].split('>')[-1].strip()  # movie title
            pic_url = text.split('<img width="100" ')[1].split('src="')[1].split('" class="')[0]  # poster URL
            request.urlretrieve(pic_url, filename="./豆瓣数据/" + title + ".jpg")  # download the poster
            rate = text.split('v:average">')[1].split('</span>')[0]  # rating
            number = text.split('star')[1].split('<span>')[1].split('</span>')[0]  # number of raters
            try:
                # Some movies have no recommendation quote; the split then raises IndexError.
                quote = text.split('inq')[1].split('>')[1].split('<')[0]
            except IndexError:  # was a bare except — only the missing-quote case is expected here
                print(rank + "该处评价为空")
                quote = " "  # empty placeholder quote
            file.write(
                "排名:" + rank + ",《" + title + "》,豆瓣评分" + rate + ",评价人数:" + number + "。推荐理由:" + quote + "\n")
            print("排名{},《{}》,豆瓣评分{},{}。推荐理由:{}".format(rank, title, rate, number, quote))
def DouBanSpide(i):
    """Fetch page *i* (25 movies per page) of the Douban Top 250 listing and
    pass the decoded HTML on to Douban_data_wash for parsing.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"}  # browser-like header to avoid being blocked
    page_url = "https://movie.douban.com/top250?start=" + str(i * 25)
    response = request.urlopen(request.Request(url=page_url, headers=headers))
    Douban_data_wash(response.read().decode())  # clean and store the page data
if __name__ == '__main__':
    # Idempotent directory creation: replaces the commented-out os.mkdir that
    # had to be manually toggled on the first run.
    os.makedirs("./豆瓣数据", exist_ok=True)
    for i in range(0, 10):  # 10 pages x 25 movies = Top 250
        DouBanSpide(i)
        time.sleep(random.randint(2, 10))  # random 2-10 s pause between pages to avoid detection
# Douban Top 250 crawler (豆瓣top250爬虫)