Scraping movie data from 电影天堂 (dytt8.net) with Python

My first reasonably practical scraper, tested and working as of August 3, 2019. Copy and paste the whole script and run it. Its only dependency is lxml, a Python HTML/XML parsing library built on the C libraries libxml2 and libxslt, installable with pip: pip install lxml
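
Before the full script, here is a minimal sketch of the requests + lxml workflow it is built on; the URL and XPath below are purely illustrative:

import requests
from lxml import etree

resp = requests.get("https://example.com", timeout=10)  # fetch the page
tree = etree.HTML(resp.text)                            # parse the HTML into an element tree
print(tree.xpath("//title/text()"))                     # an XPath query returns a list of strings

The complete script: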

from lxml import etree
import random
import requests
import time

BASE_DOMAIN = "https://dytt8.net"
HEADERS = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
}
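# A desktop-browser User-Agent is spoofed above so the site serves normal
# pages instead of rejecting the default python-requests client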

# Scrape one movie's detail page and return its fields as a dict
def parse_detail_page(movie_url):
    movie = {}
    print(movie_url)
    response = requests.get(movie_url, headers=HEADERS)
    # dytt8.net serves GBK-encoded pages; 'replace' keeps a bad byte from crashing the run
    html = etree.HTML(response.content.decode('gbk', 'replace'))
    movie['标题'] = html.xpath("//div[@class='title_all']//font[@color='#07519a']/text()")[0]
    zoom = html.xpath("//div[@id='Zoom']")[0]
    try:
        movie['海报'] = zoom.xpath(".//img/@src")[0]
    except IndexError:
        movie['海报'] = "暂无海报"
    try:
        movie['截图'] = zoom.xpath(".//img/@src")[1]
    except IndexError:
        movie['截图'] = "暂无截图"
    # Every text node inside the Zoom div, as a flat list; each metadata
    # line on the page looks like "◎片  名 ..." and is matched by prefix below
    infos = zoom.xpath(".//text()")

    # Parse a single-line field such as "◎片  名 ..."
    def parse_info(info, rule):
        if info.startswith(rule):
            info = info.replace(rule, "").strip()
            name = "".join(rule.split()).replace("◎", "")
            movie[name] = info

    # Parse a multi-line field (e.g. the cast list) that spans several text nodes
    def parse_infos(info, rule, infos, index):
        if info.startswith(rule):
            info = info.replace(rule, "").strip()
            name = "".join(rule.split()).replace("◎", "")
            movie[name] = [info]
            # Keep consuming lines until the next "◎"/"【" marker starts a new field
            for x in range(index + 1, len(infos)):
                if infos[x].startswith("◎") or infos[x].startswith("【"):
                    break
                movie[name].append(infos[x].strip())
            # Drop empty strings left by blank lines
            movie[name] = [line for line in movie[name] if line]

    single_line_rules = ["◎译  名", "◎片  名", "◎年  代", "◎产  地",
                         "◎类  别", "◎语  言", "◎字  幕", "◎上映日期",
                         "◎IMDb评分", "◎豆瓣评分", "◎视频尺寸", "◎文件格式",
                         "◎文件大小", "◎片  长", "◎导  演", "◎标  签"]
    multi_line_rules = ["◎编  剧", "◎主  演", "◎简  介"]
    for index, info in enumerate(infos):
        for rule in single_line_rules:
            parse_info(info, rule)
        for rule in multi_line_rules:
            parse_infos(info, rule, infos, index)
    # The first two links in the Zoom div are the download links, if any exist
    try:
        movie["下载地址1"] = zoom.xpath(".//a/@href")[0]
    except IndexError:
        movie["下载地址1"] = "暂无"
    try:
        movie["下载地址2"] = zoom.xpath(".//a/@href")[1]
    except IndexError:
        movie["下载地址2"] = "暂无"
    return movie

# Build the full detail-page URL for every movie on one list page
def get_detail_urls(url):
    response = requests.get(url, headers=HEADERS)
    html = etree.HTML(response.text)
    detail_urls = html.xpath('//table[@class="tbspan"]//a[last()]/@href')
    return [BASE_DOMAIN + path for path in detail_urls]

# Append one page of movies (25 per page) to the local data.txt file
def mywrite(movies):
    with open("data.txt", "a", encoding="utf-8") as fp:
        for movie in movies:
            for key in movie:
                if not isinstance(movie[key], list):
                    fp.write(key + ":" + movie[key] + "\n")
                else:
                    fp.write(key + ":\n")
                    for line in movie[key]:
                        fp.write("     " + line + "\n")
            fp.write("\n\n")
    # the with-block closes the file; no explicit fp.close() is needed
    print("Write complete!")

# Scrape list pages a through b-1; the site currently has pages 1-198
def spider(a, b):
    base_url = "https://dytt8.net/html/gndy/dyzz/list_23_{}.html"
    movies = []
    for x in range(a, b):
        print("Scraping page " + str(x) + ", " + str(b - x - 1) + " pages left...")
        url = base_url.format(x)
        movie_urls = get_detail_urls(url)
        i = 0
        for movie_url in movie_urls:
            i += 1
            print(str(i) + " ", end="")
            movie = parse_detail_page(movie_url)
            movies.append(movie)
        print("\nPage " + str(x) + " scraped, writing to data.txt!")
        mywrite(movies)
        movies.clear()
        # To dodge anti-scraping detection, wait a random interval after each page
        s = random.randint(10, 20)
        print("Sleeping " + str(s) + " seconds to avoid detection")
        time.sleep(s)

# a and b are the first and last pages to scrape; keep their difference under 50
if __name__ == '__main__':
    a = int(input("Start from page [1+]: "))
    b = int(input("Scrape up to and including page [198 max]: "))
    print("Scraping " + str(b - a + 1) + " pages in total...")
    start = time.perf_counter()
    spider(a, b + 1)
    end = time.perf_counter()
    print("All done! Took " + str(end - start) + " seconds!")

Screenshot of a run:
[screenshot]
Screenshot of the scraped data (43.4 MB in total):
[screenshot]
