Web Scraping Basics: using xpath, requests, and lxml to scrape movie information from Dianying Tiantang (电影天堂, dy2018.com)

The complete code is as follows:

import requests
from lxml import etree
import urllib3

BASE_DOMAIN = 'https://www.dy2018.com'
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
    "Referer": "https://www.dy2018.com/"  # tells the server which page we supposedly came from
}

# get_detail_urls() below passes verify=False, so silence the
# InsecureRequestWarning that every request would otherwise emit
urllib3.disable_warnings()

# Fetch one detail page and extract the movie's information into a dict
def parse_detail_page(url):
    req = requests.get(url, headers=headers)
    # the site serves GBK-encoded pages; ignore the occasional stray byte
    # instead of letting one bad page crash the whole crawl
    content = req.content.decode('gbk', errors='ignore')
    movie = {}
    html = etree.HTML(content)
    title = html.xpath("//h1/text()")[0]
    movie['title'] = title
    Zoom = html.xpath("//div[@id='Zoom']")[0]
    imgs = Zoom.xpath(".//img/@src")
    # not every detail page carries both a poster and a screenshot,
    # so guard the indexing instead of assuming two images
    if imgs:
        movie['cover'] = imgs[0]
    if len(imgs) > 1:
        movie['screenshot'] = imgs[1]
    def parse_info(info, rule):
        # strip the "◎xxx" label from the front of a field line
        return info.replace(rule, '').strip()

    # the text nodes under #Zoom come back as a flat list, one entry per
    # line of the description (e.g. '◎年  代 2019'), so fields are located
    # by their ◎-prefixed labels; the spacing inside each label must match
    # the page exactly
    infos = Zoom.xpath(".//text()")
    for index, info in enumerate(infos):
        if info.startswith('◎年  代'):
            movie['year'] = parse_info(info, '◎年  代')
        elif info.startswith('◎产  地'):
            movie['country'] = parse_info(info, '◎产  地')
        elif info.startswith('◎类  别'):
            movie['category'] = parse_info(info, '◎类  别')
        elif info.startswith('◎导  演'):
            # a film may list several directors, one per following line,
            # until the next ◎ label starts a new field
            directors = [parse_info(info, '◎导  演')]
            for x in range(index + 1, len(infos)):
                director = infos[x].strip()
                if director.startswith("◎"):
                    break
                if director:  # skip blank text nodes
                    directors.append(director)
            movie['director'] = directors
        elif info.startswith('◎主  演'):
            # same pattern for the cast list
            actors = [parse_info(info, '◎主  演')]
            for x in range(index + 1, len(infos)):
                actor = infos[x].strip()
                if actor.startswith("◎"):
                    break
                if actor:  # skip blank text nodes
                    actors.append(actor)
            movie['actor'] = actors
    return movie



# Collect the detail-page URLs from one listing page
def get_detail_urls(url):
    response = requests.get(url, headers=headers, verify=False)
    # the hrefs we need are plain ASCII, so the encoding requests guesses
    # for response.text is good enough here; no manual GBK decode needed
    text = response.text
    html = etree.HTML(text)
    detail_urls = html.xpath("//table[@class='tbspan']//a/@href")
    # the hrefs are root-relative, so prepend the domain to each one
    # (equivalent to looping over the list and concatenating by hand)
    detail_urls = list(map(lambda url: BASE_DOMAIN + url, detail_urls))
    return detail_urls
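
# Note: plain string concatenation works above only because the hrefs on
# this listing page start with '/'. A more defensive variant, sketched here
# rather than taken from the original post, is urljoin from the standard
# library, which also handles absolute hrefs correctly:
#
#     from urllib.parse import urljoin
#     detail_urls = [urljoin(BASE_DOMAIN, u) for u in detail_urls]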

def main():
    # /html/gndy/dyzz/ is the "latest movies" listing; each iteration of the
    # loop fetches one page of it (note that on this site the first page may
    # be index.html rather than index_1.html, so adjust the range if page 1
    # matters to you)
    start_url = "https://www.dy2018.com/html/gndy/dyzz/index_{}.html"
    movies = []
    for x in range(1, 20):
        url = start_url.format(x)
        detail_urls = get_detail_urls(url)
        for detail_url in detail_urls:
            movie = parse_detail_page(detail_url)
            movies.append(movie)
    print(movies)

if __name__ == '__main__':
    main()
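
As written, main() only prints the scraped list. If you want to keep the results, here is a minimal persistence sketch; it is not part of the original script, and the save_movies name and the movies.json filename are my own choices:

import json

def save_movies(movies, path='movies.json'):
    # ensure_ascii=False keeps the Chinese titles human-readable in the file
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(movies, f, ensure_ascii=False, indent=2)

Call save_movies(movies) at the end of main(), either instead of or in addition to print(movies).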

Across ten thousand rivers and a thousand mountains, won't you give me a follow?
That one small click is what motivates me to share more hands-on material.
My posts build on one another, so a single article may not make complete sense on its own; read a few together and it will click. I'm Xiao Yan, a fellow Python learner. Follow me and let's learn together.
