爬虫实战之爬取电影天堂全部电影信息

打开电影天堂:

            

再点击第二页,发现第一、二页的url分别是:https://www.dytt8.net/html/gndy/dyzz/list_23_1.html 和 https://www.dytt8.net/html/gndy/dyzz/list_23_2.html。通过观察可以发现只有最后的数字不一样,第几页就是几。

然后查看网页源代码:

                     

发现网页的编码方式为gb2312,因此在后面解码的时候要用gb2312(代码中使用其超集gbk)。

通过对网页源代码的分析可以知道我们想要的链接在table标签下。

但是这个网页中可能有很多table,不过观察到目标table有一个属性class=tbspan,所以就利用它成功解析出电影的链接:

from lxml import etree
import requests

url = 'https://www.dytt8.net/html/gndy/dyzz/list_23_1.html'
# Browser-like headers so the site serves the normal page to the crawler.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
    'Referer':'https://www.dytt8.net/html/gndy/dyzz/list_23_1.html'
}
response = requests.get(url,headers=headers)
# requests guesses an encoding for response.text, which can garble this
# gb2312 page, so decode the raw bytes (content) as gbk explicitly.
text = response.content.decode('gbk')
# Parse the page source into an lxml Element tree.
html = etree.HTML(text)
# Each movie's detail link sits inside <table class="tbspan">.
detail_urls = html.xpath("//table[@class='tbspan']//a/@href")
for detail_url in detail_urls:
    print(detail_url)
/html/gndy/dyzz/20190117/58089.html
/html/gndy/dyzz/20190117/58088.html
/html/gndy/dyzz/20190115/58085.html
/html/gndy/dyzz/20190115/58084.html
/html/gndy/dyzz/20190114/58082.html
/html/gndy/dyzz/20190113/58077.html
/html/gndy/dyzz/20190113/58076.html
/html/gndy/dyzz/20190112/58072.html
/html/gndy/dyzz/20190112/58071.html
/html/gndy/dyzz/20190111/58068.html
/html/gndy/dyzz/20190110/58064.html
/html/gndy/dyzz/20190110/58063.html
/html/gndy/dyzz/20190109/58059.html
/html/gndy/dyzz/20190109/58058.html
/html/gndy/dyzz/20190108/58055.html
/html/gndy/dyzz/20190108/58054.html
/html/gndy/dyzz/20190107/58038.html
/html/gndy/dyzz/20190107/58037.html
/html/gndy/dyzz/20190104/58019.html
/html/gndy/dyzz/20190104/58018.html
/html/gndy/dyzz/20190104/58017.html
/html/gndy/dyzz/20190103/58015.html
/html/gndy/dyzz/20190103/58014.html
/html/gndy/dyzz/20190101/58000.html
/html/gndy/dyzz/20190101/57999.html

但这只拿到的是链接的后半部分(站内相对路径),直接是打不开的,所以再定义一个保存域名的全局变量,最终获取一个页面的电影详情页的代码如下:

from lxml import etree
import requests

# Site root, prepended to the relative hrefs scraped from the list page.
BASE_DOMAIN = 'https://www.dytt8.net'
url = 'https://www.dytt8.net/html/gndy/dyzz/list_23_1.html'
# Browser-like headers so the site serves the normal page to the crawler.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
    'Referer':'https://www.dytt8.net/html/gndy/dyzz/list_23_1.html'
}
response = requests.get(url,headers=headers)
# requests guesses an encoding for response.text, which can garble this
# gb2312 page, so decode the raw bytes (content) as gbk explicitly.
text = response.content.decode('gbk')
# Parse the page source into an lxml Element tree.
html = etree.HTML(text)
# Each movie's detail link sits inside <table class="tbspan">.
detail_urls = html.xpath("//table[@class='tbspan']//a/@href")
for detail_url in detail_urls:
    print(BASE_DOMAIN+detail_url)

然后整合一下,获取电影详情的代码:

from lxml import etree
import requests

# Site root, prepended to the relative hrefs scraped from the list pages.
BASE_DOMAIN = 'https://www.dytt8.net'
# url = 'https://www.dytt8.net/html/gndy/dyzz/list_23_1.html'
# Browser-like headers so the site serves the normal pages to the crawler.
HEADERS = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
    'Referer':'https://www.dytt8.net/html/gndy/dyzz/list_23_1.html'
}
def get_detail_urls(url):
    """Fetch one list page and return absolute URLs of every movie detail page.

    Args:
        url: URL of a list page (list_23_<page>.html).

    Returns:
        A list of absolute detail-page URLs.
    """
    response = requests.get(url, headers=HEADERS)
    # Bug fix: response.text lets requests guess the encoding, which garbles
    # this gb2312 site; decode the raw bytes as gbk explicitly, ignoring the
    # occasional byte that falls outside the codec.
    text = response.content.decode('gbk', errors='ignore')
    # Parse the page source into an lxml Element tree.
    html = etree.HTML(text)
    # Each movie's detail link sits inside <table class="tbspan">.
    detail_urls = html.xpath("//table[@class='tbspan']//a/@href")
    # The hrefs are site-relative, so prepend the domain to each one.
    return [BASE_DOMAIN + href for href in detail_urls]

def parse_detail_page(url):
    """Fetch a movie detail page and print its title <font> elements as markup."""
    response = requests.get(url,headers=HEADERS)
    # Decode the raw bytes as gbk; the site declares gb2312 (a gbk subset).
    text = response.content.decode('gbk')
    html = etree.HTML(text)
    # The title lives in <font color='#07519a'> inside <div class="title_all">.
    title = html.xpath("//div[@class='title_all']//font[@color='#07519a']")
    for x in title:
        # tostring() serializes the element to bytes; decode back for printing.
        print(etree.tostring(x,encoding='utf-8').decode('utf-8'))

def spider():
    """Crawl list pages 1-7 and process every movie detail page.

    parse_detail_page prints as a side effect and returns None, so its
    result is not kept (the original bound it to an unused variable).
    """
    base_url = 'https://www.dytt8.net/html/gndy/dyzz/list_23_{}.html'
    for i in range(1, 8):
        url = base_url.format(i)
        detail_urls = get_detail_urls(url)
        # Visit each detail page in turn.
        for detail_url in detail_urls:
            parse_detail_page(detail_url)
if __name__ == '__main__':
    spider()

但我们想要的是标题的文字内容而不是整个元素,所以在xpath末尾加上/text():

def parse_detail_page(url):
    """Fetch a movie detail page and print its title text."""
    response = requests.get(url, headers=HEADERS)
    # Decode the raw bytes as gbk; the site declares gb2312 (a gbk subset).
    text = response.content.decode('gbk')
    html = etree.HTML(text)
    # /text() selects the title string itself rather than the <font> element.
    title = html.xpath("//div[@class='title_all']//font[@color='#07519a']/text()")
    # Robustness fix: title[0] raised IndexError on pages that lack the
    # expected markup; skip such pages instead of crashing the crawl.
    if title:
        print(title[0])

最终爬取所有电影信息的代码如下:

from lxml import etree
import requests

# Site root, prepended to the relative hrefs scraped from the list pages.
BASE_DOMAIN = 'https://www.dytt8.net'
# url = 'https://www.dytt8.net/html/gndy/dyzz/list_23_1.html'
# Browser-like headers so the site serves the normal pages to the crawler.
HEADERS = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
    'Referer':'https://www.dytt8.net/html/gndy/dyzz/list_23_1.html'
}
def get_detail_urls(url):
    """Fetch one list page and return absolute URLs of every movie detail page.

    Args:
        url: URL of a list page (list_23_<page>.html).

    Returns:
        A list of absolute detail-page URLs.
    """
    response = requests.get(url, headers=HEADERS)
    # Bug fix: response.text lets requests guess the encoding, which garbles
    # this gb2312 site; decode the raw bytes as gbk explicitly, ignoring the
    # occasional byte that falls outside the codec.
    text = response.content.decode('gbk', errors='ignore')
    # Parse the page source into an lxml Element tree.
    html = etree.HTML(text)
    # Each movie's detail link sits inside <table class="tbspan">.
    detail_urls = html.xpath("//table[@class='tbspan']//a/@href")
    # The hrefs are site-relative, so prepend the domain to each one.
    return [BASE_DOMAIN + href for href in detail_urls]

def pars_info(info, rule):
    """Remove every occurrence of the field label *rule* from *info* and
    trim surrounding whitespace, returning the bare field value."""
    without_label = info.replace(rule, "")
    return without_label.strip()

def parse_detail_page(url):
    """Fetch one movie detail page and extract its metadata.

    Args:
        url: Absolute URL of the detail page.

    Returns:
        A dict with keys such as '标题', '封面', '剧照', '年代', '产地',
        '类别', '豆瓣评分', '片长', '导演', '演员', '简介', '下载地址';
        fields missing from the page are simply absent from the dict.
    """
    movie = {}
    response = requests.get(url, headers=HEADERS)
    # Decode the raw bytes as gbk; the site declares gb2312 (a gbk subset).
    text = response.content.decode('gbk')
    html = etree.HTML(text)
    title = html.xpath("//div[@class='title_all']//font[@color='#07519a']/text()")[0]
    movie['标题'] = title
    # The first two <img> tags are the poster and a screenshot. Index the
    # list defensively instead of using a bare `except: pass` (which also
    # hid unrelated errors).
    images = html.xpath("//p//img/@src")
    if len(images) > 0:
        movie['封面'] = images[0]
    if len(images) > 1:
        movie['剧照'] = images[1]
    Zoom = html.xpath("//div[@id='Zoom']")[0]
    # Bug fix: "//text()" is document-absolute even when called on an
    # element, so the original scanned the whole page; ".//text()" keeps
    # the scan inside the #Zoom info block.
    infos = Zoom.xpath(".//text()")
    for index, info in enumerate(infos):  # enumerate yields (index, text node)
        # Single-line fields: strip the '◎xx' label and keep the value.
        if info.startswith("◎年  代"):
            movie['年代'] = pars_info(info, "◎年  代")
        elif info.startswith('◎产  地'):
            movie['产地'] = pars_info(info, '◎产  地')
        elif info.startswith('◎类  别'):
            movie['类别'] = pars_info(info, '◎类  别')
        elif info.startswith('◎豆瓣评分'):
            movie['豆瓣评分'] = pars_info(info, '◎豆瓣评分')
        elif info.startswith('◎片  长'):
            movie['片长'] = pars_info(info, '◎片  长')
        elif info.startswith('◎导  演'):
            movie['导演'] = pars_info(info, '◎导  演')
        elif info.startswith('◎主  演'):
            # Multi-line field: the first actor shares the label line; the
            # rest follow one per text node until the next '◎' field.
            actors = [pars_info(info, '◎主  演')]
            for i in range(index + 1, len(infos)):
                actor = infos[i].strip()
                if actor.startswith('◎'):
                    break
                actors.append(actor)
            movie['演员'] = actors
        elif info.startswith('◎简  介'):
            # Bug fix: the original dropped the text on the label line,
            # scanned to the end of the document without stopping at the
            # next '◎' field, and relied on a bare except catching a
            # NameError when no synopsis was found.
            paragraphs = [pars_info(info, '◎简  介')]
            for i in range(index + 1, len(infos)):
                fragment = infos[i].strip()
                if fragment.startswith('◎'):
                    break
                paragraphs.append(fragment)
            # Join the non-empty fragments into one synopsis string.
            movie['简介'] = "\n".join(p for p in paragraphs if p)
    download_url = html.xpath("//td[@bgcolor='#fdfddf']/a/@href")
    movie['下载地址'] = download_url
    return movie

def spider():
    """Crawl list pages 1-7 and return a list of movie-info dicts."""
    base_url = 'https://www.dytt8.net/html/gndy/dyzz/list_23_{}.html'
    movies = []
    for page in range(1, 8):
        page_url = base_url.format(page)
        # Walk every detail page linked from this list page.
        for detail_url in get_detail_urls(page_url):
            movies.append(parse_detail_page(detail_url))
    return movies
if __name__ == '__main__':
    # Run the crawl and dump every movie record to stdout.
    for movie in spider():
        print(movie)

 

  • 3
    点赞
  • 22
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值