Python3 抓取数据报错 IndexError: list index out of range

该楼层疑似违规已被系统折叠 隐藏此楼查看此楼

from lxml import etree

import requests

BASE_DOMAIN = "http://www.dytt8.net"

HEADERS = {

'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'

}

def get_detail_urls(url):
    """Fetch one list page and return the absolute URLs of its movie detail pages.

    Args:
        url: URL of a list page (e.g. list_23_1.html).

    Returns:
        A list of absolute detail-page URLs. May be empty if the page
        layout does not match the xpath (e.g. an error page).
    """
    response = requests.get(url, headers=HEADERS)
    text = response.text
    html = etree.HTML(text)
    # hrefs on the list page are site-relative; prepend the domain.
    # A concrete list (rather than a lazy map object) can be iterated
    # more than once without being silently exhausted.
    hrefs = html.xpath('//table[@class="tbspan"]//a/@href')
    return [BASE_DOMAIN + href for href in hrefs]

def parse_detail_page(url):
    """Download one movie detail page and extract its metadata.

    This is where the reported "IndexError: list index out of range" came
    from: the original code indexed xpath results with [0]/[1]
    unconditionally, which raises as soon as one crawled page (an ad page,
    an error page, a layout variant) does not contain the expected nodes.
    Every xpath lookup is now guarded, so a non-matching page yields a
    partial (possibly empty) dict instead of crashing the whole crawl.

    Args:
        url: absolute URL of a movie detail page.

    Returns:
        A dict with whichever of these keys could be extracted: title,
        cover, screenshot, year, country, category, language, sub-title,
        douban_rating, duration, director, actors, profile.
    """
    movie = {}
    response = requests.get(url, headers=HEADERS)
    # The site serves gb2312; ignore undecodable bytes rather than crash.
    text = response.content.decode('gb2312', 'ignore')
    html = etree.HTML(text)

    titles = html.xpath('//div[@class="title_all"]//font[@color="#07519a"]/text()')
    if titles:
        movie['title'] = titles[0]

    zooms = html.xpath('//div[@id="Zoom"]')
    if not zooms:
        # No content container on this page; return what we have.
        return movie
    zoomE = zooms[0]

    imgs = zoomE.xpath('.//img/@src')
    if len(imgs) >= 1:
        movie['cover'] = imgs[0]
    if len(imgs) >= 2:
        movie['screenshot'] = imgs[1]

    def parse_info(info, rule):
        # Strip the "◎…" field label and surrounding whitespace.
        return info.replace(rule, '').strip()

    infos = zoomE.xpath('.//text()')
    for index, info in enumerate(infos):  # enumerate: the index is needed below
        if info.startswith('◎年  代'):
            movie['year'] = parse_info(info, '◎年  代')
        elif info.startswith('◎产  地'):
            movie['country'] = parse_info(info, '◎产  地')
        elif info.startswith('◎类  别'):
            movie['category'] = parse_info(info, '◎类  别')
        elif info.startswith('◎语  言'):
            movie['language'] = parse_info(info, '◎语  言')
        elif info.startswith('◎字  幕'):
            movie['sub-title'] = parse_info(info, '◎字  幕')
        elif info.startswith('◎豆瓣评分'):
            movie['douban_rating'] = parse_info(info, '◎豆瓣评分')
        elif info.startswith('◎片  长'):
            movie['duration'] = parse_info(info, '◎片  长')
        elif info.startswith('◎导  演'):
            movie['director'] = parse_info(info, '◎导  演')
        elif info.startswith('◎主  演'):
            # First actor is on the label line; following text nodes up to
            # the next "◎" field are additional actors.
            actors = [parse_info(info, '◎主  演')]
            for x in range(index + 1, len(infos)):
                actor = infos[x].strip()
                if actor.startswith('◎'):
                    break
                actors.append(actor)
            movie['actors'] = actors
        elif info.startswith('◎简  介'):
            # NOTE(review): this keeps only the LAST text node before the
            # download section, matching the original behavior; collect
            # into a list instead if the full synopsis is wanted.
            for x in range(index + 1, len(infos)):
                profile = infos[x].strip()
                if profile.startswith('【下载地址】'):
                    break
                movie['profile'] = profile

    # download_url = html.xpath('//td[@bgcolor="#fdfddf"]/a/@href')
    # movie['download_url'] = download_url

    return movie

def spider():
    """Crawl the first 7 list pages of the 最新电影 category and print each movie.

    Outer loop walks list pages 1..7; inner loop visits every detail URL
    found on a page, parses it, and prints the resulting movie dict.
    """
    base_url = 'http://www.dytt8.net/html/gndy/dyzz/list_23_{}.html'
    movies = []  # fixed typo: was "moives"
    for page in range(1, 8):
        # One list page per iteration (7 pages total).
        url = base_url.format(page)
        for detail_url in get_detail_urls(url):
            # One movie detail page per iteration.
            movie = parse_detail_page(detail_url)
            movies.append(movie)
            print(movie)

# Script entry point: only crawl when run directly, not when imported.
if __name__ == '__main__':
    spider()

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值