Help: how do I add preventive exception handling to the XPath extraction step of a Python scraper?

Project background:

I need to scrape part of a site's data using XPath, but my boss wants exception handling added around the XPath extraction, and this has me completely stuck. Any experts passing by, please help!


Problem description

Here is my code. How should I add exception handling around the XPath calls?

import requests
from lxml import etree
import os
import json
import csv
from bs4 import BeautifulSoup


url = "http://ggzyjy.xzfwzx.putian.gov.cn/ptsq/005002/005002003/guidetyright.html"

# a small error handler for the request stage
def error_process():
    from urllib import request, error
    try:
        response = request.urlopen(url)
    except error.HTTPError as e:
        print(e.code, '\n', e.reason, '\n', e.headers)
    except error.URLError as e:
        print(e.reason)
    else:
        print('Request succeeded')

def html_error(html_text):
    # Parse with BeautifulSoup and return the <body> element,
    # or None if the document could not be parsed.
    try:
        bs = BeautifulSoup(html_text, "html.parser")
        return bs.body
    except AttributeError:
        return None

class total_page():
    def download_one_page(self, url):
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36",
            "cookie": "userGuid=1047923792; noOauthRefreshToken=158d7bfcfffc20dae19f058badc206cf; noOauthAccessToken=0853de767af8a462ee2d607baf3274a3; oauthClientId=d65e3629-f440-41a8-8b42-4e173ee7f001; oauthPath=http://ggzyjy.xzfwzx.putian.gov.cn:8808/TPFrame/; oauthLoginUrl=http://127.0.0.1:1112/membercenter/login.html?redirect_uri=; oauthLogoutUrl="

        }
        resp = requests.get(url, headers=headers, timeout=10)
        # parse the response, then release the connection
        self.html = etree.HTML(resp.text)
        resp.close()
        # file_handle = open('general_view.txt', mode='w', encoding='UTF-8')

        # grab the <div> block for each region
        divs = self.html.xpath("/html/body/div[2]/div/div")
        self.result = []
        for div in divs[1:]:
            self.dress = ''.join(div.xpath("./span/a/text()"))
            self.title = ''.join(div.xpath("./ul/li/a/text() | ./ul/li/span/text()")).strip()
            self.time = ''.join(div.xpath("./ul/li/span/text()"))
            self.total_src = 'http://ggzyjy.xzfwzx.putian.gov.cn/' + ''.join(div.xpath("./span/a/@href"))
            # file_handle.writelines([self.total_src + os.linesep, self.title + os.linesep])
            print(self.total_src, "extracted")
            self.result.append(self.total_src)
            print(self.title)
if __name__ == '__main__':
    error_process()
    file_handle = open('general_view.txt', mode='w', encoding='UTF-8')
    d = total_page()
    d.download_one_page(url)
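
For the XPath step itself, one approach (a minimal sketch, not a tested fix for this exact page) is to guard the two places that can actually throw: the parse stage, where etree.HTML raises ParserError on an empty document, and the queries, where a malformed expression raises XPathEvalError and a missing node simply yields an empty list. parse_html and safe_xpath below are hypothetical helper names, not lxml API:

from lxml import etree

def parse_html(text):
    # etree.HTML raises ParserError on an empty document; return None instead
    try:
        return etree.HTML(text)
    except etree.ParserError:
        return None

def safe_xpath(node, expr, default=''):
    # Join the text results of an XPath query; fall back to `default` when
    # the expression is malformed or the node is None (parse failed).
    try:
        return ''.join(node.xpath(expr)).strip() or default
    except (etree.XPathEvalError, AttributeError):
        return default

With those in place, each extraction in the loop becomes a call like self.dress = safe_xpath(div, "./span/a/text()"), and a None result from parse_html can be checked once before any querying starts.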
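Separately, download_one_page fetches with requests while error_process handles urllib errors, so the two never meet. A sketch of the same handling using requests' own exception hierarchy (fetch is an illustrative name):

import requests

def fetch(url, headers=None, timeout=10):
    # Return the page text, or None if the request or the status check fails
    try:
        resp = requests.get(url, headers=headers, timeout=timeout)
        resp.raise_for_status()  # turn 4xx/5xx responses into HTTPError
        return resp.text
    except requests.exceptions.HTTPError as e:
        print('HTTP error:', e)
    except requests.exceptions.RequestException as e:
        print('Request failed:', e)
    return None

RequestException is the base class of requests' exceptions, so the second clause also catches timeouts and connection errors.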







