Project scenario:
I need to scrape part of a website's data with XPath, but my boss wants exception handling added around the XPath calls, and that has honestly stumped me. Any experts passing by, please help!
Problem description
This is my code. How do I add exception handling to the XPath part?
import requests
from lxml import etree
import os
import json
import csv
from bs4 import BeautifulSoup
url = "http://ggzyjy.xzfwzx.putian.gov.cn/ptsq/005002/005002003/guidetyright.html"
# A small error handler for the request itself
def error_process():
    from urllib import request, error
    try:
        response = request.urlopen(url)
    except error.HTTPError as e:
        print(e.code, '\n', e.reason, '\n', e.headers)
    except error.URLError as e:
        print(e.reason)
    else:
        print('Request successful')
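# A sketch of the same check using requests' own exception hierarchy,
# since the rest of this script already uses requests rather than urllib
# (the 10-second timeout here is an assumption, not from the original code):
def error_process_requests():
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()  # raises HTTPError for 4xx/5xx status codes
    except requests.exceptions.HTTPError as e:
        print(e.response.status_code, '\n', e.response.reason)
    except requests.exceptions.RequestException as e:
        print(e)  # covers connection errors, timeouts, etc.
    else:
        print('Request successful')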
# Guard against pages with no usable <body>; BeautifulSoup needs the raw
# HTML string (not the parsed lxml tree), so take it as a parameter
# instead of reading the global d.html
def html_error(html_text):
    try:
        bs = BeautifulSoup(html_text, "html.parser")
        return bs.body
    except AttributeError:
        return None
class total_page():
    def download_one_page(self, url):
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36",
            "cookie": "userGuid=1047923792; noOauthRefreshToken=158d7bfcfffc20dae19f058badc206cf; noOauthAccessToken=0853de767af8a462ee2d607baf3274a3; oauthClientId=d65e3629-f440-41a8-8b42-4e173ee7f001; oauthPath=http://ggzyjy.xzfwzx.putian.gov.cn:8808/TPFrame/; oauthLoginUrl=http://127.0.0.1:1112/membercenter/login.html?redirect_uri=; oauthLogoutUrl="
        }
        # Pass the headers along (they were defined but unused before)
        resp = requests.get(url, headers=headers)
        # Parse the response before closing it
        self.html = etree.HTML(resp.text)
        resp.close()
        # file_handle = open('general_view.txt', mode='w', encoding='UTF-8')
        # Grab the div for each region block
        divs = self.html.xpath("/html/body/div[2]/div/div")
        self.result = []
        for div in divs[1:]:
            self.dress = ''.join(div.xpath("./span/a/text()"))
            self.title = ''.join(div.xpath("./ul/li/a/text() | ./ul/li/span/text()")).strip()
            self.time = ''.join(div.xpath("./ul/li/span/text()"))
            self.total_src = 'http://ggzyjy.xzfwzx.putian.gov.cn/' + ''.join(div.xpath("./span/a/@href"))
            # file_handle.writelines([self.total_src + os.linesep, self.title + os.linesep])
            print(self.total_src, "extracted")
            self.result.append(self.total_src)
            print(self.title)
if __name__ == '__main__':
    error_process()
    file_handle = open('general_view.txt', mode='w', encoding='UTF-8')
    d = total_page()
    d.download_one_page(url)
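For reference, one way to guard the individual XPath queries might look like the sketch below: a hypothetical safe_xpath helper (the name and the default fallback are illustrative, not from the original code) that catches lxml's XPathEvalError for malformed expressions and returns a default when a query matches nothing. It assumes the expressions return strings, i.e. text() or @attribute results:

from lxml import etree

def safe_xpath(node, expression, default=''):
    # etree.XPathEvalError is what lxml raises when .xpath() is given
    # an invalid expression
    try:
        matches = node.xpath(expression)
    except etree.XPathEvalError as e:
        print('Bad XPath expression:', expression, '->', e)
        return default
    # Treat "matched nothing" as a soft failure instead of crashing later
    if not matches:
        print('No match for:', expression)
        return default
    return ''.join(matches).strip()

Inside download_one_page, each extraction line could then become something like self.dress = safe_xpath(div, "./span/a/text()"), so a bad or unmatched expression yields an empty string instead of an exception or silently wrong data.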