Basic syntax of the requests library
r.encoding          | the encoding extracted from the charset field in the response headers
r.apparent_encoding | the encoding inferred by analyzing the response body
>>> import requests
>>> r=requests.get('https://fanyi.baidu.com/')
>>> r.encoding
'UTF-8'
>>> r.apparent_encoding
'utf-8'
>>> r.headers
{'Connection': 'keep-alive', 'Content-Encoding': 'gzip', 'Content-Type': 'text/html; charset=UTF-8', 'Date': 'Wed, 15 Jul 2020 05:53:01 GMT', 'P3p': 'CP=" OTI DSP COR IVA OUR IND COM "', 'Server': 'yunjiasu', 'Set-Cookie': 'BAIDUID=E2CF07F3583670FD2FCB32ADDBFE0AEF:FG=1; expires=Thu, 15-Jul-21 05:53:01 GMT; max-age=31536000; path=/; domain=.baidu.com; version=1', 'Tracecode': '31815643890307597834071513', 'Vary': 'Accept-Encoding', 'Yjs-Id': '91e85a661365296d-106', 'Transfer-Encoding': 'chunked'}
>>>
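When the server omits the charset (requests then falls back to ISO-8859-1) or reports it incorrectly, Chinese pages come out garbled. A minimal sketch of the usual fix, reusing the fanyi.baidu.com URL from above, is to copy apparent_encoding back into encoding before reading r.text:

import requests

r = requests.get('https://fanyi.baidu.com/')
if r.encoding is None or r.encoding.lower() == 'iso-8859-1':
    r.encoding = r.apparent_encoding   # use the encoding guessed from the response body
print(r.text[:200])                    # first 200 characters, now decoded correctly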
Exceptions in the requests library
The most important step in exception handling is:
r.raise_for_status()
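raise_for_status() does nothing for a 2xx response and raises requests.HTTPError for a 4xx/5xx status code, so a bad response becomes an ordinary exception you can catch. A small sketch of this behavior (the 404 test URL below is just an example):

import requests

try:
    r = requests.get('https://httpbin.org/status/404', timeout=10)
    r.raise_for_status()                 # raises requests.HTTPError for 4xx/5xx responses
except requests.HTTPError as e:
    print('HTTP error:', e)              # e.g. 404 Client Error
except requests.RequestException as e:
    print('Other request error:', e)     # timeouts, connection failures, etc.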
A basic framework for fetching a web page
import requests

def getHtmlText(url):
    try:
        r = requests.get(url, timeout=30)   # give up if the server does not answer within 30s
        r.raise_for_status()                # raise HTTPError for 4xx/5xx status codes
        r.encoding = r.apparent_encoding    # decode the body with the inferred encoding
        return r.text
    except requests.RequestException:
        return 'An exception occurred'

if __name__ == '__main__':
    url = 'https://www.baidu.com'           # example URL; replace with the page to fetch
    print(getHtmlText(url))