# Scraper source code — reviewed, OK to copy
from urllib import request,parse
from urllib.error import HTTPError,URLError
# Requirement: post(url, form)
# Requirement: get(url, headers)
def get(url, headers=None):
    """Fetch *url* with an HTTP GET request.

    headers: optional dict of request headers; a default User-Agent
    is supplied by urlrequests when omitted.
    """
    return urlrequests(url, headers=headers)
def post(url, form, headers=None):
    """Fetch *url* with an HTTP POST request.

    form: dict of form fields to url-encode into the request body.
    headers: optional dict of request headers.
    """
    return urlrequests(url, form, headers=headers)
# Core page-fetching helper.
# Chooses the method from the arguments: a form means POST, otherwise GET.
# 1. take the url
# 2. default user_agent
# 3. headers
# 4. build a Request
# 5. urlopen it
# 6. return the decoded body
def urlrequests(url, form=None, headers=None):
    """Fetch *url* and return the response body decoded as UTF-8.

    If *form* is given it is url-encoded and sent as a POST body;
    otherwise a plain GET is issued.  If *headers* is omitted, a
    default Chrome User-Agent is used (some sites reject requests
    without one).

    Returns the decoded body as ``str``, or ``""`` when an
    HTTPError/URLError occurred (the error is printed, not raised).
    """
    user_agent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"
    if headers is None:  # PEP 8: identity test for None, not ==
        headers = {
            'User-Agent': user_agent
        }
    # Empty str (not b"") so the error path matches the success path's type.
    html_bytes = ""
    try:
        if form:
            # POST: url-encode the form dict, then encode to bytes
            form_bytes = parse.urlencode(form).encode('utf-8')
            req = request.Request(url, data=form_bytes, headers=headers)
        else:
            # GET
            req = request.Request(url, headers=headers)
        response = request.urlopen(req)
        # NOTE(review): assumes the server sends UTF-8 — decode may raise
        # UnicodeDecodeError for other charsets; confirm for target sites.
        html_bytes = response.read().decode('utf-8')
    except HTTPError as e:
        # was `pritn(e)` — a NameError that masked the real HTTP error
        print(e)
    except URLError as e:
        # was `pritn(e)` — same typo
        print(e)
    return html_bytes
if __name__=="__main__":
# url 地址 本文为百度,hao123 自己可以换
#百度翻译 单词
# url = 'http://fanyi.baidu.com/sug'
# form = {
# 'kw': '呵呵'
# }
# html_bytes = post(url, form=form)
# print(html_bytes)
# hao 123 网页爬去
url = 'https://www.hao123.com/'
html_byte = get(url)
print(html_byte)