背景
很多网站会限制程序去爬数据,所以必须要伪装隐藏自己,模拟是浏览器发起的请求
添加header和延迟访问
方式一 发起request请求前,先构造好header再传入
示例代码如下:
header中加上User-Agent属性
import urllib.request
import urllib.parse
import json

content = input('Enter the word that needs translated:')
# The "_o" suffix must be removed from the translate path,
# otherwise the API responds with error_code: 50.
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

# Spoof a browser User-Agent so the site does not reject the scripted request.
# The value can be copied from the browser dev tools (Network -> Request Headers).
header = {}
header['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'

# Form fields captured from the browser dev tools (press F12 on the translate page).
# Only 'i' (the text to translate) and 'doctype' are required; the rest can be
# removed without affecting the result.
data = {}
data['i'] = content
data['from'] = 'AUTO'
data['to'] = 'AUTO'
data['smartresult'] = 'dict'
data['client'] = 'fanyideskweb'
data['salt'] = '15601659811655'
data['sign'] = '78817b046452f9663a2b36604f220360'
data['doctype'] = 'json'
data['version'] = '2.1'
data['keyfrom'] = 'fanyi.web'
data['action'] = 'FY_BY_REALTTIME'
data = urllib.parse.urlencode(data).encode('utf-8')

req = urllib.request.Request(url, data, header)
# Context manager guarantees the HTTP response is closed
# (the original left it open — a resource leak).
with urllib.request.urlopen(req) as response:
    html = response.read().decode('utf-8')
target = json.loads(html)
print('result:%s' % (target['translateResult'][0][0]['tgt']))
User-Agent属性的取值在浏览器的开发者工具,network中request headers可查到
方式二 生成request后,加上header
在request对象生成后,通过 *request.add_header()* 方法补充header
import urllib.request
import urllib.parse
import json

content = input('Enter the word that needs translated:')
# The "_o" suffix must be removed from the translate path,
# otherwise the API responds with error_code: 50.
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

# Form fields captured from the browser dev tools (press F12 on the translate page).
# Only 'i' (the text to translate) and 'doctype' are required; the rest can be
# removed without affecting the result.
data = {}
data['i'] = content
data['from'] = 'AUTO'
data['to'] = 'AUTO'
data['smartresult'] = 'dict'
data['client'] = 'fanyideskweb'
data['salt'] = '15601659811655'
data['sign'] = '78817b046452f9663a2b36604f220360'
data['doctype'] = 'json'
data['version'] = '2.1'
data['keyfrom'] = 'fanyi.web'
data['action'] = 'FY_BY_REALTTIME'
data = urllib.parse.urlencode(data).encode('utf-8')

# Variant 2: create the Request first, then attach the User-Agent
# with add_header() instead of passing a header dict to the constructor.
req = urllib.request.Request(url, data)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36')

# Context manager guarantees the HTTP response is closed
# (the original left it open — a resource leak).
with urllib.request.urlopen(req) as response:
    html = response.read().decode('utf-8')
target = json.loads(html)
print('result:%s' % (target['translateResult'][0][0]['tgt']))
延迟访问
- 加入time.sleep休眠时间
- 输入退出条件
import urllib.request
import urllib.parse
import json
import time

# Keep prompting until the user enters 'q!'; sleep between requests so the
# site is not hammered with rapid-fire queries.
# NOTE: the loop body was pasted without indentation in the original, which
# is a SyntaxError — restored here.
while True:
    content = input('Enter the word that needs translated(输入q!退出exit ):')
    if content == 'q!':
        break
    # The "_o" suffix must be removed from the translate path,
    # otherwise the API responds with error_code: 50.
    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

    # Form fields captured from the browser dev tools (press F12 on the
    # translate page). Only 'i' and 'doctype' are required.
    data = {}
    data['i'] = content
    data['from'] = 'AUTO'
    data['to'] = 'AUTO'
    data['smartresult'] = 'dict'
    data['client'] = 'fanyideskweb'
    data['salt'] = '15601659811655'
    data['sign'] = '78817b046452f9663a2b36604f220360'
    data['doctype'] = 'json'
    data['version'] = '2.1'
    data['keyfrom'] = 'fanyi.web'
    data['action'] = 'FY_BY_REALTTIME'
    data = urllib.parse.urlencode(data).encode('utf-8')

    req = urllib.request.Request(url, data)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36')

    # Context manager guarantees the HTTP response is closed per iteration
    # (the original leaked one response object per loop pass).
    with urllib.request.urlopen(req) as response:
        html = response.read().decode('utf-8')
    target = json.loads(html)
    print('result:%s' % (target['translateResult'][0][0]['tgt']))
    # Politeness delay between consecutive requests.
    time.sleep(3)
代理
步骤
通过代理服务来访问
- 定制创建一个opener,参数是一个字典 {'类型': '代理ip:端口号'}:
  proxy_support = urllib.request.ProxyHandler({'类型': '代理ip:端口号'})
  opener = urllib.request.build_opener(proxy_support)
- 安装opener:
  urllib.request.install_opener(opener)
- 调用opener:
  opener.open(url)
import urllib.request
import random

# Site that echoes back the visitor's IP, so we can verify the proxy works.
url = 'http://www.whatismyip.com.tw'

# Pool of candidate HTTP proxies ('ip:port'); one is chosen at random per run.
# NOTE(review): these are sample addresses from the tutorial and are likely
# stale — replace with live proxies before use.
iplist = ['119.6.144.73:81', '183.203.208.166:8118', '111.1.32.28:81']

# Build an opener that routes 'http' traffic through the chosen proxy,
# then install it globally so plain urlopen() uses it.
proxy_support = urllib.request.ProxyHandler({'http': random.choice(iplist)})
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)

# Context manager guarantees the HTTP response is closed
# (the original left it open — a resource leak).
with urllib.request.urlopen(url) as response:
    html = response.read().decode('utf-8')
print(html)