Python Web Scraping (Part 2)

1. Customizing the Request Object: the User-Agent Anti-Scraping Mechanism

Customizing the request object: the urlopen method does not take a dictionary argument, so the headers cannot be passed to it directly; they have to be wrapped in a Request object first.
The purpose of customizing the request object is to get around the first anti-scraping measure: the User-Agent check.

import urllib.request
url = 'https://cn.bing.com/search?q=CSDN'
# Parts of a URL
# http/https --> protocol (scheme)
# cn.bing.com --> host
# port --> http: 80, https: 443, mysql: 3306
# path --> search
# query parameters --> q=CSDN (everything after the ?)
# fragment (anchor) --> everything after the #

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.124 Safari/537.36 Edg/102.0.1245.44'
}
# Customizing the request object
# urlopen does not take a dictionary argument, so headers cannot be passed to it directly

request = urllib.request.Request(url=url, headers=headers)  # keyword arguments are required here; passed positionally, the second argument of Request is data, not headers
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
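
The URL components listed in the comments above can also be checked programmatically. A minimal sketch (not part of the original example) using urllib.parse.urlparse:

from urllib.parse import urlparse

parts = urlparse('https://cn.bing.com/search?q=CSDN')
print(parts.scheme)   # https        --> protocol
print(parts.netloc)   # cn.bing.com  --> host
print(parts.path)     # /search      --> path
print(parts.query)    # q=CSDN       --> query parameters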

2. GET Requests: the quote Method
Convert '周杰伦' into its URL-encoded (percent-encoded) form, using urllib.parse.

import urllib.parse
# Convert 周杰伦 into its URL-encoded form --> relies on urllib.parse
name = urllib.parse.quote('周杰伦')
print(name)
import urllib.request
import urllib.parse
url = 'https://www.baidu.com/s?wd='  # https://www.baidu.com/s?wd=%E5%91%A8%E6%9D%B0%E4%BC%A6
headers = {
    'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.124 Safari/537.36 Edg/102.0.1245.44'
}
# Convert 周杰伦 into its URL-encoded form --> relies on urllib.parse
name = urllib.parse.quote('周杰伦')
print(name)
url = url + name
print(url)
# Customize the request object
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
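
As a quick sanity check (an addition, not in the original post), urllib.parse.unquote reverses the encoding, which confirms that the percent-encoded string maps back to the original text:

import urllib.parse

encoded = urllib.parse.quote('周杰伦')
print(encoded)                        # %E5%91%A8%E6%9D%B0%E4%BC%A6
print(urllib.parse.unquote(encoded))  # 周杰伦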

3. GET Requests: the urlencode Method

import urllib.parse
data = {
    'wd': '周杰伦',
    'sex':'男',
    'location':'中国台湾省'
}
a = urllib.parse.urlencode(data)
print(a)
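
For reference, the printed result should look roughly like this (each key/value pair percent-encoded and the pairs joined with &):

# wd=%E5%91%A8%E6%9D%B0%E4%BC%A6&sex=%E7%94%B7&location=%E4%B8%AD%E5%9B%BD%E5%8F%B0%E6%B9%BE%E7%9C%81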

Steps:
1. Build the request URL
2. Customize the request object
3. Send the request to the server, simulating a browser
4. Read the page source from the response
5. Print the result

import urllib.parse
import urllib.request
base_url = 'https://www.baidu.com/s?'
data = {
    'wd': '周杰伦',
    'sex':'男',
    'location':'中国台湾省'
}
new_url = urllib.parse.urlencode(data)
# Build the request URL
url = base_url + new_url
headers = {
    'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.124 Safari/537.36 Edg/102.0.1245.44'
}
# Customize the request object
request = urllib.request.Request(url=url, headers=headers)
# Send the request to the server, simulating a browser
response = urllib.request.urlopen(request)
# Read the page source from the response
content = response.read().decode('utf-8')
print(content)
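
If the printed source is hard to read in the console, a common variation (not in the original code) is to save it to a local file and open it in a browser; the file name here is just an example:

# Save the page source for inspection in a browser
with open('zhoujielun.html', 'w', encoding='utf-8') as fp:
    fp.write(content)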

4. POST Request: Baidu Translate (sug Endpoint) Example

# POST request
import urllib.request
import urllib.parse
url = 'https://fanyi.baidu.com/sug'
headers = {
    'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.124 Safari/537.36 Edg/102.0.1245.44'
}
data = {
    'kw' : 'spider'
}
# The parameters of a POST request must be encoded
data = urllib.parse.urlencode(data).encode('utf-8')
# POST parameters are not appended to the URL; they are passed via the data argument of the Request object
# and they must be encoded (urlencode, then encode to bytes) first
request = urllib.request.Request(url=url, data=data, headers=headers)
# Send the request to the server, simulating a browser
response = urllib.request.urlopen(request)
# Read the response data
content = response.read().decode('utf-8')
print(content)
# The parameters of a POST request must be URL-encoded:        data = urllib.parse.urlencode(data)
# After URL-encoding, encode() must be called as well:         data = urllib.parse.urlencode(data).encode('utf-8')
# The parameters go into the Request constructor, not the URL: request = urllib.request.Request(url=url, data=data, headers=headers)
# Convert the JSON string into a Python object
import json
obj = json.loads(content)
print(obj)
{"errno":0,"data":[{"k":"spider","v":"n. \u8718\u86db; \u661f\u5f62\u8f6e\uff0c\u5341\u5b57\u53c9; \u5e26\u67c4\u4e09\u811a\u5e73\u5e95\u9505; \u4e09\u811a\u67b6"},{"k":"Spider","v":"[\u7535\u5f71]\u8718\u86db"},{"k":"SPIDER","v":"abbr. SEMATECH process induced damage effect revea"},{"k":"spiders","v":"n. \u8718\u86db( spider\u7684\u540d\u8bcd\u590d\u6570 )"},{"k":"spidery","v":"adj. \u50cf\u8718\u86db\u817f\u4e00\u822c\u7ec6\u957f\u7684; \u8c61\u8718\u86db\u7f51\u7684\uff0c\u5341\u5206\u7cbe\u81f4\u7684"}]}
{'errno': 0, 'data': [{'k': 'spider', 'v': 'n. 蜘蛛; 星形轮,十字叉; 带柄三脚平底锅; 三脚架'}, {'k': 'Spider', 'v': '[电影]蜘蛛'}, {'k': 'SPIDER', 'v': 'abbr. SEMATECH process induced damage effect revea'}, {'k': 'spiders', 'v': 'n. 蜘蛛( spider的名词复数 )'}, {'k': 'spidery', 'v': 'adj. 像蜘蛛腿一般细长的; 象蜘蛛网的,十分精致的'}]}
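
Once json.loads has turned the response into a Python dict, the suggestions can be read with ordinary indexing. A minimal sketch based on the output shown above:

# Each suggestion is a dict with 'k' (the word) and 'v' (its meanings)
for item in obj['data']:
    print(item['k'], '->', item['v'])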

5. POST Request: Baidu Translate Detailed Translation (v2transapi) Example

# POST request: Baidu Translate detailed translation
import urllib.request
import urllib.parse
url = 'https://fanyi.baidu.com/v2transapi?from=en&to=zh'
headers = {
    # Of the request headers copied from the browser, only Cookie is left active here;
    # the rest are kept commented out for reference.
    # 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.124 Safari/537.36 Edg/102.0.1245.44',
    # 'Accept': '*/*',
    # # 'Accept-Encoding': 'gzip, deflate, br',
    # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    # 'Acs-Token': '1655794810538_1655870963719_doKLwiFB8fBU4raSo3DyVeE1j9Kcfar9eZ7/KCCkYi/xVTDeTcb5b1ZVsJLMLyWCRoSk7D9QXGzMwPMrG3k9/KF+lripnq/IT/lKZRFtsx2mgqAQFfmEOVpOyCMUk/ysqeKlDRLQxm9QGT+ZmbMxC8YUGkIvL2BYvUlZiUToEScm+WalBblg8MnDH6ZQdV4xk5vDhekXq2xi/5qMufx5qP0PAghdX4ubcP9wlHGUuJdRgkw+14HDZLZTNuiVAU4iP+qn59DPLkMrtA7dbX+0mgm2nU6WbWNyZDgSVGFkv1jv/09OOEiCngeNxqkpRMVrDEQY2PusrEb0aJxuyf7Ly5xbZlXqoYHAJ+cv2UYoItE=',
    # 'Connection': 'keep-alive',
    # 'Content-Length': '135',
    # 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'Cookie': 'BAIDUID_BFESS=4AE41A61F199E97DFDEC24D9C529DC45:FG=1; BIDUPSID=40188254DB80FE4D23CEB3A62ECAE855; PSTM=1655736944; BAIDUID=40188254DB80FE4D6469C246B7564749:FG=1; ZFY=7IyWFSZJ9Jg6dusQDydJfYyIgD9iZLLDKF:AcE24Fi94:C; ariaDefaultTheme=undefined; RT="z=1&dm=baidu.com&si=d0sjqupvsj&ss=l4mvs3fg&sl=w&tt=evz&bcn=https%3A%2F%2Ffclog.baidu.com%2Flog%2Fweirwood%3Ftype%3Dperf&ld=9vue&ul=9ysu&hd=9yw5"; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; delPer=0; PSINO=6; H_PS_PSSID=36553_36673_36454_31253_34813_36165_36569_36074_36520_26350_36467_36311; BA_HECTOR=212k80a00580ah2h801hb4vm914; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1655866475; APPGUIDE_10_0_2=1; REALTIME_TRANS_SWITCH=1; FANYI_WORD_SWITCH=1; HISTORY_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574=1655870954; ab_sr=1.0.1_ZjcyNDAwOWM1ZGNhNTQ5YTVlZjdkMmFkZDU1MjJlZDBjOThhYzQ3OWUwZmE1N2VhNmVhNjA4MDM4Mzg4MTVmNzA3YzZjNDUxNzI5MGM1ZTNlZWM0NTQ5MzhkYzdjZDExMjgxMmRmZjRlYTdjYjAwNWJhMTVhNmVjNmI1YTZjMmE3N2MzN2M1MGZkOWU3MDMxZTViZmNiMGJkYTNkMWNiZA==',
    # 'Host': 'fanyi.baidu.com',
    # 'Origin': 'https://fanyi.baidu.com',
    # 'Referer': 'https://fanyi.baidu.com/?aldtype=16047',
    # 'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="102", "Microsoft Edge";v="102"',
    # 'sec-ch-ua-mobile': '?0',
    # 'sec-ch-ua-platform': '"Windows"',
    # 'Sec-Fetch-Dest': 'empty',
    # 'Sec-Fetch-Mode': 'cors',
    # 'Sec-Fetch-Site': 'same-origin',
    # 'X-Requested-With': 'XMLHttpRequest',
}
data = {
    'from' : 'en',
    'to': 'zh',
    'query': 'love',
    'transtype': 'realtime',
    'simple_means_flag': '3',
    'sign': '198772.518981',
    'token': 'f49e30e81d08ca57c8404ba16285a065',
    'domain': 'common',
}
# The parameters of a POST request must be encoded
data = urllib.parse.urlencode(data).encode('utf-8')
# Customize the request object
request = urllib.request.Request(url=url, data=data, headers=headers)
# Send the request to the server, simulating a browser
response = urllib.request.urlopen(request)
# Read the response data
content = response.read().decode('utf-8')
import json
obj = json.loads(content)
print(obj)
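
print(obj) dumps the whole response dict on one line. As an optional addition (not in the original), json.dumps with ensure_ascii=False and indent gives a more readable dump while keeping the Chinese text legible:

print(json.dumps(obj, ensure_ascii=False, indent=2))

Note that the sign and token values in data appear to be generated by Baidu's front-end JavaScript for a specific query and session, so the hard-coded values above are unlikely to work unchanged for other words or other accounts.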
