Web Scraping - Request Modules

1. The urllib.request module

1. Versions:
Python 2: urllib2 and urllib
Python 3: urllib and urllib2 were merged into urllib.request.
2. Common methods
urllib.request.urlopen("URL"): sends a request to the site and returns the response.
byte stream = response.read()
string = response.read().decode("utf-8")
urllib.request.Request("URL", headers=dict): builds a request with custom headers.
urlopen() by itself cannot set request headers such as User-Agent, so build a Request object when you need to customize them (see the sketch after the next list).
3. Response object
read(): reads the content the server returned
getcode(): returns the HTTP status code
geturl(): returns the URL of the data actually returned (guards against redirect surprises)
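
A minimal sketch of these calls (the httpbin.org test service stands in here for a real target):

import urllib.request

# urlopen() alone cannot set headers, so build a Request object
# to carry a custom User-Agent
req = urllib.request.Request('http://httpbin.org/get',
                             headers={'User-Agent': 'Mozilla/5.0'})
res = urllib.request.urlopen(req)
print(res.getcode())               # HTTP status code, e.g. 200
print(res.geturl())                # URL of the actual data, after any redirect
print(res.read().decode('utf-8'))  # decode the byte stream into a str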

#Baidu Tieba scraper, function-based implementation
import urllib.request
import urllib.parse

# Fetch one page
def readPage(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
    }
    # Build the request with a custom User-Agent, then send it
    req = urllib.request.Request(url, headers=headers)
    res = urllib.request.urlopen(req)
    html = res.read().decode('utf-8')
    return html

# Write one page to a file
def writePage(filename, html):
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(html)

# Main function
def main():
    name = input('Enter the tieba name: ')
    start = int(input('Enter the start page: '))
    end = int(input('Enter the end page: '))
    # URL-encode the tieba name
    kw = {'kw': name}
    kw = urllib.parse.urlencode(kw)
    for i in range(start, end + 1):
        pn = (i - 1) * 50  # each result page is offset by 50 posts
        baseurl = 'https://tieba.baidu.com/f?'
        url = baseurl + kw + '&pn=' + str(pn)
        html = readPage(url)
        filename = 'page_' + str(i) + '.html'
        writePage(filename, html)

if __name__ == '__main__':
    main()

2. The urllib.parse module

1. Common methods
urlencode(dict): encodes a dict of query parameters
quote(string): percent-encodes a single string (the argument here is a string, not a dict)
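
For instance (the outputs shown as comments are what these calls produce):

import urllib.parse

# urlencode() takes a dict and returns key=value pairs joined by '&'
print(urllib.parse.urlencode({'wd': '中国'}))  # wd=%E4%B8%AD%E5%9B%BD

# quote() takes a bare string and percent-encodes it
print(urllib.parse.quote('中国'))  # %E4%B8%AD%E5%9B%BD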

#Baidu Tieba scraper, class-based implementation
import urllib.request
import urllib.parse

class BaiduSpider:
    def __init__(self):
        # Put the constant, reusable values in __init__
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
        }
        self.baseurl = 'https://tieba.baidu.com/f?'

    def readPage(self, url):
        # Build and send the request
        req = urllib.request.Request(url, headers=self.headers)
        res = urllib.request.urlopen(req)
        html = res.read().decode('utf-8')
        return html

    def writePage(self, filename, html):
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(html)

    def main(self):
        name = input('Enter the tieba name: ')
        start = int(input('Enter the start page: '))
        end = int(input('Enter the end page: '))
        # URL-encode the tieba name
        kw = {'kw': name}
        kw = urllib.parse.urlencode(kw)
        for i in range(start, end + 1):
            pn = (i - 1) * 50
            url = self.baseurl + kw + '&pn=' + str(pn)
            html = self.readPage(url)
            filename = 'page_' + str(i) + '.html'
            self.writePage(filename, html)

if __name__ == '__main__':
    # To call the main() method of the class,
    # you first need to instantiate it
    spider = BaiduSpider()
    spider.main()
#Youdao translate
import urllib.request
import urllib.parse
import json

# Read the text to translate
key = input('Enter the text to translate: ')

# Form data for the POST; it is converted to bytes further below
data = {
    'i': key,
    'from': 'AUTO',
    'smartresult': 'dict',
    'client': 'fanyideskweb',
    'salt': '15880623642174',
    'sign': 'c6c2e897040e6cbde00cd04589e71d4e',
    'ts': '1588062364217',
    'bv': '42160534cfa82a6884077598362bbc9d',
    'doctype': 'json',
    'version': '2.1',
    'keyfrom':'fanyi.web',
    'action': 'FY_BY_CLICKBUTTION'
}

data = urllib.parse.urlencode(data)

# Convert data from str to bytes (POST bodies must be bytes)
data = bytes(data, 'utf-8')

# Send the request and get the response.
# Note: the _o has to be removed from translate_o in the URL
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
    }

req = urllib.request.Request(url,data=data,headers=headers)
res = urllib.request.urlopen(req)
html = res.read().decode('utf-8')
# Parse the JSON string into a dict
r_dict = json.loads(html)
# {"type":"EN2ZH_CN","errorCode":0,"elapsedTime":1,"translateResult":[[{"src":"name","tgt":"的名字"}]]}

r = r_dict['translateResult']  # [[{"src":"name","tgt":"的名字"}]]
result = r[0][0]['tgt']  # [{"src":"name","tgt":"的名字"}] --> {"src":"name","tgt":"的名字"} --> '的名字'
print(result)

3. Request methods

GET: the query parameters are visible in the URL.
POST: add a data parameter in the Request method:
urllib.request.Request(url, data=data, headers=headers)
data: the form data must be submitted as bytes, not str (see the Youdao example above; the next example rewrites it with requests).

#Youdao translate, rewritten with requests
import requests
import json

key = input('Enter the text to translate: ')
data = {
    'i': key,
    'from': 'AUTO',
    'smartresult': 'dict',
    'client': 'fanyideskweb',
    'salt': '15880623642174',
    'sign': 'c6c2e897040e6cbde00cd04589e71d4e',
    'ts': '1588062364217',
    'bv': '42160534cfa82a6884077598362bbc9d',
    'doctype': 'json',
    'version': '2.1',
    'keyfrom':'fanyi.web',
    'action': 'FY_BY_CLICKBUTTION'
}
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
    }
res = requests.post(url,data=data,headers=headers)
res.encoding = 'utf-8'
html = res.text
r_dict = json.loads(html)
result = r_dict['translateResult'][0][0]['tgt']
print(result)

4. The requests module

1. Install with pip install requests, or install it from your IDE.
2. Common requests methods
requests.get(URL)
3. Response object methods
response.text: returns the decoded data as a str (Unicode)
response.content: returns the raw byte stream (binary)
response.content.decode('utf-8'): decode the bytes manually
response.url: returns the URL
response.encoding = 'encoding': sets the encoding that response.text uses

import requests

headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36','Cookie':'BIDUPSID=4FA733ACE6D4F90A52D99F5F363CD85A; PSTM=1576929032; sug=3; sugstore=0; ORIGIN=0; bdime=0; BAIDUID=4FA733ACE6D4F90A83BE743C46630339:SL=0:NR=10:FG=1; BD_UPN=12314753; BDUSS=VFPUlFGU0RER1ZtcDdTR1lPek5vcEdZcUMtRFNNUkN3VHZTb35Cb1hRR0duOHRlRVFBQUFBJCQAAAAAAAAAAAEAAADzvSajSjdnaGgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIYSpF6GEqReR; MCITY=-158%3A; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; delPer=0; BD_CK_SAM=1; BD_HOME=1; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; H_PS_PSSID=1428_31123_21106_31427_31342_30904_31270_31463_30823_31163; PSINO=6; H_PS_645EC=a7848f0JB9WheG1sOVfeKMjHJlUyiwlS1wRIJBkCDEeiFQrxo2RluKd%2B1xs'
    }
# Send the request; the final URL looks like:
# https://www.baidu.com/s?wd=%E4%B8%AD%E5%9B%BD
wd = {'wd': '中国'}
response = requests.get('https://www.baidu.com/s?', params=wd, headers=headers)

# Inspect the response object
print(type(response.text))                # <class 'str'>
print(response.text)                      # decoded str data
print(response.content)                   # raw byte stream
print(response.content.decode('utf-8'))   # decode manually
print(response.url)

response = requests.get('https://qq.yh31.com/zjbq/2920180.html')
# Option 1: decode the raw bytes manually
print(response.content.decode('utf-8'))
# Option 2: set the encoding, then read response.text
response.encoding = 'utf-8'
print(response.text)
4. Sending POST requests with requests
Pass the form data via the data parameter, e.g. requests.post(url, data=data, headers=headers), as the example above shows.
5. Setting proxies with requests
To use a proxy with requests, just pass a proxies parameter to the request method (get/post).

Proxy sites:
Xici free proxies: http://www.xicidaili.com/
Kuaidaili: http://www.kuaidaili.com/
Dailiyun: http://www.dailiyun.com/

import requests

# The proxies dict maps a URL scheme to a proxy address;
# an 'https' key would be needed for https:// URLs
proxy = {
    'http': '36.248.129.239:9999'
}
url = 'http://www.httpbin.org/ip'
res = requests.get(url, proxies=proxy)
print(res.text)  # httpbin echoes the IP address it saw

6. Cookies
A cookie identifies a user through information stored on the client. HTTP is a connectionless protocol: each client/server interaction is limited to one request/response cycle, after which the connection is closed, so on the next request the server would treat the client as brand new. To maintain the relationship between requests, i.e. to let the server know a request comes from the same user as before, the client's information has to be stored somewhere; that is what cookies do.

import requests

resp = requests.get('https://www.baidu.com/')
# Inspect the cookies the server set on this response
print(resp.cookies)
print(resp.cookies.get_dict())

# Simulate a login by sending a saved login cookie

url = 'https://www.zhihu.com/hot'
headers = {
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36','cookie':'_zap=f6651dfd-8259-4706-9032-5727ec6593ff; d_c0="AKAWpA4b6BCPTrYOvjRlh-tSAC2xRRy2R_o=|1583234256"; _ga=GA1.2.1237704894.1583234257; _xsrf=EQmHq5EuP5gF6Ja6bH46i3znv0r53niY; _gid=GA1.2.1825342243.1588076980; tst=h; tshl=; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1587811271,1588076979,1588228873,1588246738; SESSIONID=aq5YCH9MiITrFZOobkIFT3EYgtlfG6SlvGwVB2EUB1F; JOID=UFwUAkLNy7aYh4WBEc5mLyDPZL4Dqr-Dyc_LvVyvhOfqydTIe7wBFMWKhoQZq-aJtgz8-vsmayVtXOxAwCJS2b4=; osd=UlgUC0zPz7aRiYeFEcdoLSTPbbABrr-Kx83PvVWhhuPqwNrKf7wIGseOho0XqeKJvwL-_vsvZSdpXOVOwiZS0LA=; capsion_ticket="2|1:0|10:1588254120|14:capsion_ticket|44:Yjk0ZTgyMjRjZDU0NGFlMjgwMzU4ZmZkMWJhYzA5MmI=|fdf13162982002c673847fae50e99c8f22d583ef7e23228c2d3ace7080b56ee7"; z_c0="2|1:0|10:1588254121|4:z_c0|92:Mi4xRjdYeENBQUFBQUFBb0Jha0Rodm9FQ1lBQUFCZ0FsVk5xU09ZWHdEcnRjZFhPSlkwdXpYZXFualQtekloamplbzdn|76d278afd875611d83dba20ed4d6169d34d0bf1447521478b93ec7ec38c443ae"; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1588254123; KLBRSID=ca494ee5d16b14b649673c122ff27291|1588254207|1588252528'
    }
resp = requests.get(url, headers=headers)
print(resp.text)

7. Session
A session identifies a user through information recorded on the server side; a session here simply means one conversation between client and server.
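
The note above describes the server side; on the client side, requests provides a Session object that keeps cookies across requests, so a sequence of requests is treated as one conversation. A minimal sketch:

import requests

# A Session persists cookies (and connection settings) across requests
s = requests.Session()
s.get('https://www.baidu.com/')  # the server sets cookies on this response
print(s.cookies.get_dict())      # the cookies are kept in the session
s.get('https://www.baidu.com/')  # sent automatically with the saved cookies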
8. Handling untrusted SSL certificates
What is an SSL certificate? An SSL certificate is a kind of digital certificate, similar to an electronic copy of a driver's license, passport, or business license. Because it is configured on a server, it is also called an SSL server certificate. An SSL certificate follows the SSL protocol and is issued by a trusted certificate authority (CA) after the server's identity has been verified; it provides server authentication and encrypts the data in transit.

import requests
url = 'https://inv-veri.chinatax.gov.cn/'

# verify=False skips certificate verification for sites
# whose SSL certificate is not trusted
resp = requests.get(url, verify=False)

print(resp.text)
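
With verify=False, requests emits an InsecureRequestWarning on every call. If that is unwanted, the warning can be silenced through urllib3 (the library requests is built on); a sketch, reusing the URL above:

import requests
import urllib3

# Silence the InsecureRequestWarning triggered by verify=False
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

resp = requests.get('https://inv-veri.chinatax.gov.cn/', verify=False)
print(resp.status_code)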

5. requests module source-code analysis
