Python: the urllib.request and requests Modules

1. The urllib.request Module

1.1 Versions

Python 2: urllib2 and urllib were two separate modules.
Python 3: urllib and urllib2 were merged; the request functionality now lives in urllib.request.
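For readers migrating old scripts, a minimal sketch of the difference (the URL is only a placeholder):

# Python 2 (for reference only; this form will not run under Python 3):
# import urllib2
# response = urllib2.urlopen('http://www.example.com/')

# Python 3: the same call lives under urllib.request
import urllib.request

response = urllib.request.urlopen('http://www.example.com/')
print(response.getcode())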

1.2 Common Methods

• urllib.request.urlopen("URL") — sends a request to the site and returns the response
• byte stream = response.read()
• string = response.read().decode("utf-8")
• urllib.request.Request("URL", headers=dict) — needed because urlopen() alone cannot set a custom User-Agent

import urllib.request
# response is the response object
response = urllib.request.urlopen('https://qq.yh31.com/zjbq/2920180.html')

# read() pulls the content out of the response object
html = response.read().decode('utf-8') # decode the bytes data
print(type(html),html)

# encode(): str --> bytes
# decode(): bytes --> str
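A quick round trip between the two, using a string that appears again in the urlencode examples below:

# str -> bytes -> str
s = '海贼王'
b = s.encode('utf-8')     # b'\xe6\xb5\xb7\xe8\xb4\xbc\xe7\x8e\x8b'
print(b.decode('utf-8'))  # 海贼王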

• urlopen() cannot set a custom User-Agent,
so use urllib.request.Request("URL", headers=dict) to build a request that carries one before sending it.

import urllib.request

url = 'http://www.baidu.com/'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
}

# 1. Build the request object
req = urllib.request.Request(url,headers=headers)
# 2. Get the response object
response = urllib.request.urlopen(req)
# 3. Read the response content with read().decode('utf-8')
html = response.read().decode('utf-8')

print(html)

# Workflow
# 1. Build a request object with Request()
# 2. Get a response object with urlopen()
# 3. Read the response content with read().decode('utf-8')

1.3 The Response Object

• read() — reads the content returned by the server
• getcode() — returns the HTTP status code
• geturl() — returns the URL that actually served the data (useful when a redirect happened)

print(response.getcode()) # the status code
print(response.geturl()) # the URL that actually served the data
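Put together as a self-contained snippet (the target is simply the URL used earlier in this section):

import urllib.request

response = urllib.request.urlopen('http://www.baidu.com/')
print(response.getcode())  # e.g. 200
print(response.geturl())   # the final URL after any redirects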

2. The urllib.parse Module

2.1 Common Methods

• urlencode(dict) — URL-encodes a dictionary of query parameters

# https://www.baidu.com/s?wd=%E6%B5%B7%E8%B4%BC%E7%8E%8B
import urllib.parse

name = {'wd':'海贼王'}

name = urllib.parse.urlencode(name)

print(name)

# Output
wd=%E6%B5%B7%E8%B4%BC%E7%8E%8B
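urlencode() also joins multiple parameters with & and encodes each value, which is handy when building paged URLs like the Tieba example further down:

import urllib.parse

params = {'wd': '海贼王', 'pn': 50}
print(urllib.parse.urlencode(params))
# wd=%E6%B5%B7%E8%B4%BC%E7%8E%8B&pn=50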

Exercise 1: searching Baidu for a keyword

import urllib.request
import urllib.parse

# https://www.baidu.com/s?wd=%E6%B5%B7%E8%B4%BC%E7%8E%8B

# Build the URL

baseurl = 'https://www.baidu.com/s?'

name = input('Enter what you want to search for: ')

# Encode the query with urlencode()
wd = {'wd':name}

name = urllib.parse.urlencode(wd)

url = baseurl + name

print(url)
# Output
# Enter what you want to search for: 美女
# https://www.baidu.com/s?wd=%E7%BE%8E%E5%A5%B3
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36','Cookie':'BIDUPSID=4FA733ACE6D4F90A52D99F5F363CD85A; PSTM=1576929032; sug=3; sugstore=0; ORIGIN=0; bdime=0; BAIDUID=4FA733ACE6D4F90A83BE743C46630339:SL=0:NR=10:FG=1; BD_UPN=12314753; BDUSS=VFPUlFGU0RER1ZtcDdTR1lPek5vcEdZcUMtRFNNUkN3VHZTb35Cb1hRR0duOHRlRVFBQUFBJCQAAAAAAAAAAAEAAADzvSajSjdnaGgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIYSpF6GEqReR; MCITY=-158%3A; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_PS_PSSID=; H_PS_645EC=9e896%2FhdaWy7uBsYfIeQKxjhCoWB1I56kSiZk45G7DyeSZsVpqa6Fx3Np2k; COOKIE_SESSION=3052_0_9_9_86_16_0_0_9_8_15_0_0_0_10_0_1588076550_0_1588079592%7C9%235669_143_1586614174%7C9'
}
# Build the request object
req = urllib.request.Request(url,headers=headers)

# Get the response object
res = urllib.request.urlopen(req)
# Read the response content
html = res.read().decode('utf-8')

# Write the result to a file
with open('result.html','w',encoding='utf-8') as f:
    f.write(html)

• quote(string) — takes a plain string (not a dict) and percent-encodes it

import urllib.request
import urllib.parse
baseurl = 'https://www.baidu.com/s?wd='

name = input('Enter the search term: ')

name = urllib.parse.quote(name)

url = baseurl + name

print(url)
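The difference from urlencode() in one glance, using the same keyword as before:

import urllib.parse

print(urllib.parse.quote('海贼王'))               # %E6%B5%B7%E8%B4%BC%E7%8E%8B
print(urllib.parse.urlencode({'wd': '海贼王'}))   # wd=%E6%B5%B7%E8%B4%BC%E7%8E%8B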

Exercise 2: Baidu Tieba

# Requirement: given a Tieba (forum) name plus a start page and an end page,
# save every page in that range as a local file.

# Analysis: 1. Find the pattern in the URLs
# https://tieba.baidu.com/f?kw=%E5%A6%B9%E5%AD%90&pn=0   page 1
# https://tieba.baidu.com/f?kw=%E5%A6%B9%E5%AD%90&pn=50  page 2
# https://tieba.baidu.com/f?kw=%E5%A6%B9%E5%AD%90&pn=100 page 3
# Pattern: pn = (page number - 1) * 50
# 2. Fetch the page content
# 3. Save the data

import random
import urllib.request
import urllib.parse

# Pick a random User-Agent
headers_list = [{ 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'},{'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11'}]

headers = random.choice(headers_list)

name = input('Enter the Tieba name: ')

start = int(input('Enter the start page: '))

end = int(input('Enter the end page: '))

# URL-encode the Tieba name
kw = {'kw':name}

kw = urllib.parse.urlencode(kw)

# Build the URL, send the request, get the response, save the data

for i in range(start,end+1):

    # Build the URL
    pn = (i - 1)*50
    baseurl = 'https://tieba.baidu.com/f?'

    url = baseurl + kw + '&pn=' + str(pn)


    # Send the request
    req = urllib.request.Request(url,headers=headers)

    # print(url)
    res = urllib.request.urlopen(req)

    html = res.read().decode('utf-8')


    # Write the page to a file
    filename = 'page_' + str(i) + '.html'

    with open(filename,'w',encoding='utf-8') as f:

        print('Crawling page %d' % i)
        f.write(html)

The same spider written with functions

import random
import urllib.request
import urllib.parse

# Fetch a page
def readPage(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
    }

    # Send the request
    req = urllib.request.Request(url, headers=headers)

    # print(url)
    res = urllib.request.urlopen(req)

    html = res.read().decode('utf-8')

    return html

# Write a page to a file
def writePage(filename,html):

    with open(filename,'w',encoding='utf-8') as f:

        f.write(html)



# Main function
def main():

    name = input('Enter the Tieba name: ')

    start = int(input('Enter the start page: '))

    end = int(input('Enter the end page: '))

    # URL-encode the Tieba name
    kw = {'kw': name}

    kw = urllib.parse.urlencode(kw)

    for i in range(start,end+1):

        pn = (i - 1) * 50
        baseurl = 'https://tieba.baidu.com/f?'

        url = baseurl + kw + '&pn=' + str(pn)

        html = readPage(url)

        filename = 'page_' + str(i) + '.html'

        writePage(filename,html)


if __name__ == '__main__':

    main()

The same spider written as a class

import urllib.request
import urllib.parse

class BaiduSpider:

    def __init__(self):

        # Put the values that never change into __init__
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
        }
        self.baseurl = 'https://tieba.baidu.com/f?'


    def readPage(self,url):
        # Send the request
        req = urllib.request.Request(url, headers=self.headers)

        # print(url)
        res = urllib.request.urlopen(req)

        html = res.read().decode('utf-8')

        return html



    def writePage(self,filename,html):

        with open(filename, 'w', encoding='utf-8') as f:
            f.write(html)

    def main(self):
        name = input('Enter the Tieba name: ')

        start = int(input('Enter the start page: '))

        end = int(input('Enter the end page: '))

        # URL-encode the Tieba name
        kw = {'kw': name}

        kw = urllib.parse.urlencode(kw)

        for i in range(start, end + 1):
            pn = (i - 1) * 50

            url = self.baseurl + kw + '&pn=' + str(pn)

            html = self.readPage(url)

            filename = 'page_' + str(i) + '.html'

            self.writePage(filename, html)

if __name__ == '__main__':

    # To call the main() method of the class,
    # you first need to create an instance.
    spider = BaiduSpider()

    spider.main()

3. Request Methods

• GET — the query parameters are visible in the URL

• POST

  1. Add a data argument in the Request call: urllib.request.Request(url,data=data,headers=headers)
  2. data: the form data must be submitted as bytes, not str (a minimal sketch follows this list)
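A minimal POST sketch with urllib.request; it assumes http://www.httpbin.org/post is reachable (that test service simply echoes the submitted form back), which makes it easy to verify that the bytes-encoded data arrived:

import urllib.request
import urllib.parse

# form data -> query string -> bytes
data = urllib.parse.urlencode({'key': 'value'}).encode('utf-8')

headers = {'User-Agent': 'Mozilla/5.0'}
req = urllib.request.Request('http://www.httpbin.org/post', data=data, headers=headers)

res = urllib.request.urlopen(req)
print(res.read().decode('utf-8'))  # the echoed form should contain key=value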

Youdao Translate example

import urllib.request
import urllib.parse
import json

# Ask for the text to translate

key = input('Enter the text you want to translate: ')

# The submitted form data has to be converted to bytes

data = {
    'i': key,
    'from': 'AUTO',
    'smartresult': 'dict',
    'client': 'fanyideskweb',
    'salt': '15880623642174',
    'sign': 'c6c2e897040e6cbde00cd04589e71d4e',
    'ts': '1588062364217',
    'bv': '42160534cfa82a6884077598362bbc9d',
    'doctype': 'json',
    'version': '2.1',
    'keyfrom':'fanyi.web',
    'action': 'FY_BY_CLICKBUTTION'

}

data = urllib.parse.urlencode(data)

# Convert data to bytes
data = bytes(data,'utf-8')

# Send the request and get the response. Note: remove the _o from the URL path (translate_o -> translate)
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
    }

req = urllib.request.Request(url,data=data,headers=headers)
res = urllib.request.urlopen(req)
html = res.read().decode('utf-8')
# Convert the JSON string into a dict
r_dict = json.loads(html)
# {"type":"EN2ZH_CN","errorCode":0,"elapsedTime":1,"translateResult":[[{"src":"name","tgt":"的名字"}]]}

r = r_dict['translateResult'] #  [[{"src":"name","tgt":"的名字"}]]
result = r[0][0]['tgt'] # [{"src":"name","tgt":"的名字"}] -->{"src":"name","tgt":"的名字"}
print(result)

4. The requests Module

4.1 Installation

• pip install requests
• or install it from within your IDE / development tool

4.2 Common requests Methods

• requests.get(URL)

import requests

headers = {
         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36','Cookie':'BIDUPSID=4FA733ACE6D4F90A52D99F5F363CD85A; PSTM=1576929032; sug=3; sugstore=0; ORIGIN=0; bdime=0; BAIDUID=4FA733ACE6D4F90A83BE743C46630339:SL=0:NR=10:FG=1; BD_UPN=12314753; BDUSS=VFPUlFGU0RER1ZtcDdTR1lPek5vcEdZcUMtRFNNUkN3VHZTb35Cb1hRR0duOHRlRVFBQUFBJCQAAAAAAAAAAAEAAADzvSajSjdnaGgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIYSpF6GEqReR; MCITY=-158%3A; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; delPer=0; BD_CK_SAM=1; BD_HOME=1; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; H_PS_PSSID=1428_31123_21106_31427_31342_30904_31270_31463_30823_31163; PSINO=6; H_PS_645EC=a7848f0JB9WheG1sOVfeKMjHJlUyiwlS1wRIJBkCDEeiFQrxo2RluKd%2B1xs'
}
# Send the request
# https://www.baidu.com/s?wd=%E4%B8%AD%E5%9B%BD

wd = {'wd':'中国'}
response = requests.get('https://www.baidu.com/s?',params=wd,headers=headers)

# The response object

print(response) # <Response [200]>

# print(response.text) # returns a str

# print(response.content) # returns the raw bytes

print(response.content.decode('utf-8')) # decode manually

print(response.url)

# Automatic decoding: tell requests the encoding, then use .text
response = requests.get('https://qq.yh31.com/zjbq/2920180.html')

# print(response.content.decode('utf-8'))

response.encoding = 'utf-8'

print(response.text)

4.3 Methods and Attributes of the response Object

• response.text — returns the data as a Unicode string (str)
• response.content — returns the raw bytes
• response.content.decode('utf-8') — decode manually
• response.url — returns the URL
• response.encoding = 'encoding' — tells requests which encoding .text should use (a short example follows)
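A compact illustration of the difference, reusing the page from section 1.2 (that the page is UTF-8 encoded is an assumption here):

import requests

response = requests.get('https://qq.yh31.com/zjbq/2920180.html')

print(type(response.content))   # <class 'bytes'>
response.encoding = 'utf-8'     # note: the attribute is .encoding, not .encode()
print(type(response.text))      # <class 'str'>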

4.4 Sending POST Requests with requests

import requests
import json

key = input('Enter the text you want to translate: ')


data = {
    'i': key,
    'from': 'AUTO',
    'smartresult': 'dict',
    'client': 'fanyideskweb',
    'salt': '15880623642174',
    'sign': 'c6c2e897040e6cbde00cd04589e71d4e',
    'ts': '1588062364217',
    'bv': '42160534cfa82a6884077598362bbc9d',
    'doctype': 'json',
    'version': '2.1',
    'keyfrom':'fanyi.web',
    'action': 'FY_BY_CLICKBUTTION'

}

url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'

headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
    }

res = requests.post(url,data=data,headers=headers)

res.encoding = 'utf-8'

html = res.text

r_dict = json.loads(html)

result = r_dict['translateResult'][0][0]['tgt']

print(result)

4.5 Setting a Proxy with requests

• To use a proxy with requests, simply pass the proxies parameter to the request method (get/post)

import requests

# Set up the proxy
proxy = {
    'http':'36.248.129.239:9999'
}

url = 'http://www.httpbin.org/ip' # this endpoint reports the caller's IP, so it shows whether the proxy works

res = requests.get(url,proxies=proxy)

print(res.text)
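Free proxies die quickly, so the IP above is almost certainly stale by now. A slightly more defensive sketch (the proxy address is a placeholder you would substitute) maps both schemes and adds a timeout:

import requests

proxy = {
    'http': 'http://36.248.129.239:9999',   # placeholder proxy, replace with a live one
    'https': 'http://36.248.129.239:9999'
}

try:
    res = requests.get('http://www.httpbin.org/ip', proxies=proxy, timeout=5)
    print(res.text)
except requests.exceptions.RequestException as e:
    print('proxy failed:', e)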

• Proxy providers
Xici free proxies: http://www.xicidaili.com/
Kuaidaili: http://www.kuaidaili.com/
Dailiyun: http://www.dailiyun.com/

4.6 Cookies

cookie: identifies the user through information stored on the client side.
HTTP is a stateless protocol: the interaction between client and server is limited to a single request/response cycle, after which the connection is closed. On the next request the server would treat the client as a brand-new visitor, so to keep the conversation going and let the server know the request comes from the same user, the client-side information has to be stored somewhere.

import requests

resp = requests.get('https://www.baidu.com/')

print(resp.cookies)
print(resp.cookies.get_dict()) # the cookie values as a dict

# Simulating a login by reusing a Cookie header captured from the browser

url = 'https://www.zhihu.com/hot'

headers = {
         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36','cookie':'_zap=f6651dfd-8259-4706-9032-5727ec6593ff; d_c0="AKAWpA4b6BCPTrYOvjRlh-tSAC2xRRy2R_o=|1583234256"; _ga=GA1.2.1237704894.1583234257; _xsrf=EQmHq5EuP5gF6Ja6bH46i3znv0r53niY; _gid=GA1.2.1825342243.1588076980; tst=h; tshl=; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1587811271,1588076979,1588228873,1588246738; SESSIONID=aq5YCH9MiITrFZOobkIFT3EYgtlfG6SlvGwVB2EUB1F; JOID=UFwUAkLNy7aYh4WBEc5mLyDPZL4Dqr-Dyc_LvVyvhOfqydTIe7wBFMWKhoQZq-aJtgz8-vsmayVtXOxAwCJS2b4=; osd=UlgUC0zPz7aRiYeFEcdoLSTPbbABrr-Kx83PvVWhhuPqwNrKf7wIGseOho0XqeKJvwL-_vsvZSdpXOVOwiZS0LA=; capsion_ticket="2|1:0|10:1588254120|14:capsion_ticket|44:Yjk0ZTgyMjRjZDU0NGFlMjgwMzU4ZmZkMWJhYzA5MmI=|fdf13162982002c673847fae50e99c8f22d583ef7e23228c2d3ace7080b56ee7"; z_c0="2|1:0|10:1588254121|4:z_c0|92:Mi4xRjdYeENBQUFBQUFBb0Jha0Rodm9FQ1lBQUFCZ0FsVk5xU09ZWHdEcnRjZFhPSlkwdXpYZXFualQtekloamplbzdn|76d278afd875611d83dba20ed4d6169d34d0bf1447521478b93ec7ec38c443ae"; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1588254123; KLBRSID=ca494ee5d16b14b649673c122ff27291|1588254207|1588252528'
     }

resp = requests.get(url,headers=headers)

print(resp.text)

4.7 Session

session: identifies the user through information recorded on the server side; the word "session" here simply means one conversation between the client and the server. A sketch of how requests keeps such a conversation alive on the client side follows.
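On the client side, requests provides a Session object that automatically stores and resends cookies between requests, which is a convenient way to stay "logged in" across calls. A minimal sketch, assuming httpbin.org's cookie test endpoints are available:

import requests

# Session() keeps cookies (and other settings) across requests
session = requests.Session()

# The first request sets a cookie; the session stores it automatically
session.get('http://www.httpbin.org/cookies/set?token=abc123')

# The second request sends the stored cookie back to the server
resp = session.get('http://www.httpbin.org/cookies')
print(resp.text)  # expected to contain {"token": "abc123"}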

4.8 Handling Untrusted SSL Certificates

What is an SSL certificate?
• An SSL certificate is a type of digital certificate, comparable to an electronic copy of a driver's licence, passport, or business licence. Because it is installed on a server, it is also called an SSL server certificate. An SSL certificate follows the SSL protocol and is issued by a trusted certificate authority (CA) after the server's identity has been verified; it provides server authentication and encrypts the data in transit.

import requests
url = 'https://inv-veri.chinatax.gov.cn/'

resp = requests.get(url,verify=False) # verify=False skips certificate verification so the page can still be fetched

print(resp.text)
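With verify=False, requests (through urllib3) typically prints an InsecureRequestWarning on every call. If the warning is not wanted, it can be silenced, for example like this (a hedged sketch; the warning class lives in urllib3, which requests depends on):

import urllib3
import requests

# Suppress the InsecureRequestWarning that verify=False would otherwise trigger
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

resp = requests.get('https://inv-veri.chinatax.gov.cn/', verify=False)
print(resp.status_code)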