Python urllib (3): 1. urlencode for multi-parameter requests; 2. Baidu Translate POST request (POST data cannot be appended to the URL); 3. Fetching all Baidu Translate data; 4. Douban GET requests

1. Using urlencode to build a request with multiple parameters

# Using urlencode to build a request with multiple parameters
# Target page: https://www.baidu.com/s?wd=周杰伦&sex=男
import urllib.request
import urllib.parse

base_url = 'https://www.baidu.com/s?'

data = {
    'wd': '周杰伦',
    'sex': '男',
    'sing':'歌曲'
}

new_data = urllib.parse.urlencode(data)

url = base_url + new_data

headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36 Edg/116.0.0.0"}

request = urllib.request.Request(url=url, headers=headers)
# Send the request to the server
response = urllib.request.urlopen(request)

# Read the HTML page data and get the response source
content = response.read().decode("utf-8")
print(content)
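For comparison, a minimal sketch of what urlencode actually produces (versus quote, which only encodes a single value); the expected output is shown in the trailing comments:

import urllib.parse

# quote() percent-encodes a single value
print(urllib.parse.quote('周杰伦'))
# -> %E5%91%A8%E6%9D%B0%E4%BC%A6

# urlencode() encodes a whole dict into key=value pairs joined by '&'
print(urllib.parse.urlencode({'wd': '周杰伦', 'sex': '男'}))
# -> wd=%E5%91%A8%E6%9D%B0%E4%BC%A6&sex=%E7%94%B7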

2. Baidu Translate POST request (POST data cannot be appended to the URL)

# Baidu Translate: POST request
# Imports
import urllib.request
# Endpoint: find the request URL you need in the browser dev tools (F12, Network tab)
url = 'https://fanyi.baidu.com/sug'
# Request headers
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36 Edg/116.0.0.0"}
# data
data = {
    'kw': 'spider'
}
# URL-encode the data, then encode it to UTF-8 bytes
data = urllib.parse.urlencode(data).encode('utf-8')
# A POST request cannot have its parameters appended to the URL; pass them as the data argument of Request
request = urllib.request.Request(url=url, headers=headers, data=data)

# Send the request to the server
response = urllib.request.urlopen(request)

# Read the response body and decode it
content = response.read().decode("utf-8")
# Import json for parsing the response
import json
# Convert the JSON string into a Python object
obj = json.loads(content)

print(obj)
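To make the Chinese text in the result readable, it helps to pretty-print it. A small optional sketch, assuming the sug endpoint returns its suggestions under a 'data' list (verify this against the printed obj):

# Pretty-print with ensure_ascii=False so Chinese characters are shown as-is
print(json.dumps(obj, ensure_ascii=False, indent=2))
# Assumed structure: {"errno": 0, "data": [{"k": ..., "v": ...}, ...]}
for item in obj.get('data', []):
    print(item.get('k'), '->', item.get('v'))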

3. Fetching all Baidu Translate data

# Baidu Translate: full translation data
import urllib.request
import urllib.parse

# Endpoint that returns the full translation data
url = 'https://fanyi.baidu.com/v2transapi?from=en&to=zh'
# Request headers: for this endpoint the Cookie header is what matters; replace the long value below with the Cookie copied from your own logged-in browser session (the value here is the author's and will have expired)
headers = {"Cookie":"BIDUPSID=359429789B4E589B318E621011F98A01; PSTM=1642150308; __yjs_duid=1_509dd28c4aec6cb726c25a04881a2a151640083333034; BDUSS=lxa25GVFZQZ0RmYUJHRnp2eERudWJ6eVBiOTE0VmJVVllJdXlKY0QzYkowaDVpRVFBQUFBJCQAAAAAAAAAAAEAAADMN6iOb8rFyKW1xLCuAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMlF92HJRfdhVX; BDUSS_BFESS=lxa25GVFZQZ0RmYUJHRnp2eERudWJ6eVBiOTE0VmJVVllJdXlKY0QzYkowaDVpRVFBQUFBJCQAAAAAAAAAAAEAAADMN6iOb8rFyKW1xLCuAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMlF92HJRfdhVX; REALTIME_TRANS_SWITCH=1; HISTORY_SWITCH=1; FANYI_WORD_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; H_WISE_SIDS=219946_234020_131862_216850_213356_214798_219942_213030_110085_243885_244478_244720_240590_245600_248174_247146_256083_254833_256348_256739_254317_257586_255230_257995_258723_258838_258984_258958_230288_256222_259708_258773_234295_234208_257262_259643_255910_254300_260278_256230_260356_260364_253022_255212_258081_260330_260352_251786_260805_260836_259408_259300_259422_259584_260717_261043_261028_261116_258578_261664_261471_261712_261629_261863_262052_262067_259033_262184_262165_262226_262229_261410_262263_260441_259403_236312_262487_262296_262452_261869_262621_262608_262606_262597_249410_259518_259944_262775_262743_262747_262906_263074_256999_263203_262987_262282_253901_263301_263278_243615_261683_261620_259447_263416_245653_263549_257289_8000083_8000126_8000142_8000150_8000156_8000164_8000171_8000177_8000195_8000203; H_WISE_SIDS_BFESS=219946_234020_131862_216850_213356_214798_219942_213030_110085_243885_244478_244720_240590_245600_248174_247146_256083_254833_256348_256739_254317_257586_255230_257995_258723_258838_258984_258958_230288_256222_259708_258773_234295_234208_257262_259643_255910_254300_260278_256230_260356_260364_253022_255212_258081_260330_260352_251786_260805_260836_259408_259300_259422_259584_260717_261043_261028_261116_258578_261664_261471_261712_261629_261863_262052_262067_259033_262184_262165_262226_262229_261410_262263_260441_259403_236312_262487_262296_262452_261869_262621_262608_262606_262597_249410_259518_259944_262775_262743_262747_262906_263074_256999_263203_262987_262282_253901_263301_263278_243615_261683_261620_259447_263416_245653_263549_257289_8000083_8000126_8000142_8000150_8000156_8000164_8000171_8000177_8000195_8000203; MCITY=-53%3A; BAIDUID=4FA510A05410004B33EF51007DA08923:FG=1; BA_HECTOR=01852k8h2704a48h24058g8i1ig851k1p; ZFY=OyVrCDKol7NbNTbKUbw885OfM9tG9YDHAVQiqBjirHg:C; BAIDUID_BFESS=4FA510A05410004B33EF51007DA08923:FG=1; delPer=0; PSINO=1; BDRCVFR[dG2JNJb_ajR]=mk3SLVN4HKm; BDRCVFR[C0sZzZJZb70]=mk3SLVN4HKm; H_PS_PSSID=; BDORZ=FFFB88E999055A3F8A630C64834BD6D0; BCLID=6775486379151272863; BCLID_BFESS=6775486379151272863; BDSFRCVID=S-FOJexroG0ZmSbq3aoeqaaMUuweG7bTDYrEOwXPsp3LGJLVFakFEG0Pts1-dEu-S2OOogKKLeOTHGCF_2uxOjjg8UtVJeC6EG0Ptf8g0M5; BDSFRCVID_BFESS=S-FOJexroG0ZmSbq3aoeqaaMUuweG7bTDYrEOwXPsp3LGJLVFakFEG0Pts1-dEu-S2OOogKKLeOTHGCF_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF=tRAOoC_-tDvDqTrP-trf5DCShUFsttLjB2Q-XPoO3KJADfOPKjbHhn_L-fQuLRQf5mkf3fbgy4op8P3y0bb2DUA1y4vp0toW3eTxoUJ2-KDVeh5Gqq-KXU4ebPRiWPQ9Qgbx5hQ7tt5W8ncFbT7l5hKpbt-q0x-jLTnhVn0MBCK0hD89DjKKD6PVKgTa54cbb4o2WbCQL56P8pcN2b5oQT8lhJbab6JKaKTD3RjzQ45beq06-lOUWJDkXpJvQnJjt2JxaqRC3JjOsl5jDh3MKToDb-oteltHB2Oy0hvcBn5cShnjLUjrDRLbXU6BK5vPbNcZ0l8K3l02V-bIe-t2XjQh-p52f6_JtRIf3f; 
H_BDCLCKID_SF_BFESS=tRAOoC_-tDvDqTrP-trf5DCShUFsttLjB2Q-XPoO3KJADfOPKjbHhn_L-fQuLRQf5mkf3fbgy4op8P3y0bb2DUA1y4vp0toW3eTxoUJ2-KDVeh5Gqq-KXU4ebPRiWPQ9Qgbx5hQ7tt5W8ncFbT7l5hKpbt-q0x-jLTnhVn0MBCK0hD89DjKKD6PVKgTa54cbb4o2WbCQL56P8pcN2b5oQT8lhJbab6JKaKTD3RjzQ45beq06-lOUWJDkXpJvQnJjt2JxaqRC3JjOsl5jDh3MKToDb-oteltHB2Oy0hvcBn5cShnjLUjrDRLbXU6BK5vPbNcZ0l8K3l02V-bIe-t2XjQh-p52f6_JtRIf3f; APPGUIDE_10_6_2=1; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1694776408; Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574=1694777058; ab_sr=1.0.1_ZWNmZjBlMjY0OWYyNjA1ZTYxNDRhZTI2NjIyNmJjOTcwZGE5ZjU3OTQ1Yjg3ZDFlMTgyNDM1MDczOTgwMmE4YWIwMGE1NmM5NjliNzAzY2YwYmE1MDkwY2M5YjYzODdiOWY2N2Y1OGRjNmRkODdkOTc5MTVhY2YxNjQxMTA1ZjZlMDNiYjVlMDQxNWNhNzk2OGY0NjM0OGM3YjBiYzc5ODQzZmY1N2IwYTA3MzQ0Njg2ZTYyYWFjY2RkYTNlYTUy"}
# Form data: copy every field shown under Form Data in the browser dev tools (sign, token and ts are tied to that session)
data = {
    "from": "en",
    "to": "zh",
    "query": "spider",
    "transtype": "realtime",
    "simple_means_flag": "3",
    "sign": "63766.268839",
    "token": "3dfdea119e17b74fb8fad08c2071a657",
    "domain": "common",
    "ts": "1694777076331"
}

data = urllib.parse.urlencode(data).encode('utf-8')
# A POST request cannot have its parameters appended to the URL; pass them as the data argument of Request
request = urllib.request.Request(url=url, headers=headers, data=data)

# Send the request to the server
response = urllib.request.urlopen(request)

# Read the response body and decode it
content = response.read().decode("utf-8")
# Import json for parsing the response
import json
# Convert the JSON string into a Python object
obj = json.loads(content)

print(obj)

4. Douban GET request: first page


import urllib.request

url = 'https://movie.douban.com/chart'
headers = {
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36 Edg/116.0.0.0"}

# Build the request with a User-Agent header so it is not rejected as a bot
request = urllib.request.Request(url=url, headers=headers)

# Send the request to the server
response = urllib.request.urlopen(request)

# Read the HTML page data and get the response source
content = response.read().decode("utf-8")

# Save the page source to a file (the chart page is HTML, despite the .json extension)
with open('douban.json', 'w', encoding='utf-8') as fp:
    fp.write(content)

5. Douban GET requests: downloading multiple pages of data


# https://movie.douban.com/top250?start=0&filter=


# https://movie.douban.com/top250?start=25&filter=



# https://movie.douban.com/review/best/?start=0

# https://movie.douban.com/review/best?start=20

# Imports
import urllib.parse
import urllib.request

# First function: build the Request for a given page
def create_request(page):
    # The part of the URL that is the same for every page
    base_url = "https://movie.douban.com/top250?"

    # The part that changes per page
    data = {
        'start': (page - 1) * 25,  # page offset: top250 shows 25 entries per page
        'limit': 25                # entries per page
    }

    # URL-encode the query parameters
    data = urllib.parse.urlencode(data)

    # Join base_url and the encoded parameters into the full URL, and print it
    url = base_url + data
    print(url)

    # Request headers
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36 Edg/116.0.0.0"}

    # Build the customized Request (the User-Agent helps avoid the basic anti-scraping check)
    request = urllib.request.Request(url=url, headers=headers)
    # Return the request
    return request

# Second function: send the request and return the page source
def get_content(request):
    # Send the request to the server
    response = urllib.request.urlopen(request)
    # Read the HTML page data and get the response source
    content = response.read().decode('utf-8')
    return content

# Third function: save the downloaded content to a file
def down_load(page, content):  # page is used in the file name
    # Build the file name from the page number and write the page source
    with open('douban' + str(page) + '.json', 'w', encoding='utf-8') as fp:
        fp.write(content)


if __name__ == '__main__':
    start_page = int(input('Start page: '))
    end_page = int(input('End page: '))
    # Loop over the requested pages
    for page in range(start_page, end_page + 1):
        # Build the Request for this page
        request = create_request(page)
        # Fetch the page source
        content = get_content(request)
        # Save it to a file
        down_load(page, content)
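As a quick sanity check of the paging math, this small sketch only builds (and, via create_request's own print, shows) the URLs for the first three pages without sending any requests; top250 shows 25 entries per page, so start should be 0, 25 and 50:

# Print the generated URLs only
for page in (1, 2, 3):
    create_request(page)
# expected output: ...start=0&limit=25, ...start=25&limit=25, ...start=50&limit=25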



6. KFC store list: downloading multiple pages via POST (only minor changes from the previous example)

# POST request
#
# Form data observed in the browser dev tools for page 1:
# http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname
# cname: 北京
# pid:
# pageIndex: 1
# pageSize: 10
#
# and for page 2 (only pageIndex changes):
# http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname
# cname: 北京
# pid:
# pageIndex: 2
# pageSize: 10

# Imports
import urllib.parse
import urllib.request

# First function: build the POST Request for a given page
def create_request(page):
    # The endpoint is the same for every page
    base_url = "http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname"

    # The form data; only pageIndex changes from page to page
    data = {
        "cname": "北京",
        "pid": '',
        "pageIndex": page,
        "pageSize": 10
    }

    # URL-encode the form data, then encode it to UTF-8 bytes
    data = urllib.parse.urlencode(data).encode('utf-8')

    # Unlike the GET example, the data is NOT appended to the URL:
    # url = base_url + data
    # print(url)

    # Request headers
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36 Edg/116.0.0.0"}

    # Build the Request; for POST the encoded data goes into the Request object
    request = urllib.request.Request(url=base_url, headers=headers, data=data)
    # Return the request
    return request

# Second function: send the request and return the response body
def get_content(request):
    # Send the request to the server
    response = urllib.request.urlopen(request)
    # Read the response body and decode it
    content = response.read().decode('utf-8')
    return content

# Third function: save the downloaded content to a file
def down_load(page, content):  # page is used in the file name
    # Build the file name from the page number and write the JSON response
    with open('kfc' + str(page) + '.json', 'w', encoding='utf-8') as fp:
        fp.write(content)


if __name__ == '__main__':
    start_page = int(input('Start page: '))
    end_page = int(input('End page: '))
    # Loop over the requested pages
    for page in range(start_page, end_page + 1):
        # Build the POST Request for this page
        request = create_request(page)
        # Fetch the response body
        content = get_content(request)
        # Save it to a file
        down_load(page, content)
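Unlike the Douban pages, this endpoint returns JSON, so the saved content can be parsed directly. A minimal sketch for listing the stores; the 'Table1' and 'storeName' keys are assumptions about the response layout, so check one of the saved files if the structure differs:

import json

def print_store_names(content):
    # Assumed response shape: {"Table1": [{"storeName": ..., "addressDetail": ...}, ...]}
    obj = json.loads(content)
    for store in obj.get('Table1', []):
        print(store.get('storeName'), store.get('addressDetail'))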

 

7. urllib exception handling


import urllib.request
import urllib.error

# A URL used to trigger HTTPError:
# url = "https://blog.csdn.net/leva345/article/details/132907839?spm=1000.2115.3001.6382&utm_medium=distribute.pc_feed_v2.none-task-blog-yuanlijihua_tag_v1-2-132907839-null-null.pc_personrec&depth_1-utm_source=distribute.pc_feed_v2.none-task-blog-yuanlijihua_tag_v1-2-132907839-null-null.pc_personrec"

# A URL that raises URLError (the domain does not resolve):
url = "https://www.goudan.com"

# Request headers
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36 Edg/116.0.0.0"
 }

# Wrap the request in try/except so different failures produce readable messages
try:
    # Build the request
    request = urllib.request.Request(url=url, headers=headers)
    # Send it to the server
    response = urllib.request.urlopen(request)
    # Read the response body
    content = response.read().decode('utf-8')
    print(content)
except urllib.error.HTTPError:
    print('HTTPError: the page could not be fetched')
except urllib.error.URLError:
    print('URLError: the address could not be reached')
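HTTPError also carries the status code and reason, which is usually more informative than a fixed message; a small variant of the try/except above (same url and headers):

try:
    response = urllib.request.urlopen(urllib.request.Request(url=url, headers=headers))
    print(response.read().decode('utf-8'))
except urllib.error.HTTPError as e:
    # e.code is the HTTP status (e.g. 404), e.reason the server's explanation
    print('HTTPError:', e.code, e.reason)
except urllib.error.URLError as e:
    # URLError covers lower-level failures such as DNS errors
    print('URLError:', e.reason)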

 

 

8. Handlers and proxies: sending too many requests from one IP can get you blocked, so route requests through a handler (and, if needed, a proxy)


# Handler-based request
import urllib.request
import urllib.parse
# Target URL
url = 'https://baike.baidu.com/'

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36 Edg/116.0.0.0"
}

request = urllib.request.Request(url=url,headers=headers)

# Instead of calling urlopen directly...
# response = urllib.request.urlopen(request)

# ...build a handler, wrap it in an opener, and open the request through it
handler = urllib.request.HTTPHandler()

opener = urllib.request.build_opener(handler)

response = opener.open(request)




# Read the HTML page data and get the response source
content = response.read().decode("utf-8")
print(content)
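The HTTPHandler above just reproduces what urlopen does by default; the same opener mechanism is what lets you plug in a proxy. A minimal sketch using ProxyHandler (the proxy address below is a placeholder, not a real proxy):

# Route the same request through a proxy instead of connecting directly
proxies = {'http': 'http://127.0.0.1:7890', 'https': 'http://127.0.0.1:7890'}  # placeholder proxy address
proxy_handler = urllib.request.ProxyHandler(proxies)
opener = urllib.request.build_opener(proxy_handler)
response = opener.open(request)
content = response.read().decode('utf-8')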

 
