Crawler templates: the requests basics module

01 request: basic test

import requests


class RequestSpider(object):
    def __init__(self):
        url = "https://www.baidu.com/"
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'
        }
        self.response = requests.get(url=url, headers=headers)

    def run(self):
        # raw response body as bytes
        data = self.response.content

        # 1. headers that were sent with the request
        requests_headers = self.response.request.headers

        # 2. headers returned in the response
        response_headers = self.response.headers

        # 3. response status code
        code = self.response.status_code

        # 4. cookies sent with the request (a private attribute of PreparedRequest)
        requests_cookie = self.response.request._cookies
        print(requests_cookie)

        # 5. cookies set by the response
        response_cookie = self.response.cookies
        print(response_cookie)


RequestSpider().run()
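
response.content above is the raw body as bytes, so it usually needs decoding before use. A minimal standalone sketch of the difference between content and text (same Baidu URL; assumes the page is UTF-8 encoded):

import requests

response = requests.get("https://www.baidu.com/")

# content is the raw body as bytes; decode it explicitly
html_from_bytes = response.content.decode('utf-8')

# text is a str decoded with the encoding requests guessed from the headers;
# set response.encoding first if that guess is wrong
response.encoding = 'utf-8'
html_from_text = response.text

print(html_from_bytes == html_from_text)  # True once the encodings match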


02 request: Baidu URL with query-string parameters

import requests

url = "https://www.baidu.com/s?"
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'
}
params = {
    'wd': '美女'
}
response = requests.get(url=url, headers=headers, params=params)

data = response.content.decode()

with open('baidu.html', 'w', encoding='utf-8') as f:
    f.write(data)

# Sending a POST request: data= takes form fields, json= takes a JSON body
# requests.post(url=url, data={...}, json={...})
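
As a hedged, runnable illustration of the difference between data= (form-encoded body) and json= (JSON body); httpbin.org is assumed here purely as an echo endpoint and is not part of the original:

import requests

# data= sends application/x-www-form-urlencoded form fields
form_response = requests.post('https://httpbin.org/post', data={'wd': 'test'})
print(form_response.json()['form'])   # {'wd': 'test'}

# json= serializes the dict and sets Content-Type: application/json
json_response = requests.post('https://httpbin.org/post', json={'wd': 'test'})
print(json_response.json()['json'])   # {'wd': 'test'}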

03 request: JSON handling

import requests
import json

url = "https://api.github.com/user"

# this endpoint returns standard JSON rather than HTML
response = requests.get(url)

# # manual route: bytes -> str
# data = response.content.decode()
#
# # str -> dict (json.loads, not json.load, which reads file objects)
# data_dict = json.loads(data)

# json() converts a JSON body straight into a Python dict or list
data = response.json()

print(data['message'])
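
For completeness, the manual route the commented-out lines were attempting; note it is json.loads (for strings), not json.load (for file objects). A minimal sketch:

import json
import requests

response = requests.get('https://api.github.com/user')

# manual route: bytes -> str -> dict
data_str = response.content.decode()
data_dict = json.loads(data_str)

# response.json() does the same in one step, and raises a
# JSONDecodeError if the body is not valid JSON
assert data_dict == response.json()
print(data_dict.get('message'))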

04 intranet authentication

import requests

url = ""
# 发送post请求
data = {

}
# response = requests.post(url,data=data)

# 内网 需要 认证
auth = (user,pwd)
response = requests.get(url,auth=auth)
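
auth=(user, pwd) is shorthand for HTTP Basic Auth. A runnable sketch against httpbin.org's test endpoint (an assumption for demonstration; the original intranet URL is left blank):

import requests
from requests.auth import HTTPBasicAuth

# httpbin's /basic-auth/{user}/{passwd} accepts exactly those credentials
url = 'https://httpbin.org/basic-auth/myuser/mypass'

# the tuple form and HTTPBasicAuth are equivalent
response = requests.get(url, auth=('myuser', 'mypass'))
print(response.status_code)   # 200

response = requests.get(url, auth=HTTPBasicAuth('myuser', 'mypass'))
print(response.json())        # {'authenticated': True, 'user': 'myuser'}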

05 request: IP proxy

import requests

# 1.请求url
url = "http://baidu.com"

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'
}

# sample free proxy; it may well be offline by now
proxy = {'http': 'http://115.223.7.110:80'}

response = requests.get(url=url, headers=headers, proxies=proxy)
print(response.status_code)
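
Free proxies like the sample above are often already dead, so a timeout and error handling keep the script from hanging. A hedged sketch:

import requests

url = 'http://baidu.com'
proxy = {'http': 'http://115.223.7.110:80'}  # same sample proxy; may be offline

try:
    response = requests.get(url, proxies=proxy, timeout=5)
    print(response.status_code)
except (requests.exceptions.ProxyError, requests.exceptions.Timeout) as e:
    print('proxy failed:', e)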

06 verify: ignoring the certificate

import requests

url = "https://www.12306.cn/"

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'
}

# HTTPS normally relies on a certificate issued by a third-party CA.
# 12306 is served over HTTPS, but at the time it used a self-issued
# certificate rather than a CA-signed one, so verification fails.
# Fix: tell requests to skip certificate verification with verify=False.
response = requests.get(url=url, headers=headers, verify=False)
data = response.content.decode()

with open('03-ssl.html', 'w', encoding='utf-8') as f:
    f.write(data)
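
verify=False makes urllib3 emit an InsecureRequestWarning on every request. It can be silenced; a sketch, reasonable only for throwaway scraping scripts:

import urllib3
import requests

# suppress the warning that verify=False triggers
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

response = requests.get('https://www.12306.cn/', verify=False)
print(response.status_code)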

07 cookie: dict comprehension

import requests

# URL of the member page we want to scrape
member_url = "https://www.yaozh.com/member/"

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'
}

# cookie string copied from a logged-in browser session
cookies = 'acw_tc=2f624a3816063084872425645e1a39db08d805ba42e9362646c61150b84459; PHPSESSID=5d55asfp4jq8qjljpdncefv42; Hm_lvt_65968db3ac154c3089d7f9a4cbb98c94=1606308489; _ga=GA1.2.358237203.1606308489; _gid=GA1.2.81185910.1606308489; _gat=1; Hm_lpvt_65968db3ac154c3089d7fa4cbb98c94=606308591; yaozh_logintime=1606308608; yaozh_user=1011508%09%E8%8B%8F%7%A9%86%E5%86%B0%E7%99%BD%E6%9C%88%E6%99%A8; yaozh_userId=1011508; yaozh_jobstatus=kptt67UcJie6zKnFSe2JyYnoaSZ5ZmmpSdg26qb21rg66flM6bh5%2BscZJwbIVN7fBLDecc6%2BVM%2FSJHz78b0dmDdKRtmnCH0Jqq1ZemzNL2C34c39eb300BDcD84ff78fcbFa0e16Hg5%2FXm2iVb4eVm5FpamVwZphwU27UcJmeW6vKpZeFoNWblZebWZZrlZqYkWttZXBjmXJTbt4%3Dded04249ebd3f7f8fdf580ddf057dc78; db_w_auth=835368%09%E8%8B%8F%E7%A9%86%E5%86%B0%E7%99%BD%E6%9C%88%E6%99%A8; UtzD_f52b_saltkey=TYzr7923; UtzD_f52b_lastvisit=1606305009; UtzD_f52b_lastact=1606308609%09uc.php%09; UtzD_f52b_auth=44539GcljwyxnF28hO62fM7%2BiB5r6H7G7FwkTDCk4rpBIsMb%2Br6V%2Fa%2FCAe4n31ipRHhVvcRN%2B20L6QceoqsP5KEAFik; yaozh_uidhas=1; yaozh_mlogin=1606308611; acw_tc=2f624a3816063084872425645e1a39db08d80ba42e9362646c61150b84459'
# requests needs a dict, not a raw string
# cook_dict = {}
cookies_list = cookies.split('; ')
# for cookie in cookies_list:
#     cook_dict[cookie.split('=')[0]] = cookie.split('=')[1]

# dict comprehension; split on the first '=' only, since values may contain '='
cook_dict = {cookie.split('=', 1)[0]: cookie.split('=', 1)[1] for cookie in cookies_list}

response = requests.get(url=member_url, headers=headers, cookies=cook_dict)

data = response.content.decode()

with open("药智网.html",'w',encoding="utf-8") as f:
    f.write(data)
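
As an alternative to the comprehension, the standard library's http.cookies.SimpleCookie can parse the same header string; a sketch using a truncated stand-in for the long cookie string above:

from http.cookies import SimpleCookie

cookies = 'acw_tc=2f624a38; PHPSESSID=5d55asfp4jq8qjljpdncefv42'  # truncated sample
parser = SimpleCookie()
parser.load(cookies)

cook_dict = {key: morsel.value for key, morsel in parser.items()}
print(cook_dict)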

08 session: logging in to obtain cookies

import requests

# URL of the member page we want to scrape
member_url = "https://www.yaozh.com/member/"

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'
}

# a Session object automatically stores cookies and re-sends them on later requests
session = requests.Session()

# 1. log in programmatically
login_url = 'https://www.yaozh.com/login'
login_form_data = {
    'username': '苏穆冰白月晨',
    'pwd': '119000sr',
    'formhash': '2E94DD8BE',
    'backurl': '%2F%2Fwww.yaozh.com',
}

login_response = session.post(url=login_url, data=login_form_data, headers=headers)

# 2. after logging in, the session carries valid cookies to the target page
data = session.get(url=member_url, headers=headers).content.decode()

with open("药智网.html", 'w', encoding="utf-8") as f:
    f.write(data)
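
The POST above returns a page even when the credentials are rejected, so a rough sanity check helps. The marker string below is an assumption about the member page's HTML, not a documented contract:

# continuing from the script above
if '退出' in data:  # logged-in pages on this site typically show a logout link
    print('login appears to have succeeded')
else:
    print('login may have failed; note that formhash and backurl are '
          'regenerated per session and must be copied from a fresh login page')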
