day5 - Login Anti-Crawling and XPath

1. Automatic login with requests

Steps for automatic login with requests:

Step 1: Log in to the target site manually.

Step 2: Grab the site's cookie value from the logged-in session (e.g. from the browser's developer tools).

Step 3: Add that cookie value to the request headers when sending requests.

import requests

headers = {
    'cookie': '_zap=e6c6ddba-4cb7-4276-a2f1-211af31d1a78; _xsrf=fdfd5815-10b5-4b31-8ae7-af43812d2d4a; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1680319145; d_c0=ANCWDoTIjhaPTpGMnmpCwGfYszr2MxemdFk=|1680319144; captcha_session_v2=2|1:0|10:1680319144|18:captcha_session_v2|88:ZzFQTGFzTGx3Q3Y5cy9DVjhabjdCVkl1WmFQcmxpdllkVzVGUG9MdHQvM0IrYTI3dTBlVWpMK1BGQ1Y1VmdJaA==|30e64d2a3af950d08ceb7aec0f29d460045681003554ec346a0eb449b497ed83; SESSIONID=K7wKtdpM3EIKxu7TfE4FQNP8nBCeagRLsDslogNTmtt; JOID=Vl0UBUu2Gjoh-ZztG7Tf6BF9VmYI_ClKbIn5mVGBXndth-3bSxq7HEf8me4aJSs3JJRZumCMHJlReLEm29sb9w8=; osd=WlsTCk-6HD0u_ZDrHLvb5Bd6WWIE-i5FaIX_nl6FUnFqiOnXTR20GEv6nuEeKS0wK5BVvGeDGJVXf74i190c-As=; __snaker__id=K4jujWdsoYwoy226; gdxidpyhxdE=AzHkSx2bZCf%2B%2BYfvBOCGkT6uy%2BUs42pmA5cQU%5CNXdHCDBQVNE6hXeggmuKEXmz%5CmS0cTg0cjIqu0QAGMs9WNyk%5CTEXMCl8cOppMh5LJr99tTtkHgRtHS41Zk8RMpR4Cou6fdZnKgZZCCv%2B7WfYBclRWSeMXGYHhzoP5evccwnc6CxEgN%3A1680320047092; YD00517437729195%3AWM_NI=rNC6jlx2Tx0%2BAZb3J%2B7HrRcv7Q%2B6nl6TfN1FM%2FU%2Fj%2FuvFT9R6vwY8rrjXIXbE8twrzF52AX%2FYu5tL5JRAdPCjB%2FABn%2BFl1lnZiboecpeuv3u4bRaHqSqQ0jkOeTVPHi9ZzE%3D; YD00517437729195%3AWM_NIKE=9ca17ae2e6ffcda170e2e6eed0b65ffcbfa88cf969b8b48ab3c55e839a9bb0d16bf4ea97d6ea79fbb2b992e82af0fea7c3b92ab2928dd7e968bcee9c99b3218386aed4c921f598beb3e27ef58fe5d7ed68f68caaafb44e8ff19bd0e925b7aa8ab5bc5af6aab9babb7ab2b698aadb6a87e8bc98f17286ebfea2db259babfb95cf3f8a8e8f85d349aa9ee1a2c166e99aa4d2ef669ab7ae90e869b4bbbb97b27ff8be9e82cb7b91af9fd1e9618dbb9cb7d344abe7969bc837e2a3; YD00517437729195%3AWM_TID=rzR%2BAjupLKdBAFAFBFKUag%2FZo058ngqP; o_act=login; ref_source=other_https://www.zhihu.com/signin?next=/; expire_in=15552000; q_c1=7e80954b07864387a50286010f835d1b|1680319155000|1680319155000; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1680319158; tst=r; z_c0=2|1:0|10:1680319158|4:z_c0|92:Mi4xcmFnVE53QUFBQUFBMEpZT2hNaU9GaGNBQUFCZ0FsVk5zX0FVWlFBVXNHSTNGSlFuWmhmUlJScTZIUkpEdWtXb3dn|bdfcf4e0e24ea33b2ee2647a7fed5579f0c77b69b39296b20468baf0c6b2c666; KLBRSID=cdfcc1d45d024a211bb7144f66bda2cf|1680319158|1680319143',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'
}

response = requests.get('https://www.zhihu.com/', headers=headers)

print(response.text)

2. Getting cookies with selenium

from selenium.webdriver import Chrome

# 1. Create a browser and open the page that requires automatic login
b = Chrome()
b.get('https://www.taobao.com')

# 2. Leave enough time to log in manually (the window that b points to must show the logged-in state)
input('Press Enter once you have logged in: ')

# 3. Get the cookies from the logged-in session and save them to a local file
# result is a list whose elements are cookie dicts
result = b.get_cookies()
with open('files/taobao.txt', 'w', encoding='utf-8') as f:
    f.write(str(result))

3. Using cookies with selenium

from selenium.webdriver import Chrome
# 1. Create a browser and open the page that requires automatic login
b = Chrome()
b.get('https://www.taobao.com')

# 2. Read the locally saved cookies (eval turns the saved str() text back into a list)
with open('files/taobao.txt', encoding='utf-8') as f:
    result = eval(f.read())

# 3. Add the cookies to the browser
for i in result:
    b.add_cookie(i)

# 4. Reload the page so the cookies take effect
b.get('https://www.taobao.com')
input('end:')
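
Note: saving with str() and reading back with eval() round-trips the cookie list, but eval will execute whatever text is in the file. A safer sketch using the json module (files/taobao.json is an assumed filename, not from the original notes):

import json
from selenium.webdriver import Chrome

b = Chrome()
b.get('https://www.taobao.com')
input('Press Enter once you have logged in: ')

# Save the cookie list as JSON instead of a Python literal
with open('files/taobao.json', 'w', encoding='utf-8') as f:
    json.dump(b.get_cookies(), f)

# Later: load the cookies, add them back, then reload the page
with open('files/taobao.json', encoding='utf-8') as f:
    for cookie in json.load(f):
        b.add_cookie(cookie)
b.get('https://www.taobao.com')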

4. Using a proxy with requests

import requests

headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}

# Configure the proxy (the key is the URL scheme; this IP:port is a sample and has likely expired)
proxies = {
    'https': '116.140.52.224:4513'
}

# Send the request through the proxy
res = requests.get('https://movie.douban.com/top250?start=0&filter=', headers=headers, proxies=proxies)
print(res.text)
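
Free proxy IPs like the one above expire quickly, so the request may fail. One way to confirm a proxy is actually being used is to request an IP-echo service (httpbin.org/ip is assumed reachable here):

import requests

proxies = {'https': '116.140.52.224:4513'}  # sample proxy; replace with a live one
res = requests.get('https://httpbin.org/ip', proxies=proxies, timeout=5)
print(res.text)  # should print the proxy's IP, not your own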

5. Using a proxy with selenium

from selenium.webdriver import Chrome, ChromeOptions

options = ChromeOptions()

# Set the proxy
options.add_argument('--proxy-server=http://122.137.231.226:4524')

b = Chrome(options=options)
b.get('https://movie.douban.com/top250?start=0&filter=')
input()

6. Crawler strategies

(Figure: crawler-strategy diagram; the original image failed to upload and its content is not recoverable.)

7. XPath parsing

# XPath is a method for parsing HTML or XML data; it selects tags (elements) by path.
"""
Python数据:{'name': 'xiaoming', 'age': 18, 'is_ad': True, 'car_no': None}

Json数据:{"name": "xiaoming", "age": 18, "is_ad": true, "car_no": null}

xml数据: 
<allStudent>
    <student class="优秀学员">
        <name>xiaoming</name>
        <age>18</age>
        <is_ad>是</is_ad>
        <car_no></car_no>
    </student>
    <student class="优秀学员">
        <name>xiaoming</name>
        <age>18</age>
        <is_ad>是</is_ad>
        <car_no></car_no>
    </student>
</allStudent>
"""

# 1. Common concepts
"""
1) Tree: a whole web page or XML document forms a tree structure
2) Element (node): each tag in the HTML tree
3) Root node: the first node in the tree
4) Content: a tag's text content
5) Attribute: a tag's attributes
"""


# 2. XPath syntax
"""
1. Selecting tags
1) Absolute path: starts with '/', then spells out the path level by level from the root node
2) Relative path: starts with '.' or '..', where '.' is the current node and '..' is the current node's parent.
   Note: a leading './' can be omitted
3) Full path: a path starting with '//'

2. Getting tag content: append '/text()' to the tag's path
3. Getting a tag attribute: append '/@attribute_name' to the tag's path
"""
# ============================== Application ===================================
from lxml import etree

# 1. Build the tree structure and get the root node
html = open('data.html', encoding='utf-8').read()
root = etree.HTML(html)

# 2. Select tags by path
# node.xpath(path)  -  returns all tags matched by the path as a list; the elements are node objects

# 1) Absolute path
result = root.xpath('/html/body/div/a')
print(result)

# Get tag content
result = root.xpath('/html/body/div/a/text()')
print(result)

# Get a tag attribute
result = root.xpath('/html/body/div/a/@href')
print(result)

# An absolute path yields the same result regardless of which node .xpath() is called on
div = root.xpath('/html/body/div')[0]

result = div.xpath('/html/body/div/a/text()')
print(result)

print('-------------------------------- divider -------------------------------------')
# 2) Relative path
result = root.xpath('./body/div/a/text()')
print(result)

result = div.xpath('./a/text()')
print(result)

result = div.xpath('a/text()')
print(result)

# 3) Full path
result = root.xpath('//a/text()')
print(result)

result = div.xpath('//a/text()')
print(result)

result = root.xpath('//div/a/text()')
print(result)

print('-------------------------------- divider -------------------------------------')
# 3. Predicates (conditions)  -  appended to a node in the path as [...]
# 1) Position predicates
"""
[N]             -   the Nth matching tag (N starts at 1)
[last()]        -   the last matching tag
[last()-N]      -   the (N+1)th matching tag from the end
[position()>N], [position()>=N], [position()<N], [position()<=N]
"""
result = root.xpath('//span/p[2]/text()')
print(result)

result = root.xpath('//span/p[last()]/text()')
print(result)

result = root.xpath('//span/p[position()<=2]/text()')
print(result)

result = root.xpath('//span/p[position()>2]/text()')
print(result)

result = root.xpath('//span/p[last()-1]/text()')
print(result)

print('-------------------------------- divider -------------------------------------')
# 2) Attribute predicates
"""
[@attribute_name=value]  -  e.g. [@id="p1"]
"""
result = root.xpath('//span/p[@id="p1"]/text()')
print(result)

result = root.xpath('//span/p[@class="c1"]/text()')
print(result)

result = root.xpath('//span/p[@data="5"]/text()')
print(result)

# 4. Wildcards
# In XPath, * stands for any tag or any attribute
result = root.xpath('//span/*/text()')
print(result)

result = root.xpath('//span/p[@class="c1"]/text()')
print(result)

result = root.xpath('//span/*[@class="c1"]/text()')
print(result)


result = root.xpath('//span/span/@*')
print(result)

result = root.xpath('//*[@class="c1"]/text()')
print(result)






8. Example: scraping Douban with XPath

import requests
from lxml import etree

# 1. Fetch the page data
headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}
proxies = {
    'https': '117.70.49.86:4531'
}
response = requests.get('https://movie.douban.com/top250?start=125&filter=', headers=headers, proxies=proxies)

# 2. Parse the data
root = etree.HTML(response.text)

names = root.xpath('//div[@class="hd"]/a/span[1]/text()')
scores = root.xpath('//span[@class="rating_num"]/text()')
comments = root.xpath('//div[@class="star"]/span[last()]/text()')
msgs = root.xpath('//p[@class="quote"]/span/text()')
print(names)
print(scores)
print(comments)
print(msgs)
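
The four lists above are parallel (one entry per movie), so they can be stitched into records, e.g. with zip. A usage sketch (note: if a movie on the page has no quote, msgs may come out shorter than the other lists and the rows will drift out of step):

for name, score, comment, msg in zip(names, scores, comments, msgs):
    print(name, score, comment, msg)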


