Python爬虫:常见的反爬

常见的反爬

爬虫流程

  1. 确定爬虫对象(爬哪个网页的数据)
  2. 找接口
    • 有接口:直接对接口发送请求
      • 成功(直接json解析)
      • 失败就进入下一步
    • 没有接口,直接进入下一步
  3. 用requests直接对网页地址发送请求
    • 请求成功 -> 解析数据(bs4、lxml)
    • 请求失败 -> 尝试添加user-agent和cookie,成功就解析,失败进入下一步
  4. 用selenium打开网页,获取网页内容
    • 请求成功 -> 解析数据(bs4、lxml)
    • 请求失败 -> 找失败的原因,尝试解决失败的问题 -> 问题无法解决进入下一步
  5. 放弃,换一个目标

英雄联盟皮肤爬虫

import requests
import os


def get_all_hero_id():
    """Return the heroId of every hero from the official LoL hero-list endpoint.

    Returns:
        list: heroId values (strings) extracted from the JSON payload.
    """
    url = 'https://game.gtimg.cn/images/lol/act/img/js/heroList/hero_list.js'
    # timeout keeps a stalled connection from hanging the whole crawl
    response = requests.get(url, timeout=10)
    return [hero['heroId'] for hero in response.json()['hero']]


def get_one_hero_skins(hero_id):
    """Download every skin image of one hero into ./英雄皮肤/<hero name>/.

    Args:
        hero_id: heroId value as returned by get_all_hero_id().
    """
    # 1. Request the skin endpoint for this hero.
    url = f'https://game.gtimg.cn/images/lol/act/img/js/hero/{hero_id}.js'
    response = requests.get(url, timeout=10)
    result = response.json()

    # 2. Hero name -> per-hero folder.
    # makedirs creates missing parents and tolerates reruns (the original
    # os.mkdir fails when ./英雄皮肤 does not exist yet).
    hero_name = result['hero']['name']
    hero_dir = f'./英雄皮肤/{hero_name}'
    os.makedirs(hero_dir, exist_ok=True)

    # 3. All skins.
    for skin in result['skins']:
        # '/' in a skin name would be interpreted as a path separator
        skin_name = skin['name'].replace('/', ' ')
        img_url = skin.get('mainImg')
        if not img_url:
            # chroma/placeholder entries carry no main image — skip them
            continue
        # Download and write the image; the with-statement closes the file
        # even on error (the original open(...).write(...) leaked the handle).
        img_data = requests.get(img_url, timeout=10).content
        with open(f'{hero_dir}/{skin_name}.jpg', 'wb') as f:
            f.write(img_data)
        print('皮肤下载成功!')


if __name__ == '__main__':
    # Root folder for all hero skins; exist_ok makes reruns a no-op instead
    # of the original exists()/mkdir() two-step.
    os.makedirs('./英雄皮肤', exist_ok=True)

    # Crawl every hero sequentially.
    for h_id in get_all_hero_id():
        get_one_hero_skins(h_id)
    print('========皮肤全部下载完成==========')

requests使用代理IP

获取代理IP的方法:
获取代理IP

import requests


def get_html(url):
    """Fetch *url* through a proxy, print the body and return it as text.

    Args:
        url: page address to request.

    Returns:
        str: the response body.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
    }

    # 1. Use a proxy: pass the proxies mapping, shaped {'https': 'ip:port'}.
    # 1) fixed proxy ip; timeout keeps a dead proxy from hanging the call forever
    response = requests.get(url, headers=headers,
                            proxies={'https': '36.25.226.139:4513'},
                            timeout=10)

    # 2) alternatively, fetch a fresh proxy ip from the provider API per call:
    # ip = requests.get('http://d.jghttp.alicloudecs.com/getip?num=1&type=1&pro=&city=0&yys=0&port=11&time=4&ts=0&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions=').text.strip()
    # response = requests.get(url, headers=headers, proxies={'https': ip})
    # response = requests.get(url, headers=headers)
    print(response.text)
    return response.text


if __name__ == '__main__':
    # Sanity run against Douban's Top-250 movie list.
    target_url = 'https://movie.douban.com/top250'
    get_html(target_url)

selenium使用代理IP

from selenium.webdriver import Chrome, ChromeOptions

# Browser options: hide the "controlled by automation" banner and skip
# image loading so pages render faster.
chrome_opts = ChromeOptions()
chrome_opts.add_experimental_option('excludeSwitches', ['enable-automation'])
chrome_opts.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})

# 1. Route traffic through a proxy.
# --proxy-server=http://IP:port    (the ip/port must be an https-capable proxy)
chrome_opts.add_argument('--proxy-server=http://122.6.202.214:4510')

browser = Chrome(options=chrome_opts)

browser.get('https://movie.douban.com/top250')

requests自动登录

requests完成自动登录的步骤:

  1. 在谷歌浏览器中打开网页完成登录操作,然后刷新页面
  2. 打开当前页面的检查,在network的All选项下,找到当前页面的请求,获取Request Headers中的cookie值
  3. 在用requests发送请求的时候给headers赋值,在headers中添加cookie对应的键值对
import requests

# NOTE: the cookie below was copied from the browser's Request Headers after a
# manual login; it is account-specific and will expire.  The original source had
# this single literal broken across two physical lines (a syntax error); the two
# adjacent string literals below are concatenated implicitly by Python.
headers = {
    'cookie': '_zap=b7912ec1-8e24-4d72-81cd-8fa2a2b5f78f; d_c0="AECfz77blxSPTv9pswdcr3xSHE0eCEMj0Dc=|1646568427"; _xsrf=qy8EtuCtpOGgIUmA3g0qSi2edFqOUIBw; __snaker__id=kvuiOSuliPGkosHY; _9755xjdesxxd_=32; YD00517437729195%3AWM_TID=pJYUp8Detk5AUUUUFFc6upRtNshfmnQX; q_c1=51a6e1898c4d46a594865b3db1dd3e95|1647227093000|1647227093000; NOT_UNREGISTER_WAITING=1; gdxidpyhxdE=q6eDxuI%5CS11auZ9%2Be%5C%2BxIx%2F2cg96ULLZchHGSiL8EQvgYs9OmbeiyhhCXoa%5C%2BblJQfD%5CayzzA8oosyB%2FhPPMm7%2Fkd8W5prCdSYuppYUL5qdoyPdjsHcgA8pZgBquceXtX6di5Mu46C7dEKcHakVA7mxxjMZh%2Bre5j%2F4AQta4bulZ39y%5C%3A1647834647437; YD00517437729195%3AWM_NI=oe9bRvHOQTEjCNug5CHPzre%2BCdBGZr6dru1M9KaaTCyY5hZsiM2d%2FSXjfLKsl91VYoTN6x%2Fvc%2FWYtZWgHtA%2BkNpQdcQgt86C%2F1Vffl9dc8gqk08aF1%2Bp0LL%2BSZzsecFXbUI%3D; YD00517437729195%3AWM_NIKE=9ca17ae2e6ffcda170e2e6ee99e96f86e9ada5f97cbaac8aa3c84f878a9a85aa7afbeeac8cc97e8c989f8bb32af0fea7c3b92aadbbf783d17e8c919fb5c8628192bb88b8498c90b693e82181bc8ea8d97af6bcbe96b447fb95af82d749bb87fb89d979ab93fe85ae599cb6aaadca79b0e7ffb7dc659892fa99ae7ea1f199baf75295b98895c421a992c0b2e76993b9bcd9b549edef9a87e568fbbcadd9e67ba9f0f882c53be9b000adcf219186b694e8698688aeb6cc37e2a3; captcha_session_v2=2|1:0|10:1647833755|18:captcha_session_v2|88:Q0FUYjlIemk4MklmZlA5ZlVOWDczbGlhYjRabzMxME1Fc0N6Szl4aGVTQU5JQkdGUklNWFRBVVZtU0hYQUVvLw==|cf267f149c0b9509d0e424579194d94fc6ec1f57567afd34050dc2a47f14bacc; '
              'captcha_ticket_v2=2|1:0|10:1647833769|17:captcha_ticket_v2|704:eyJ2YWxpZGF0ZSI6IkNOMzFfZ2pLT21aR2FUbHdXWFo2SG9wTUY1OWRZTG0tcG1vcGlfNUtrUkpzUThJSmNlV2VJdVVzdEhWNks4TVNBcVhobEZPUkZMcjUuc3N1QXpmUzBHbnIuSUNIdjlnb04tdjdoMlBydUtGSmdmV2FYUUZ2SE10dUl0RU40TzRFQS14dEhtMXJRLWg3UHdRblRMVkt1VG1kX09oekdab0pzUGxUWGFmUVF1NzRfYjc4di1QUmlQVUJMR0s5TEhvTGpDX2JUMF9jNFllNlM1SVFYSGgtMHAwV1ZMR2IwMWVqMS5FdEg1ZzZHRUZNZWMwLU8uMXk3YnY2dzlHdGNmNEE1a3E2Ry5EOWJSVlN4QjRWWmZodU1Gdk54N2lvNkZNUjJCdmw4Q254R1NUeUhtLXI1Yl9xeEZPS00wLnJONjkweW14cTQ5MHNjUzBKeHBwUE9lY05BSDZyNV9Vb3ZxZkFkR191aEQwOGc5ZUkwR3k5X005QVkub3gxOS5QdE1GanQwc1dtNERVbVZwQ3ZJV3R4V21nUS5Nd0tVTk92U3N1ZFlmcXJGMmhIWjUyTXZZUzFLc1RQaGlTNTRQWDRETElCdXQuNmU0ZUpzUFUyUzBWeUJZUXE5WFZSOEUwYzE5MEVYX2x5cEJ2MVdManBzVXJlV0t3d19GcGhTLnguVnlGMyJ9|22db975942e5d8c1799fa7c70120378eed6a34bb8bdd00afa3c5eccf92436296; z_c0=2|1:0|10:1647833781|4:z_c0|92:Mi4xaW5CWUdRQUFBQUFBUUpfUHZ0dVhGQ1lBQUFCZ0FsVk50VUFsWXdEWFYyXzVOZVRzbGJmbVg4Mk1GNF95bUYtLWd3|9d5f40b82ca91b89a903d5d50df74ed66850c2b3ecf03d86510300f5eb729d96; tst=r; SESSIONID=hbQ6zfAEKL3cjwOoewSAbkpnJKR4GtTVNCDhhjrorQ2; KLBRSID=0a401b23e8a71b70de2f4b37f5b4e379|1647833814|1647833734',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
}
response = requests.get('https://www.zhihu.com/', headers=headers)

print(response.text)

selenium自动登录获取cookie

from selenium.webdriver import Chrome

# 1. Capture cookies and save them to a local file.
# Step 1: open the site that needs an automated login.
b = Chrome()
b.get('https://www.taobao.com/')

# 2. Log in by hand in the opened browser, then press Enter to continue.
input('是否完成:')

# 3. Dump the cookie list to a local file.  The with-statement closes the
#    handle even on error (the original open(...).write(...) leaked it).
#    NOTE: assumes the 'files' directory already exists — confirm before running.
cookies = b.get_cookies()
with open('files/taobao.txt', 'w', encoding='utf-8') as f:
    f.write(str(cookies))

selenium自动登录使用cookie

import ast

from selenium.webdriver import Chrome
from selenium.webdriver.common.keys import Keys

# 1. Open the target site first so the cookie domain matches.
b = Chrome()
b.get('https://www.taobao.com/')

# 2. Replay the cookies saved earlier.  ast.literal_eval parses the
#    list-of-dicts repr safely — the original eval() would execute arbitrary
#    code if the file were ever tampered with.
with open('files/taobao.txt', encoding='utf-8') as f:
    cookie_list = ast.literal_eval(f.read())
for cookie in cookie_list:
    b.add_cookie(cookie)

# 3. Reload the page so the logged-in session takes effect.
b.get('https://www.taobao.com/')

# 4. Continue with normal automation: search for a product.
search = b.find_element_by_id('q')
search.send_keys('雪糕')
search.send_keys(Keys.ENTER)

瓜子二手车字体反爬

  1. 打开当前页面的检查,在network的All选项下,在Name中找到带woff或woff2的选项,获取Headers中的Request URL地址
  2. 复制地址在新页面中打开下载文件
  3. 用FontEditor打开文件获取解码方式
import requests
from bs4 import BeautifulSoup
import re

headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
}
url = 'https://mapi.guazi.com/car-source/carList/pcList?page=1&pageSize=12&city_filter=12&city=12&guazi_city=12&tag_types=18&versionId=0.0.0.0&osv=IOS&platfromSource=wap'

# Obfuscated-font code point (hex string) -> real digit, read out of the
# site's woff file with FontEditor.
table = {
    '0xe1d0': '7', '0xe325': '4', '0xe41d': '1', '0xe52e': '9', '0xe630': '2', '0xe76e': '8',
    '0xe891': '5', '0xe9ce': '0', '0xeaf2': '3', '0xec4c': '6', '0xf88a': '7'
}


def decode_price(encoded):
    """Translate a price string containing '&#NNNNN;' font entities into digits.

    Each entity carries a decimal code point; hex() turns it into the '0x...'
    key used by *table*.  Plain fragments (e.g. '万') pass through unchanged.

    Args:
        encoded: raw price string from the listing API.

    Returns:
        str: the price with every entity replaced by its real digit.
    """
    decoded = ''
    for piece in encoded.split(';'):
        if piece.startswith('&#'):
            decoded += table[hex(int(piece[2:]))]
        elif piece.startswith('.&#'):
            # decimal point glued onto the entity: keep the dot, decode the rest
            decoded += '.' + table[hex(int(piece[3:]))]
        else:
            decoded += piece
    return decoded


if __name__ == '__main__':
    # Fetch the first page of listings and print each decoded price.
    response = requests.get(url, headers=headers, timeout=10)
    for car in response.json()['data']['postList']:
        print(decode_price(car['price']))

        if x.startswith('&#'):
            new_price += table[hex(int(x[2:]))]
        elif x.startswith('.&#'):
            new_price += '.' + table[hex(int(x[3:]))]
        else:
            new_price += x
    print(new_price)


  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值