Day 5 - Login and Proxy IP Study Notes


1. Scraping JD product listings

from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.by import By
import time, csv, os
from bs4 import BeautifulSoup


def analysis_data(html):
    # Parse one page of search results and append the extracted rows to a CSV file
    soup = BeautifulSoup(html, 'lxml')
    goods_li = soup.select('#J_goodsList>ul>li.gl-item')
    all_data = []
    for li in goods_li:
        name = li.select_one('.p-name>a').attrs['title']
        price = li.select_one('.p-price i').text
        comment_count = li.select_one('.p-commit a').text
        shop_name = li.select_one('.p-shop a').attrs['title']
        goods_url = 'https:' + li.select_one('.p-name>a').attrs['href']
        all_data.append([name, price, comment_count, shop_name, goods_url])

    # Write the header row only when the file does not exist yet
    result = os.path.exists('files/电脑.csv')
    with open('files/电脑.csv', 'a', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        if not result:
            writer.writerow(['商品名称', '价格', '评论数', '店铺名', '商品详情地址'])
        writer.writerows(all_data)


def get_net_data():
    options = ChromeOptions()
    # Hide the "Chrome is being controlled by automated test software" banner
    options.add_experimental_option('excludeSwitches', ['enable-automation'])
    # Disable image loading to speed up page loads
    options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
    b = Chrome(options=options)
    b.implicitly_wait(3)
    b.get('https://www.jd.com')
    b.find_element(By.ID, 'key').send_keys('电脑\n')

    for page in range(5):
        # Scroll down in steps so the lazy-loaded items get rendered
        for _ in range(10):
            b.execute_script('window.scrollBy(0, 800)')
            time.sleep(1)

        # Get the page source and parse it
        analysis_data(b.page_source)

        # Click the next-page button
        b.find_element(By.CLASS_NAME, 'pn-next').click()

    b.close()


if __name__ == '__main__':
    get_net_data()
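
If the next-page button has not finished loading when the loop reaches it, the click above can fail. A minimal sketch of an explicit wait, assuming Selenium 4's WebDriverWait / expected_conditions API, that could replace the plain find_element call:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

# Wait up to 10 seconds for the next-page button to become clickable, then click it
next_btn = WebDriverWait(b, 10).until(
    EC.element_to_be_clickable((By.CLASS_NAME, 'pn-next'))
)
next_btn.click()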

2. Handling login-protected pages with requests

1. Workflow for getting past a login with requests

1) Open the target site in Chrome and log in manually
2) Get the site's cookie information after logging in:

Inspect -> Network -> All -> site domain -> Headers -> Request Headers -> cookie

3) Add this key-value pair to the headers dict:

'cookie': the cookie value copied from DevTools

import requests

headers = {
    'cookie': '_zap=bccd72c4-94da-4aad-b6f4-c4a076540641; d_c0="APCfiebWwxSPTmf4t0eyATFtTgFHdTZJ_sk=|1649519947"; _xsrf=7RrxkbJCk4djlHfCY5NfKwhhm5IDMyOD; __snaker__id=ncPyNsuTImwpYs5U; gdxidpyhxdE=R63k63BTZgZmXMVXEoiGKqKXyt%5C%2Fwv%5CicGl9ILEMdLjgbXj7nk27VJCDgVByWgL2E9%5C81w5II3sRI%2BLlrU%5CNujzOp%2FwpBDxMoiUCttYM9TUr%5C%2BQqMfbqqZJBEnfpo9CWB%2FuKHMltgaSI1NYgTxXcR3WmZ5ZsoHtDeaKKogUenjN9E8Lv%3A1652758223246; _9755xjdesxxd_=32; YD00517437729195%3AWM_NI=Xuk9mmRu2obPKenE5xTdT7AqXl1rL8PVAmtSkwPqQJEAi%2Fgw5Rrf29oqo7haaB55HXMpL5Jui%2FVOahF9E1ApTJEtnvVCnhIJGf3LaCANP%2FHMABabdhMDZF2MtcUI9OopcWc%3D; YD00517437729195%3AWM_NIKE=9ca17ae2e6ffcda170e2e6eeb8c947b2b6afd0d75f819a8fa7c14a938f8f82d44e94869eb2d879ae989ea4f92af0fea7c3b92a9caba5daf06792abfe90e15c89bba2a6b87498bc9cd8c47b9cb4ff93cf34948a8f8ef759a9a9a092d442879fa7d0e67df78ebe87b133adafa1d8eb6ba39efbd6cb548fb084d6c97fabf19ab0e46486e78bd2c47c98968b95ed7e969da289cc4285bafdade4498392a0d0cb47839eaf83e7648f9c96a7b147b8bd9baabc45adb0adb8e237e2a3; YD00517437729195%3AWM_TID=Pc%2Fuvsk7znBBBVVAUFaQQKIAVRsmXUH3; z_c0=2|1:0|10:1652757662|4:z_c0|92:Mi4xaW5CWUdRQUFBQUFBOEotSjV0YkRGQ1lBQUFCZ0FsVk5ubUp3WXdCZmJBa3hwTlZ1dE1Rekd3SUR6cTQwVTFENEtB|7cb09bcd80a91a0f43cefc8f5fd17ec6c5c09a296a998fc072c8f3102369355c; q_c1=b37ba0a575a145f488c3797eca033459|1652757662000|1652757662000; NOT_UNREGISTER_WAITING=1; tst=r; SESSIONID=iLxUXABdCV8GFi7I5vlsdHR5A5BUWeimkSwjzksuYXm; KLBRSID=fe78dd346df712f9c4f126150949b853|1656039061|1656038718',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}
response = requests.get('https://www.zhihu.com/', headers=headers)

print(response.text)
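
A small follow-up sketch (the Session object is not part of the original notes): putting the headers on a requests.Session means the cookie is sent automatically with every later request instead of being repeated on each call:

import requests

session = requests.Session()
session.headers.update(headers)   # headers includes the copied 'cookie' value

# Every request made through this session now carries the login cookie
response = session.get('https://www.zhihu.com/')
print(response.status_code)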

3. Getting cookies with Selenium

from selenium.webdriver import Chrome
from json import dumps

1. Open the site you want to automate the login for

b = Chrome()
b.get('https://www.taobao.com')

2. Leave enough time to complete the login manually in this window (make sure the window controlled by b actually shows the logged-in state before continuing)

input('Login complete: ')

3. Save the cookies from the logged-in session to a local file (a JSON file is recommended)

cookies = b.get_cookies()

with open('files/taobao.json', 'w', encoding='utf-8') as f:
    f.write(dumps(cookies))

b.close()
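
For reference, get_cookies() returns a list of dicts (name, value, domain, path and so on), which is why the saved file can be fed straight back into add_cookie later. A quick sketch to inspect what was written:

from json import loads

with open('files/taobao.json', encoding='utf-8') as f:
    saved = loads(f.read())

print(len(saved), 'cookies saved')
print(saved[0])   # e.g. {'name': ..., 'value': ..., 'domain': ..., 'path': ...}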

4. Using saved cookies with Selenium

from selenium.webdriver import Chrome
from json import loads

1. Open the site you want to scrape

b = Chrome()
b.get('https://www.taobao.com')

2. Read the cookie information from the file and add it to the browser object

with open('files/taobao.json', encoding='utf-8') as f:
    cookies = loads(f.read())

for x in cookies:
    b.add_cookie(x)

3. Reload the page so the cookies take effect

b.get('https://www.taobao.com')

input('end:')
b.close()
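
The same saved JSON can also be reused with requests instead of Selenium. A minimal sketch (the cookie_header variable and the flattening step are assumptions, not part of the original notes) that turns the cookie list into a Cookie request header:

import requests
from json import loads

with open('files/taobao.json', encoding='utf-8') as f:
    cookies = loads(f.read())

# Selenium stores cookies as a list of dicts; requests wants 'name=value; name=value; ...'
cookie_header = '; '.join(f"{c['name']}={c['value']}" for c in cookies)

headers = {
    'cookie': cookie_header,
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}
response = requests.get('https://www.taobao.com', headers=headers)
print(response.status_code)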

5. Using a proxy IP with requests

import requests
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}

# Build the proxies mapping (the commented-out form below spells out the proxy scheme explicitly)
# proxies = {
#     'https': 'http://183.165.224.25:4554',
#     'http': 'http://183.165.224.25:4554'
# }
proxies = {
    'https': '183.165.224.25:4554',
    'http': '183.165.224.25:4554'
}
# Use the proxy when sending the request
response = requests.get('https://www.maoyan.com/', headers=headers, proxies=proxies)
# Set the encoding explicitly to avoid garbled text
response.encoding = 'utf-8'
print(response.text)
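
Free or short-lived proxies fail often, so in practice the request above needs a timeout and error handling. A minimal sketch of that, with an arbitrary limit of three attempts:

import requests

for attempt in range(3):
    try:
        # A dead proxy would otherwise hang or raise ProxyError / ConnectTimeout
        response = requests.get('https://www.maoyan.com/', headers=headers,
                                proxies=proxies, timeout=5)
        response.encoding = 'utf-8'
        print(response.text[:200])
        break
    except requests.exceptions.RequestException as e:
        print('proxy request failed, retrying:', e)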

6. Proxy IPs in practice

import requests
import time
from bs4 import BeautifulSoup
def get_ip():
    """
    Fetch a proxy IP from the provider; if the request fails, wait 2 seconds and try again.
    :return: the obtained proxy IP address
    """
    while True:
        response = requests.get('http://d.jghttp.alicloudecs.com/getip?num=1&type=1&pro=510000&city=510600&yys=0&port=1&time=2&ts=0&ys=0&cs=0&lb=4&sb=0&pb=4&mr=1&regions=')
        result = response.text
        # A leading '{' means the provider returned a JSON error object instead of an IP
        if result[0] == '{':
            print('Failed to get a proxy IP')
            time.sleep(2)
        else:
            return result


def get_net_data():
    url = 'https://www.maoyan.com/'
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
    }

    # Send the request through a proxy IP; if the proxy fails, get a new IP and send the request again
    while True:
        ip = get_ip()
        print(ip)
        proxy = {
            'https': ip
        }
        try:
            response = requests.get(url, headers=headers, proxies=proxy, timeout=5)
        except requests.exceptions.RequestException:
            # The proxy is dead or blocked; go back and fetch a new IP
            continue
        response.encoding = 'utf-8'
        print(response.text)

        soup = BeautifulSoup(response.text, 'lxml')
        movies_div = soup.select('.movie-list .movie-item')
        if len(movies_div) == 0:
            # No movie items means an anti-bot page came back; retry with another IP
            continue
        else:
            print('Scrape succeeded! Continue with the parsing steps')
            break


if __name__ == '__main__':
    get_net_data()
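
Both while True loops above can spin forever if the proxy provider is down. A minimal sketch of a capped variant (get_ip_limited, IP_API and the limit of 10 attempts are assumptions added here, not part of the original notes):

import time
import requests

IP_API = 'http://d.jghttp.alicloudecs.com/getip?num=1&type=1&pro=510000&city=510600&yys=0&port=1&time=2&ts=0&ys=0&cs=0&lb=4&sb=0&pb=4&mr=1&regions='


def get_ip_limited(max_retries=10):
    """Like get_ip above, but give up after max_retries failed attempts instead of looping forever."""
    for _ in range(max_retries):
        result = requests.get(IP_API).text
        if result[0] == '{':      # the provider returned a JSON error instead of an IP
            time.sleep(2)
        else:
            return result
    raise RuntimeError('could not obtain a proxy IP from the provider')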

7. Using a proxy IP with Selenium

from selenium.webdriver import Chrome, ChromeOptions
options = ChromeOptions()
# Route all browser traffic through the given proxy server
options.add_argument('--proxy-server=http://115.208.231.37:4545')

b = Chrome(options=options)
b.get('https://www.maoyan.com/')

print(b.page_source)

input('end:')
b.close()
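
The hard-coded proxy above will stop working quickly; it can be combined with a get_ip style helper (as in section 6) so each browser session uses a fresh address. A minimal sketch, assuming get_ip returns an 'ip:port' string:

from selenium.webdriver import Chrome, ChromeOptions

ip = get_ip()   # e.g. '115.208.231.37:4545', freshly fetched from the provider
options = ChromeOptions()
options.add_argument(f'--proxy-server=http://{ip}')

b = Chrome(options=options)
b.get('https://www.maoyan.com/')
print(b.page_source[:200])
b.close()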