python100day - day23-SeleniumAndProxy

python100day - day23-SeleniumAndProxy

1.selenium的基本设置
from selenium import webdriver
from selenium.webdriver import ChromeOptions

# Demo 1: basic Selenium Chrome configuration.
# 1. Create the options object
options = ChromeOptions()

# 2. Suppress the "browser is being controlled by automated software" detection flag
options.add_experimental_option('excludeSwitches', ['enable-automation'])

# 3. Disable image loading (speeds up page loads); preference value 2 blocks images
options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})

b = webdriver.Chrome(options=options)
b.get('https://www.jd.com/')
2.selenium交互
import time
from selenium import webdriver
from selenium.webdriver import ActionChains

b = webdriver.Chrome()  # shared browser instance used by jing_dong() and scroll() below


def jing_dong():
    """Drive a JD.com login attempt, then try to drag the slider captcha.

    NOTE(review): the CSS selectors and the fixed 100px drag offset are
    site-specific demo values; a real captcha will normally reject a
    constant-speed drag.
    """
    # Open the JD.com home page
    b.get('https://www.jd.com/')
    # Find the login entry button and click it
    login_btn = b.find_element_by_css_selector('.user_login')
    login_btn.click()
    # Switch to the username/password login tab
    user_btn = b.find_element_by_css_selector('.login-tab.login-tab-r')
    user_btn.click()
    # Locate the account and password input boxes
    user_name = b.find_element_by_css_selector('#loginname')
    password = b.find_element_by_css_selector('#nloginpwd')
    user_name.send_keys('aaa')
    password.send_keys('123456')
    # Locate the login submit button
    login_btn = b.find_element_by_css_selector('.login-btn')
    login_btn.click()

    # Drag the slider captcha
    slider = b.find_element_by_css_selector('.JDJRV-slide-btn')
    # 1) Create an action-chain object: ActionChains(driver)
    action = ActionChains(b)
    # 2) Add a press-and-hold action on the slider and perform it
    action.click_and_hold(slider).perform()
    # 3) Add a drag action
    # drag_and_drop_by_offset(element, x_offset, y_offset)
    # drag_and_drop(source_element, target_element)
    action.drag_and_drop_by_offset(slider, 100, 0).perform()
    # 4) Pause, then repeat the hold-and-drag once more
    # action.pause(3).perform()
    time.sleep(3)
    action.click_and_hold(slider).perform()
    action.drag_and_drop_by_offset(slider, 100, 0).perform()


def scroll():
    """Load JD.com and auto-scroll the page via injected JavaScript.

    The injected script sets a 300ms timer that scrolls 200px per tick and
    stops itself once it passes the document height.
    """
    b.get('https://jd.com')

    # Scrolling JS primitive: window.scrollTo(0, y)

    # Alternative: drive the scroll from Python (slower — one round-trip per step):
    # b.execute_script('alert("你好吗?")')
    # height = 100
    # while height < 17772:
    #     b.execute_script(f'window.scrollTo(0, {height})')
    #     height += 100
    #     time.sleep(1)

    js = """
    height = 100
    //添加定时器,每隔300毫秒滚动200像素
    t = setInterval(function(){
        max = document.body.scrollHeight
        window.scrollTo(0, height)
        height += 200
            if(height > max){
                clearInterval(t)
            }
        }, 300)
    """
    b.execute_script(js)




if __name__ == '__main__':
    # Run the page-scrolling demo (jing_dong() is the other demo in this section).
    scroll()
3.网易邮箱(嵌套页面)
from selenium import webdriver
from selenium.webdriver import ChromeOptions

# Demo 3: logging in to 163 mail, whose form lives inside a nested page.
options = ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-automation'])
b = webdriver.Chrome(options=options)

b.get('https://mail.163.com/')
# The content we need is in a nested page (an <iframe> element); the driver
# must switch into that frame before its elements can be located.
# 1. Get the iframe element hosting the nested page
frame = b.find_element_by_css_selector('#loginDiv>iframe')
# 2. Switch the driver's context into the frame
b.switch_to.frame(frame)
# 3. Locate elements inside the nested page
user_name = b.find_element_by_name('email')
password = b.find_element_by_name('password')
login_btn = b.find_element_by_id('dologin')
user_name.send_keys('y_t209')
password.send_keys('123456')
login_btn.click()
4.等待
import time
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

# Demo 4: explicit waits — search 51job for "python" and page through the
# results, waiting until the "next page" control is clickable each time.
options = ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-automation'])
b = webdriver.Chrome(options=options)

b.get('https://www.51job.com/')
# Renamed from `input`/`next` — those shadowed the Python builtins.
search_box = b.find_element_by_id('kwdselectid')
search_box.send_keys('python')
search_box.send_keys(Keys.ENTER)

# Explicit wait: blocks up to 10s until the condition holds. Hoisted out of
# the loop — one WebDriverWait instance can be reused for every page.
wait = WebDriverWait(b, 10)

for _ in range(10):
    print(b.page_source)
    time.sleep(2)
    # presence_of_element_located only checks the element exists in the DOM;
    # element_to_be_clickable additionally requires it visible and enabled.
    # next_button = wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'next')))
    next_button = wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'next')))
    try:
        next_button.click()
    except Exception:
        # A click can still fail (e.g. an overlay intercepts it); back off
        # briefly instead of aborting the whole pagination loop. Narrowed
        # from a bare `except:`, which also swallowed KeyboardInterrupt.
        time.sleep(1)
5.获取代理
import requests


# ===============获取代理ip=================
def get_ip():
    """Fetch proxy IPs from the mogumiao API.

    :return: list of 'host:port' strings on success; None when the API
             reports an error or the HTTP request fails.
    """
    url = 'http://piping.mogumiao.com/proxy/api/get_ip_bs?appKey=6226c130427f487385ad7b5235bc603c&count=5&expiryDate=0&format=2&newLine=3'
    response = requests.get(url)
    if response.status_code != 200:
        print('请求失败')
        return None
    # The API signals errors with a JSON object body; startswith() is also
    # safe on an empty body, where the original `response.text[0]` raised
    # IndexError.
    if response.text.startswith('{'):
        print('获取ip失败')
        return None
    # splitlines() handles both '\n' and '\r\n' terminators — the request
    # asks for newLine=3 (CRLF), so splitting on '\n' alone left a stray
    # '\r' on every entry. Blank lines are dropped.
    return [ip.strip() for ip in response.text.splitlines() if ip.strip()]


def use_proxy():
    """Fetch a proxy via get_ip() and request a listings page through it.

    Prints the page HTML on success; prints a failure message otherwise.
    """
    ips = get_ip()
    if not ips:
        print('获取ip失败!')
        return
    # Use the SAME proxy address for both schemes. The original used
    # ips[0]/ips[1], which raises IndexError whenever the API returns a
    # single address, and routed http and https through different hosts.
    proxy = {'http': ips[0], 'https': ips[0]}
    print(proxy)
    response = requests.get('https://cd.fang.anjuke.com/loupan/all/p1/', proxies=proxy)
    if response.status_code == 200:
        print(response.text)
    else:
        print('请求失败!', response)


# Guard the demo request so it only runs when executed as a script, not on import.
if __name__ == '__main__':
    use_proxy()
6.正则数据解析
import requests
import re


def get_data():
    """Download the Douban Top-250 page and hand the HTML to analysis_data()."""
    url = 'https://movie.douban.com/top250'
    # Browser-like User-Agent so the request is not rejected as a bot.
    header = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'
    }
    response = requests.get(url, headers=header)
    # Guard clause: bail out on any non-200 status.
    if response.status_code != 200:
        print('请求失败')
        return
    analysis_data(response.text)


def analysis_data(data):
    """Extract (title, rating, vote-count, quote) tuples from Douban Top-250 HTML.

    :param data: raw HTML of a top250 list page
    :return: list of 4-tuples of strings, one per matched movie entry
             (empty list when nothing matches)
    """
    # (?s) makes '.' match newlines so one lazy pattern spans a whole <li> entry.
    re_str = r'(?s)<li>.+?<span class="title">(.+?)</span>.+?<span class="rating_num" property="v:average">(.+?)</span>.+?<span>(.+?)</span>.+?<span class="inq">(.+?)</span>.+?</li>'
    result = re.findall(re_str, data)
    print(result)
    # Return the parsed rows so callers can use them — the original discarded
    # them (implicit None) and also dumped the entire raw page to stdout.
    return result




if __name__ == '__main__':
    # Run the Douban scrape only when executed directly.
    get_data()
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值