python_selenium and XPath parsing

1. 51job data analysis jobs
from selenium.webdriver import Chrome
from bs4 import BeautifulSoup
import csv 
from selenium.webdriver.common.keys import Keys
import re
import time, json

f = open('files/数据分析.csv', 'a', encoding='utf-8', newline='')  # newline='' keeps csv.writer from inserting blank rows on Windows
writer = csv.writer(f)
writer.writerow(['岗位', '薪资', '公司', '地址'])
b = Chrome()


def get_net_data():

    b.get('https://www.51job.com/')
    search_input = b.find_element_by_css_selector('#kwdselectid')
    search_input.send_keys('数据分析')
    search_input.send_keys(Keys.ENTER)

    while True:
        # get paging info: current page / total page count
        page_div = b.find_element_by_css_selector('.rt.rt_page')
        pages = page_div.text.split('/')
        all_page = int(pages[-1])
        current_page = int(pages[0])

        # grab the page source and save this page's jobs
        # print(b.page_source)
        save_data(b.page_source)

        if current_page < 10:      # only scrape the first 10 pages (use all_page to scrape every page)
            next_btn = b.find_element_by_css_selector('.next')
            next_btn.click()
        else:
            break


def save_data(html: str):
    soup = BeautifulSoup(html, 'lxml')
    all_job_div = soup.select('.j_joblist>.e')      # one div per job posting
    one_page_jobs = []
    for job_div in all_job_div:
        name = job_div.select_one('.jname.at').get_text()
        sal = job_div.select_one('.sal').get_text()
        company = job_div.select_one('.cname.at').get_text()
        job_url = job_div.select_one('.el').attrs['href']
        one_page_jobs.append([name, sal, company, job_url])
    writer.writerows(one_page_jobs)


if __name__ == '__main__':
    get_net_data()
    f.close()      # flush and close the CSV file
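The find_element_by_... helpers used above were removed in Selenium 4. A minimal sketch of the same search-box lookup with the By-based locator API, assuming a Selenium 4 install:

from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

b = Chrome()
b.get('https://www.51job.com/')
# same search box as above, located through Selenium 4's find_element(By..., ...) API
search_input = b.find_element(By.CSS_SELECTOR, '#kwdselectid')
search_input.send_keys('数据分析')
search_input.send_keys(Keys.ENTER)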
2. Back/forward and switching tabs
from selenium.webdriver import Chrome
import time

# ============== 1. Back and forward ================
# b = Chrome()
# b.get('https://www.baidu.com')
# time.sleep(1)
# b.get('https://www.runoob.com')
# time.sleep(1)
# b.get('https://movie.douban.com/top250')
# time.sleep(1)
# b.back()
# time.sleep(1)
# b.forward()


b = Chrome()
b.get('https://movie.douban.com/')
music = b.find_element_by_css_selector('.global-nav-items>ul>li:nth-child(4)>a')
url = music.get_attribute('href')
music.click()

time.sleep(2)
# switch tabs: handle 0 is the original tab; the click above opened the link in a new tab
b.switch_to.window(b.window_handles[0])
# b.get('https://movie.douban.com/')

time.sleep(2)
b.switch_to.window(b.window_handles[1])     # switch to the newly opened tab
# b.get(url)

print('----------------------')
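A small follow-up sketch, assuming the click above really did open the page in a second tab: close that tab and return to the original handle.

b.close()                                   # closes only the currently active tab
b.switch_to.window(b.window_handles[0])     # the original tab is the one that remains
print(b.title)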
3. Waits
from selenium.webdriver import Chrome
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

b = Chrome()
b.get('https://read.douban.com/')

"""
1. 隐式等待
如果设置了隐式等待时间,那么浏览器对象在通过find_element相关方法获取标签的时候,在找不到对应的时候不会马上报错,
而是在指定时间内不断尝试获取该标签,如果超过了指定时间还是获取不到才会报错
"""
# the implicit wait only needs to be set once; it then applies globally
b.implicitly_wait(2)
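As a quick illustration (the selector '#no-such-element' is a made-up example that matches nothing): with the 2-second implicit wait set above, a failed lookup only raises NoSuchElementException after roughly 2 seconds of retrying.

from selenium.common.exceptions import NoSuchElementException
import time

start = time.time()
try:
    b.find_element_by_css_selector('#no-such-element')   # hypothetical selector, matches nothing
except NoSuchElementException:
    print('gave up after about', round(time.time() - start, 1), 'seconds')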


"""
2. 显式等待
1) 创建等待对象:WebDriverWait(浏览器对象, 超时时间, 检测时间间隔=500ms)   
2) 
等待对象.until(条件)  -  等到指定条件为True获取对应的标签或者结果。如果过了超时间这个添加都不成立,会异常
等待对象.until_not(条件)  -  等到指定条件为False获取对应的标签

常见的条件:
EC.presence_of_element_located      -       判断某个元素是否被加到dom树里(判断某个标签是否加载到网页中,不一定可见),条件成立的时候返回对应的标签
EC.visibility_of_element_located    -   判断某个标签是否可见(没有隐藏,并且元素的宽度和高度都不等于0),条件成立的时候返回对应的标签
EC.text_to_be_present_in_element   -  判断某个标签中的标签内容是否 包含 了预期的字符串,条件成立的时候返回布尔True
EC.text_to_be_present_in_element_value  - 判断某个标签中的value属性是否包含了预期的字符串,条件成立的时候返回布尔True
EC.element_to_be_clickable      -   判断某个标签是否可以点击,条件成立的时候返回对应的标签
"""
# wait = WebDriverWait(b, 5)
# wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'CSS selector')))
# wait.until(EC.visibility_of_element_located((By.ID, 'id value')))
# wait.until(EC.text_to_be_present_in_element((By.ID, 'id value'), 'Please log in'))
# wait.until_not(EC.text_to_be_present_in_element((By.ID, 'id value'), 'Please log in'))
# wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'CSS selector')))

# get the target tag directly (no explicit wait)
# a = b.find_element_by_css_selector('.widget-channel-links.section-container>a:nth-child(3)')
# a.click()

# wait = WebDriverWait(b, 10)
# a = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.widget-channel-links.section-container>a:nth-child(3)')))
# a.click()

wait = WebDriverWait(b, 10)
a = wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, '.widget-channel-links.section-container>a:nth-child(3)'), '女性'))
print(a)     # text_to_be_present_in_element returns a boolean, so this prints True
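If the condition never holds within the timeout, until raises TimeoutException; a minimal sketch of catching it, reusing the wait object and selector from above:

from selenium.common.exceptions import TimeoutException

try:
    link = wait.until(EC.element_to_be_clickable(
        (By.CSS_SELECTOR, '.widget-channel-links.section-container>a:nth-child(3)')))
    link.click()
except TimeoutException:
    print('the link did not become clickable within 10 seconds')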
4. JD (jd.com) page scrolling
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.keys import Keys
import time

options = ChromeOptions()
# hide the "Chrome is being controlled by automated test software" banner and suppress driver logging
options.add_experimental_option("excludeSwitches", ['enable-automation', 'enable-logging'])
# block image loading to speed up page loads
options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})


b = Chrome(options=options)
b.get('https://www.jd.com')
b.implicitly_wait(2)

search_input = b.find_element_by_id('key')
search_input.send_keys('电脑')
search_input.send_keys(Keys.ENTER)

time.sleep(2)

# scroll down the results page in 500px steps so that lazy-loaded items get rendered
max_height = 8900      # rough total height of the results page
height = 500
while True:
    b.execute_script(f'window.scrollTo(0,{height})')
    height += 500
    time.sleep(1)
    if height > max_height:
        break


# b.execute_script('alert("To Bottom")')
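Instead of hard-coding max_height, the page height can be read from the browser on each round; a sketch assuming the same b object is still on the search-result page:

# keep scrolling until the bottom of the (lazily growing) page is reached
height = 0
while True:
    total_height = b.execute_script('return document.body.scrollHeight')
    if height >= total_height:
        break
    height += 500
    b.execute_script(f'window.scrollTo(0,{height})')
    time.sleep(1)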
5. NetEase Mail (163.com)
from selenium.webdriver import Chrome

"""
前端在实现网页功能的时候可能出现网页中嵌套网页的现象,如果要在一个网页中嵌套另外一个网页,必须使用iframe标签。
selenium爬取的时候,通过浏览器对象默认获取到的是最外层的html对应的网页,如果要获取嵌套
页面中的内容,必须同switch_to来切换frame
"""

b = Chrome()
b.get('https://mail.163.com/')

# switch into the nested html document
frame = b.find_element_by_css_selector('#loginDiv>iframe')
b.switch_to.frame(frame)

print(b.page_source)
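After working inside the frame you can switch back to the top-level document with default_content; a minimal sketch (the 'input[name="email"]' selector in the commented lines is only an assumption about 163.com's login markup, not verified):

# operate inside the iframe first, e.g. fill in the account box
# b.find_element_by_css_selector('input[name="email"]').send_keys('example')

# switch back out of the iframe to the outermost html document
b.switch_to.default_content()
print(b.title)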
6. Crawler summary
import requests
"""
1.用requests + 网页地址
2.用requests + 网页地址 + user-agent
3.如果需要登录:用requests + 网页地址 + user-agent + cookie
4.找json接口
5.使用selenium
6.放弃
"""
# strategy 4: call the site's JSON API directly (params and encSecKey are the site's encrypted form fields)
url = 'https://music.163.com/weapi/copyright/pay_fee_message/config?csrf_token='
data = {
    'params':'6dXzk+WgT/Z5xyz1y4uiSd4Y3ZnijDlZRG31VTOQZpcV9MOf4ZVlBUnsu9XW5cDt',
    'encSecKey': 'b216d2737920dbffba3acf8ccc42a886afa34715beef702a29919ae5275f1144fd797f93315b70d5e959ad359709a764f0406c314aa6aab45012245937bdeaa9136ef957a8d862cbbad2c322fb452deb5731c34801cd82127f1a4261a9a87786d8b1ef18ff4aff7274f53e64a302db0a7ca3c0a364f6bb5c563a06c50f274297'
}
response = requests.post(url, data=data)
print(response.text)
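A minimal sketch of strategies 2 and 3 from the list above; the User-Agent string is a generic example and the cookie value is a placeholder you would copy from your own logged-in browser:

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36',
    # strategy 3: attach the cookie copied from a logged-in session (placeholder value)
    'cookie': 'key1=value1; key2=value2'
}
response = requests.get('https://movie.douban.com/top250', headers=headers)
print(response.status_code)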