爬虫动态网页(原)

import requests
from lxml import etree

# Static-page example: fetch the QQ portal and extract all anchor texts.
# NOTE: the original paste used fullwidth quotes (“ ”), which is a Python
# syntax error — replaced with ASCII quotes here.
url = "https://www.qq.com/"
rqq = requests.get(url)
html = etree.HTML(rqq.text)
html.xpath("//a/text()")

import requests
from lxml import etree

# Attempt the same static fetch on a BOSS 直聘 search-results URL.
# (The page is rendered by JavaScript, so this returns little useful data —
# motivating the Selenium approach below.)
# Fullwidth quotes from the original paste replaced with ASCII quotes.
rqq = requests.get(
    "https://www.zhipin.com/job_detail/?query=%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90%E5%B8%88&city=101120200&industry=&position="
)
html = etree.HTML(rqq.text)
html.xpath("//a/text()")

from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By

# Drive a real browser so the JavaScript-rendered job list is available.
driver = webdriver.Chrome()
driver.get("https://www.zhipin.com")

wait = WebDriverWait(driver, 10)
before = driver.current_window_handle  # original window handle (typo 'berfore' fixed)

# Search input box.
# Selenium 4 removed find_element_by_css_selector(); use find_element(By...).
search_box = driver.find_element(
    By.CSS_SELECTOR,
    "#wrap > div.column-search-panel.search-panel-new > div > div.search-box > div.search-form > form > div.search-form-con > p > input",
)
search_box.send_keys("数据分析师")

# Search (submit) button — wait until it is clickable before clicking.
confirm_btn = wait.until(EC.element_to_be_clickable(
    (By.CSS_SELECTOR,
     "#wrap > div.column-search-panel.search-panel-new > div > div.search-box > div.search-form > form > button")
))
confirm_btn.click()

# The results open in a new tab/window; switch to it.
# switch_to_window() was removed in Selenium 4 — use switch_to.window().
driver.switch_to.window(driver.window_handles[1])

from lxml import etree
import pandas as pd

# Parse the first results page from the live browser DOM into a DataFrame.
# All fullwidth quotes inside the XPath strings (e.g. ‘job-area’) were a
# syntax/semantic error in the paste — replaced with ASCII single quotes.
html = etree.HTML(driver.page_source)

name = html.xpath("//div/span/a/text()")                 # job titles
area = html.xpath("//span[@class='job-area']/text()")    # job locations
salary = html.xpath("//div/span[@class='red']/text()")   # salary ranges
company = html.xpath("//div/h3/a/text()")                # company names
# First and second text nodes of the requirements <p>: experience / education.
year = html.xpath("//div/div/div/ul/li/div/div/div/div/div/p/text()[1]")
education = html.xpath("//div/div/div/ul/li/div/div/div/div/div/p/text()[2]")

# NOTE(review): pd.DataFrame raises if these column lists have unequal
# lengths — acceptable here since each row yields one node per field,
# but worth confirming against the live page structure.
li = pd.DataFrame({
    '职位名称': name,
    '公司': company,
    '地点': area,
    '工资': salary,
    '需求年限': year,
    '学历要求': education,
})

# Page through the next 3 result pages, appending each page to `li`.
# The loop body lost its indentation in the original paste — restored here,
# along with ASCII quotes replacing the fullwidth ones.
for i in range(3):
    # "Next page" link — wait until clickable.
    nextpage_btn = wait.until(EC.element_to_be_clickable(
        (By.CSS_SELECTOR, "#main > div > div.job-list > div.page > a.next")
    ))
    nextpage_btn.click()
    # NOTE(review): page_source is read immediately after click(); if the new
    # page has not finished rendering this may re-parse the old page.
    # Consider waiting for a staleness/presence condition here — TODO confirm.
    html = etree.HTML(driver.page_source)
    name = html.xpath("//div/span/a/text()")
    area = html.xpath("//span[@class='job-area']/text()")
    salary = html.xpath("//div/span[@class='red']/text()")
    company = html.xpath("//div/h3/a/text()")
    year = html.xpath("//div/div/div/ul/li/div/div/div/div/div/p/text()[1]")
    education = html.xpath("//div/div/div/ul/li/div/div/div/div/div/p/text()[2]")
    page_df = pd.DataFrame({
        '职位名称': name,
        '公司': company,
        '地点': area,
        '工资': salary,
        '需求年限': year,
        '学历要求': education,
    })
    print(i)
    print(page_df)
    print('\n')
    li = pd.concat([li, page_df], ignore_index=True)
li

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值