Crawler in Practice 15: Scraping Lagou's Python job listings with Selenium and saving them to MySQL

import time

import pymysql
from lxml import etree
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait


def get_page():
    url = "https://www.lagou.com/"
    browser = webdriver.Chrome()
    browser.get(url)
    # Close the city-selection popup that covers the page on first load.
    quanguo = browser.find_element(By.ID, "cboxClose")
    quanguo.click()
    wait = WebDriverWait(browser, 30)
    # The fixed sleeps below keep the original pacing; an explicit-wait
    # variant is sketched after this function.
    time.sleep(5)
    search_input = browser.find_element(By.ID, "search_input")
    search_input.send_keys('Python')
    time.sleep(1)
    button = browser.find_element(By.ID, "search_button")
    button.click()
    # Wait for the result list to render before grabbing the page source.
    wait.until(EC.presence_of_element_located((By.CLASS_NAME, "position_link")))
    source = browser.page_source
    html = etree.HTML(source)
    # Every job on the results page links to its own detail page; collect those URLs.
    url_list = html.xpath('//a[@class="position_link"]/@href')
    browser.quit()
    return url_list
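
As promised above, here is a minimal explicit-wait sketch of get_page() that drops the fixed sleeps entirely, assuming the same element ids. The original's commented-out attempt used EC.presence_of_all_elements_located, which returns a list rather than the single element the later calls need, and misspelled the button id as 'earch_button'; both are corrected here.

def get_page_with_waits():
    # Hypothetical variant of get_page() that replaces the fixed sleeps
    # with explicit waits on the same element ids.
    browser = webdriver.Chrome()
    browser.get("https://www.lagou.com/")
    wait = WebDriverWait(browser, 30)
    wait.until(EC.element_to_be_clickable((By.ID, "cboxClose"))).click()
    search_input = wait.until(EC.presence_of_element_located((By.ID, "search_input")))
    search_input.send_keys('Python')
    wait.until(EC.element_to_be_clickable((By.ID, "search_button"))).click()
    # Block until the job links exist before reading the page source.
    wait.until(EC.presence_of_element_located((By.CLASS_NAME, "position_link")))
    links = etree.HTML(browser.page_source).xpath('//a[@class="position_link"]/@href')
    browser.quit()
    return links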


def parse_page(position_url):
    # One MySQL connection and one Chrome instance per job URL; see the
    # reuse sketch after main() for a cheaper variant.
    db = pymysql.connect(host='localhost', user='root', password='123456', port=3306, db='mysql')
    cursor = db.cursor()
    driver = webdriver.Chrome()
    driver.get(position_url)
    text = driver.page_source
    html = etree.HTML(text)
    job_info = {}
    # Pull each field off the detail page; the job_request spans carry
    # '/' separators, hence the split('/') calls.
    name = html.xpath('//div[@class="position-content "]//span[@class="name"]/text()')[0]
    salary = html.xpath('//div[@class="position-content "]//span[@class="salary"]/text()')[0].strip()
    drrs = html.xpath('//div[@class="position-content "]//dd[@class="job_request"]/p/span[2]/text()')[0].split('/')[1].strip()
    years = html.xpath('//div[@class="position-content "]//dd[@class="job_request"]/p/span[3]/text()')[0].split('/')[0].strip()
    jingyan = html.xpath('//div[@class="position-content "]//dd[@class="job_request"]/p/span[4]/text()')[0].split('/')[0].strip()
    zhiye = html.xpath('//div[@class="position-content "]//dd[@class="job_request"]/p/span[5]/text()')[0]
    company = html.xpath('//div[@class="job_company_content"]//em[@class="fl-cn"]/text()')[0].strip()
    infos = ''.join(html.xpath('//div[@class="job-detail"]/p/text()'))
    job_info['position'] = name
    job_info['salary'] = salary
    job_info['address'] = drrs
    job_info['years of experience'] = years
    job_info['experience'] = jingyan
    job_info['full-time or not'] = zhiye
    job_info['company'] = company
    job_info['job description'] = infos
    # Parameterised query: let pymysql handle quoting and escaping instead
    # of string formatting, which breaks on quotes in the job description.
    sql = '''INSERT INTO func(names, salarys, drrss, yearss, jingyans, zhiyes, companys, infoss)
             VALUES (%s, %s, %s, %s, %s, %s, %s, %s)'''
    cursor.execute(sql, (name, salary, drrs, years, jingyan, zhiye, company, infos))
    db.commit()
    db.close()
    print(job_info)
    driver.quit()

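The INSERT in parse_page() assumes a table named func already exists in the target database. Only the table and column names come from that query; the column types below are assumptions, so adjust them to taste. A one-off setup helper might look like this:

def create_table():
    # Hypothetical setup helper: table and column names match the INSERT
    # in parse_page(); the column types are assumptions.
    db = pymysql.connect(host='localhost', user='root', password='123456', port=3306, db='mysql')
    cursor = db.cursor()
    cursor.execute('''CREATE TABLE IF NOT EXISTS func (
                          id INT AUTO_INCREMENT PRIMARY KEY,
                          names VARCHAR(255),
                          salarys VARCHAR(64),
                          drrss VARCHAR(64),
                          yearss VARCHAR(64),
                          jingyans VARCHAR(64),
                          zhiyes VARCHAR(64),
                          companys VARCHAR(255),
                          infoss TEXT
                      ) DEFAULT CHARSET=utf8mb4''')
    db.close()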

def main():
    url_list = get_page()
    for position_url in url_list:
        parse_page(position_url)



if __name__ == '__main__':
    main()
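
Launching a fresh Chrome instance and MySQL connection for every job URL is the slowest part of this script. A hypothetical refactor shares one driver and one connection across all detail pages; it assumes parse_page() is reworked into a parse_detail(driver, db, url) that uses the passed-in handles instead of creating its own:

def main_shared():
    # Hypothetical variant: one browser and one DB connection for the
    # whole run, instead of one pair per job URL.
    db = pymysql.connect(host='localhost', user='root', password='123456', port=3306, db='mysql')
    driver = webdriver.Chrome()
    try:
        for position_url in get_page():
            parse_detail(driver, db, position_url)  # hypothetical reworked parse_page()
    finally:
        driver.quit()
        db.close()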