1. Data Collection (Selenium)
from selenium import webdriver
import time
import re
import pandas as pd
import os
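Every function below uses a module-level webdriver instance named `dr`, which is created in the automation section at the end. A minimal setup sketch, assuming Selenium 3.x (the `find_element_by_*` API used throughout was removed in Selenium 4) and a chromedriver on the PATH:

# Sketch only: the real instance is created in the automation section at the end.
dr = webdriver.Chrome()  # assumes chromedriver is on the PATH
dr.implicitly_wait(5)    # wait up to 5 seconds for elements to appear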
A login pop-up may appear while crawling, so we first define a function to dismiss it:
def close_windows():
    # If a login pop-up (jconfirm dialog) is present, close it
    try:
        time.sleep(0.5)
        if dr.find_element_by_class_name("jconfirm").find_element_by_class_name("closeIcon"):
            dr.find_element_by_class_name("jconfirm").find_element_by_class_name("closeIcon").click()
    except BaseException as e:
        print('close_windows: no pop-up', e)
The crawling function. It collects 11 columns per posting, covering most of the information shown for a position:
def get_current_region_job(k_index):
    flag = 0
    # page_num_set = 0  # optional per-district row cap, rounded to multiples of 30
    df_empty = pd.DataFrame(columns=['岗位', '地点', '薪资', '工作经验', '学历', '公司名称', '技能', '工作福利', '工作类型', '融资情况', '公司规模'])
    while flag == 0:
        # while (page_num_set < 151) & (flag == 0):  # the site serves at most 150 rows per query
        time.sleep(0.5)
        close_windows()
        job_list = dr.find_elements_by_class_name("job-primary")
        for job in job_list:  # the ~30 postings on the current page
            job_name = job.find_element_by_class_name("job-name").text
            job_area = job.find_element_by_class_name("job-area").text
            salary_raw = job.find_element_by_class_name("red").get_attribute("textContent")  # raw salary text
            salary_split = salary_raw.split('·')  # split on '·'
            salary = salary_split[0]  # keep the salary range, drop the "N薪" suffix
            # if re.search(r'天', salary):  # optionally skip day-rate postings
            #     continue
            experience_education = job.find_element_by_class_name("job-limit").find_element_by_tag_name(
                "p").get_attribute("innerHTML")
            # raw value looks like: '1-3年<em class="vline"></em>本科'
            experience_education_raw = experience_education
            # the separator <em class="vline"></em> is exactly 23 characters drawn from this
            # character class; replace it with a comma, then split (see the demo after this function)
            experience_education_replace = re.sub(r'[a-zA-Z =<>/"]{23}', ",", experience_education_raw)
            experience_education_list = experience_education_replace.split(',')
            if len(experience_education_list) != 2:
                print('experience_education_list does not have 2 fields, skipping this record', experience_education_list)
                continue  # skip just this posting rather than the rest of the page
            experience = experience_education_list[0]
            education = experience_education_list[1]
            company_type = job.find_element_by_class_name("company-text").find_element_by_tag_name(
                "p").get_attribute("innerHTML")
            company_type_size_row = company_type
            # same separator trick: the two vlines become commas, yielding 3 fields
            company_size_replace = re.sub(r'[a-zA-Z =<>/"]{23}', ",", company_type_size_row)
            company_size_list = company_size_replace.split(',')
            if len(company_size_list) != 3:
                print('company_size_list does not have 3 fields, skipping this record', company_size_list)
                continue  # skip just this posting rather than the rest of the page
            company_direct_info = company_size_list[0].split(">")[1]  # industry (text after the leftover '>' of the <a> tag)
            company_salary_info = company_size_list[1].split(">")[1]  # financing status
            company_size_info = company_size_list[2]  # company size
            company = job.find_element_by_class_name("company-text").find_element_by_class_name("name").text
            skill_list = job.find_element_by_class_name("tags").find_elements_by_class_name("tag-item")
            skill = []
            job_advantage = job.find_element_by_class_name("info-desc").text
            for skill_i in skill_list:
                skill_i_text = skill_i.text
                if len(skill_i_text) == 0:
                    continue
                skill.append(skill_i_text)
            df_empty.loc[k_index, :] = [job_name, job_area, salary, experience, education, company, skill, job_advantage, company_direct_info, company_salary_info, company_size_info]
            print(df_empty.loc[k_index, :])
            k_index = k_index + 1
            # page_num_set = page_num_set + 1
        print("Rows fetched so far: {}".format(k_index))
        close_windows()
        try:  # click through to the next page
            cur_page_num = dr.find_element_by_class_name("page").find_element_by_class_name("cur").text
            element = dr.find_element_by_class_name("page").find_element_by_class_name("next")
            dr.execute_script("arguments[0].click();", element)
            time.sleep(1)
            new_page_num = dr.find_element_by_class_name("page").find_element_by_class_name("cur").text
            if cur_page_num == new_page_num:  # page number unchanged: this was the last page
                flag = 1
                break
        except BaseException as e:
            print('Failed to click the next page', e)
            break
    print(df_empty)
    if os.path.exists("ai数据.csv"):  # append if the file exists, otherwise create it with a header
        df_empty.to_csv('ai数据.csv', mode='a', header=False, index=None, encoding='gb18030')
    else:
        df_empty.to_csv("ai数据.csv", index=False, encoding='gb18030')
    return k_index
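To see why the regex `[a-zA-Z =<>/"]{23}` isolates the fields: the HTML separator `<em class="vline"></em>` is exactly 23 characters, all drawn from that character class, while the surrounding Chinese text is not. A standalone demo on the sample value quoted in the comments above:

import re

raw = '1-3年<em class="vline"></em>本科'  # sample innerHTML from the comments
print(re.sub(r'[a-zA-Z =<>/"]{23}', ',', raw).split(','))
# ['1-3年', '本科']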
The automated crawling part iterates over 14 popular cities nationwide. To crawl one fixed city instead, drop the for loop, look up that city's code on the site, and paste it into the URL; a sketch of the loop follows.
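A minimal sketch of that driver loop, assuming the site's `/c<city-code>/` URL pattern; the city codes and query keyword below are illustrative assumptions, so look up the real codes for all 14 cities on the site:

# Hypothetical per-city driver loop; city codes and URL pattern are assumptions.
city_codes = {'北京': '101010100', '上海': '101020100'}  # extend to all 14 hot cities
dr = webdriver.Chrome()
k_index = 0
for city, code in city_codes.items():
    dr.get('https://www.zhipin.com/c{}/?query=ai'.format(code))  # assumed URL pattern
    time.sleep(2)
    k_index = get_current_region_job(k_index)  # scrape every page for this city
dr.quit()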