# 51job (前程无忧) job-listing detail scraper

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug  1 14:25:43 2018

@author: ding
"""

import requests
from bs4 import BeautifulSoup

headers={'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}

ll = []  # module-level accumulator of detail-page links (kept for backward compatibility)
def geturl(url):
    """Fetch a 51job search-result page and collect job-detail links.

    Parameters
    ----------
    url : str
        URL of a 51job search-result listing page.

    Returns
    -------
    list[str]
        The module-level ``ll`` list, extended with every detail link
        found on this page. Note the accumulation across calls is the
        original behavior and is preserved.
    """
    res = requests.get(url, headers=headers)
    # 51job pages are GBK-encoded; let requests sniff the real charset.
    res.encoding = res.apparent_encoding
    soup = BeautifulSoup(res.text, 'html.parser')
    # Each result row carries its link in <p class="t1"><a href=...>;
    # the first row is a header cell without an anchor.
    for cell in soup.find_all('p', class_='t1'):
        anchor = cell.find('a')
        # Explicit check replaces the original bare ``except: pass``,
        # which silently hid every error (it was only guarding against
        # cells with no <a> or no href attribute).
        if anchor is not None and anchor.has_attr('href'):
            ll.append(anchor['href'])
    return ll
        

#url='https://jobs.51job.com/shanghai-hpq/104752757.html?s=01&t=0'

total = []  # module-level accumulator of parsed job dicts
def getinfo(URL):
    """Scrape one 51job job-detail page and append its fields to ``total``.

    Parameters
    ----------
    URL : str
        URL of a single job-detail page (e.g. jobs.51job.com/...).

    Returns
    -------
    list[dict]
        The module-level ``total`` list with this page's record appended.
        Record keys: zhiwei (title), diqu (location), gongsi (company),
        jianjie (summary), xinzi (salary), jingyan (requirements),
        fuli (benefits), zhize (responsibilities), dizhi (address),
        xinxi (company info).
    """
    res = requests.get(URL, headers=headers)
    res.encoding = res.apparent_encoding
    soup = BeautifulSoup(res.text, 'html.parser')

    # Defaults guard against pages missing a section: the original code
    # left these names unbound (NameError) when find_all() matched nothing.
    zhiwei = diqu = gongsi = jianjie1 = xinzi = ''
    jingyan1 = fuli1 = zhize2 = dizhi = xinxi = ''

    # Header block: title, location, company, summary, salary.
    # (Renamed loop source from ``all`` -- it shadowed the builtin.)
    for each in soup.find_all('div', class_='cn'):
        zhiwei = each.find('h1').text
        diqu = each.find('span', class_='lname').text
        gongsi = each.find('p', class_='cname').text.strip('\n')
        jianjie = each.find('p', class_='msg ltype').text
        # "a | b | c" -> "a--b--c"
        jianjie1 = '--'.join(part.strip() for part in jianjie.split('|'))
        xinzi = each.find('strong').text

    # Main body: experience/education tags, benefits, duties, address.
    for each2 in soup.find_all('div', class_='tCompany_main'):
        jingyan = each2.find_all('span', class_='sp4')
        jingyan1 = '--'.join(tag.text.strip() for tag in jingyan)
        fuli = each2.find_all('p', class_='t2')
        fuli1 = '--'.join('--'.join(tag.text.strip() for tag in fuli).split('\n'))
        for box in each2.find_all('div', class_='bmsg job_msg inbox'):
            paragraphs = box.find_all('p')
            zhize2 = '\n'.join(p.text.strip() for p in paragraphs)
        # NOTE: str.strip() takes a *set* of characters -- this trims
        # tabs, newlines and the characters 地/图 ("map" link text)
        # from both ends. Preserved from the original.
        dizhi = each2.find('div', class_='bmsg inbox').text.strip('\t\t\t\t\t\t\t\n地图')
        xinxi = each2.find('div', class_='tmsg inbox').text.strip()

    info = {'zhiwei': zhiwei,
            'diqu': diqu,
            'gongsi': gongsi,
            'jianjie': jianjie1,
            'xinzi': xinzi,
            'jingyan': jingyan1,
            'fuli': fuli1,
            'zhize': zhize2,
            'dizhi': dizhi,
            'xinxi': xinxi}
    total.append(info)
    return total
  
    
if __name__ == '__main__':
    # Search-result URL for "python" jobs in Shanghai (area code 020000).
    # Only one page is crawled; loop over the page number in the URL
    # (".../python,2,<N>.html") to crawl more pages.
    url='https://search.51job.com/list/020000,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='

    # [1:] skips the first <p class="t1"> cell, which is the table header.
    for link in geturl(url)[1:]:
        getinfo(link)

    # Export moved inside the main guard: the original ran the import and
    # the Excel write at module import time, so merely importing this file
    # triggered a (possibly empty) export.
    import pandas as pd
    df = pd.DataFrame(total)
    # Write .xlsx (openpyxl): the legacy .xls writer (xlwt) was deprecated
    # and then removed from modern pandas, so to_excel('*.xls') fails.
    df.to_excel('qiancheng-pa.xlsx')


 

# NOTE(review): the lines below this point in the original file were
# non-code residue copied from the CSDN blog page (comment counts and
# "red packet" payment UI text). They were not part of the program and
# made the file a SyntaxError, so they have been removed.