#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 14:25:43 2018
@author: ding
"""
import requests
from bs4 import BeautifulSoup
# Desktop Firefox User-Agent so 51job serves the regular desktop HTML pages.
headers={'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
# Module-level accumulator of job-detail URLs; geturl() appends into this
# across calls (shared mutable state — repeated calls keep growing it).
ll = []
def geturl(url):
    """Scrape one 51job search-result page and collect job-detail links.

    Each result row is a ``<p class="t1">`` whose ``<a href>`` points at the
    job posting.  Links are appended to the module-level list ``ll`` (shared
    state — repeated calls keep accumulating) and that same list is returned.

    :param url: URL of a 51job search-result page.
    :return: the module-level list ``ll`` with the hrefs appended.
    """
    res = requests.get(url, headers=headers)
    # Use the detected encoding; 51job pages are typically GBK, not UTF-8.
    res.encoding = res.apparent_encoding
    soup = BeautifulSoup(res.text, 'html.parser')
    for row in soup.find_all('p', class_='t1'):
        # Was a bare `except: pass`, which hid every error.  Only the two
        # expected failures are tolerated now: `find('a')` returning None
        # (TypeError on subscript) or the <a> lacking an href (KeyError).
        try:
            ll.append(row.find('a')['href'])
        except (TypeError, KeyError):
            pass
    return ll
#url='https://jobs.51job.com/shanghai-hpq/104752757.html?s=01&t=0'
total=[]
def getinfo(URL):
    """Scrape one 51job job-detail page into a dict and append it to ``total``.

    Extracted fields (keys kept in the original pinyin for downstream
    compatibility): zhiwei=job title, diqu=location, gongsi=company,
    jianjie=company summary, xinzi=salary, jingyan=experience requirements,
    fuli=benefits, zhize=job description, dizhi=address, xinxi=company info.

    :param URL: URL of a 51job job-detail page.
    :return: the module-level list ``total`` with the new dict appended.
    """
    res = requests.get(URL, headers=headers)
    res.encoding = res.apparent_encoding  # pages are typically GBK-encoded
    soup = BeautifulSoup(res.text, 'html.parser')
    # Renamed from `all`/`all2`, which shadowed the builtin all().
    header_divs = soup.find_all('div', class_='cn')
    for each in header_divs:
        zhiwei = each.find('h1').text
        diqu = each.find('span', class_='lname').text
        gongsi = each.find('p', class_='cname').text.strip('\n')
        jianjie = each.find('p', class_='msg ltype').text
        # Normalise "a | b | c" into "a--b--c".
        jianjie1 = '--'.join(list(map(lambda x: x.strip(), jianjie.split('|'))))
        xinzi = each.find('strong').text
    main_divs = soup.find_all('div', class_='tCompany_main')
    for each2 in main_divs:
        jingyan = each2.find_all('span', class_='sp4')
        jingyan1 = '--'.join(list(map(lambda x: x.text.strip(), jingyan)))
        fuli = each2.find_all('p', class_='t2')
        fuli1 = '--'.join('--'.join(list(map(lambda x: x.text.strip(), fuli))).split('\n'))
        zhize = each2.find_all('div', class_='bmsg job_msg inbox')
        # Default so the info dict below never hits a NameError when the
        # page has no job-description block (previously zhize2 was only
        # assigned inside the loop).
        zhize2 = ''
        for p in zhize:
            zhize1 = p.find_all('p')
            zhize2 = '\n'.join(list(map(lambda x: x.text.strip(), zhize1)))
        # strip() takes a set of characters; this trims tabs, newlines and
        # the "地图" ("map") link text from the address block edges.
        dizhi = each2.find('div', class_='bmsg inbox').text.strip('\t\t\t\t\t\t\t\n地图')
        xinxi = each2.find('div', class_='tmsg inbox').text.strip()
        # NOTE(review): zhiwei..xinzi leak out of the previous loop; assumes
        # exactly one div.cn per page — TODO confirm against live pages.
        info = {'zhiwei': zhiwei,
                'diqu': diqu,
                'gongsi': gongsi,
                'jianjie': jianjie1,
                'xinzi': xinzi,
                'jingyan': jingyan1,
                'fuli': fuli1,
                'zhize': zhize2,
                'dizhi': dizhi,
                'xinxi': xinxi}
        total.append(info)
    return total
if __name__ == '__main__':
    # Search-result page for "python" jobs in Shanghai.  Only page 1 is
    # crawled here; loop over the page number in the path to crawl more.
    # FIX: the query string contained "°reefrom=99" — the "&deg" of
    # "&degreefrom" had been rendered as the HTML entity "°", corrupting
    # the degreefrom parameter.  Restored to "&degreefrom=99".
    url='https://search.51job.com/list/020000,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
    # [1:] skips the header row that matches p.t1 but carries no job link.
    for detail_url in geturl(url)[1:]:
        getinfo(detail_url)
    import pandas as pd
    # Dump everything accumulated in `total` to an Excel sheet.
    df = pd.DataFrame(total)
    df.to_excel('qiancheng-pa.xls')