Java-to-Python Study Notes: Scraping 51job in Practice

from bs4 import BeautifulSoup  # HTML parsing (imported here but not actually used below)
import re  # regular expressions
import urllib.request,urllib.error  # build the request and fetch the page
import xlwt  # save to Excel
import sqlite3  # save to SQLite

def main():
    # baseurl = 'https://search.51job.com/list/080200,000000,0000,00,9,99,python,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
    datalist = getData()
    # savepath = '51job.xls'
    # saveData(datalist,savepath)
    savedbpath = 'job.db'
    saveDbDate(datalist,savedbpath)
    # askURL(baseurl)

findLink = re.compile(r'"job_href":"(.*?)"')  # job detail link
findName = re.compile(r'"job_name":"(.*?)"')  # job title
findSalary = re.compile(r'"providesalary_text":"(.*?)"')  # salary
findAttribute = re.compile(r'"attribute_text":(.*?),"companysize_text":')  # requirements (location / experience / education / headcount)
findCname = re.compile(r'"company_name":"(.*?)"')  # company name
findCtype = re.compile(r'"companytype_text":"(.*?)"')  # company type
findCsize = re.compile(r'"companysize_text":"(.*?)"')  # company size
findJobwelf = re.compile(r'"jobwelf_list":(.*?),"attribute_text":')  # benefits
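
The search page embeds its results as a JSON blob inside the HTML, and the compiled patterns above each pull one field out of that blob. A quick sketch of how they behave against a hypothetical fragment in the same shape (demo_regex and the sample string are illustrative only and are not called by main(); the real markup may differ):

def demo_regex():
    # Hypothetical fragment shaped like the embedded JSON; note the escaped '\/' in the URL,
    # which is why getData() strips backslashes from the matched links afterwards.
    sample = ('"job_href":"https:\\/\\/jobs.51job.com\\/hangzhou\\/123.html",'
              '"job_name":"python开发工程师","providesalary_text":"1-1.5万/月",'
              '"company_name":"某某科技有限公司"')
    print(re.findall(findLink, sample))    # the link, still containing '\/' escapes
    print(re.findall(findName, sample))    # ['python开发工程师']
    print(re.findall(findSalary, sample))  # ['1-1.5万/月']
    print(re.findall(findCname, sample))   # ['某某科技有限公司']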

# Extract the data
def getData():
    datalist = []
    for i in range(1,41):  # crawl result pages 1 through 40
        url = 'https://search.51job.com/list/080200,000000,0000,00,9,99,python,2,'+ str(i) +'.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='  # URL of each result page
        html = askURL(url)  # page source of this result page
        # parse the page
        links = re.findall(findLink, html)
        li = []
        for link in links:
            link = re.sub('\\\\', "", link)  # strip the escaped '\' (four backslashes in the pattern match one literal backslash)
            li.append(link)

        names = re.findall(findName, html)

        salarys = re.findall(findSalary, html)
        sa = []
        satypes = []
        for salary in salarys:
            salary = re.sub('\\\\', "", salary)  # strip the escaped '\'
            if(salary):
                if(salary[-1] == '日'):
                    salarytype = '日'
                elif (salary[-1] == '月'):
                    salarytype = '月'
                elif (salary[-1] == '年'):
                    salarytype = '年'
                else:
                    salarytype = '其他'  # fallback for an unexpected unit, so salarytype is always defined
                salary = salary[0:-2]  # drop the trailing '/月', '/年', ...
                if(salary[-1] == '万'):
                    if ('-' in salary):
                        # convert a range quoted in 万 into K, e.g. '1-1.5万' -> '10-15K'
                        pre = re.split(r'[-]', salary)[0]
                        pre = str(int(float(pre) * 10))
                        rear = re.split(r'[-]', salary)[1]
                        rear = str(int(float(rear[0:-1]) * 10))
                        salary = pre + '-' + rear + 'K'
                elif (salary[-1] == '千'):
                    if ('-' in salary):
                        salary = salary[0:-1] + 'K'  # e.g. '6-8千' -> '6-8K'
            else:
                salary = '面议'      # "negotiable" when no salary is listed
                salarytype = '面议'
            satypes.append(salarytype)
            sa.append(salary)
        # (the same conversion appears as a standalone normalize_salary() sketch after this function)


        attributes = re.findall(findAttribute, html)
        ats = []
        for attribute in attributes:
            # attribute_text is a JSON array (location, experience, education, headcount); strip brackets and quotes
            attribute = re.sub('"', '', attribute)
            attribute = re.sub(r'\[', '', attribute)
            attribute = re.sub(']', '', attribute)
            attribute = re.sub('\\\\', '', attribute)
            at = attribute.split(',')
            ats.append(at)

        cnames = re.findall(findCname, html)

        ctypes = re.findall(findCtype, html)

        csizes = re.findall(findCsize,html)

        jobwelfs = re.findall(findJobwelf, html)
        for jobwelf in jobwelfs:
            # benefits are parsed the same way but are not stored in data or the database below
            jobwelf = re.sub('"', '', jobwelf)
            jobwelf = re.sub(r'\[', '', jobwelf)
            jobwelf = re.sub(']', '', jobwelf)
            jobwelf = jobwelf.split(',')

        # one result page normally carries 50 postings; use the actual count instead of a hardcoded 50
        for j in range(len(li)):
            data = []
            data.append(li[j])
            data.append(names[j])
            if j < len(sa):
                data.append(sa[j])
                data.append(satypes[j])
            else:
                data.append('面议')
                data.append('面议')
            if (len(ats[j]) == 4):
                data.append(ats[j][1])
                data.append(ats[j][2])
                data.append(ats[j][3])
            elif (len(ats[j]) == 3):
                data.append('校招')
                data.append(ats[j][1])
                data.append(ats[j][2])
            elif (len(ats[j]) == 2):
                data.append('校招')
                data.append('经验不限')
                data.append(ats[j][1])
            else:
                data.extend(['', '', ''])  # unexpected attribute count; keep the row at 10 columns
            data.append(cnames[j])
            data.append(ctypes[j])
            data.append(csizes[j])
            datalist.append(data)

    return datalist
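
The salary branch above is the fiddliest part of getData(). Here is the same conversion pulled out into a standalone helper with worked examples (normalize_salary is a reference sketch only and is not called by main()):

def normalize_salary(raw):
    # Mirrors the branch inside getData():
    #   '1-1.5万/月' -> ('10-15K', '月'),  '6-8千/月' -> ('6-8K', '月'),  '' -> ('面议', '面议')
    if not raw:
        return '面议', '面议'
    salarytype = raw[-1] if raw[-1] in ('日', '月', '年') else '其他'
    salary = raw[0:-2]  # drop the trailing '/月', '/年', ...
    if salary[-1] == '万' and '-' in salary:
        parts = salary[:-1].split('-')
        salary = str(int(float(parts[0]) * 10)) + '-' + str(int(float(parts[1]) * 10)) + 'K'  # 万 -> K
    elif salary[-1] == '千' and '-' in salary:
        salary = salary[0:-1] + 'K'
    return salary, salarytype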

# Fetch the content of the given URL
def askURL(url):  # pretend to be a browser so the request is not rejected
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36 SE 2.X MetaSr 1.0"
    }
    request = urllib.request.Request(url,headers=head)  # spoof the browser User-Agent
    html = ''
    try:
        response = urllib.request.urlopen(request)  # send the request
        html = response.read().decode('GBK')  # decode the page source (the site serves GBK)
        # print(html)
    except urllib.error.URLError as e:  # log the failure
        if hasattr(e,'code'):  # HTTP status code
            print(e.code)
        if hasattr(e,'reason'):  # reason for the failure
            print(e.reason)

    return html
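
Before launching the full 40-page crawl it can help to fetch a single page and count the matches (check_first_page is a quick sanity check and is not called by main()):

def check_first_page():
    # Fetch result page 1 and report how many job links the pattern finds (around 50 is expected per page).
    url = 'https://search.51job.com/list/080200,000000,0000,00,9,99,python,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
    html = askURL(url)
    print(len(re.findall(findLink, html)), 'job links found on page 1')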

# Save the data to Excel
def saveData(datalist,savepath):
    print('save...')
    job = xlwt.Workbook(encoding='utf-8',style_compression=0)
    sheet = job.add_sheet('51job python 杭州',cell_overwrite_ok=True)
    col = ('链接','工作名','薪资','计算方式','工作经验','学历','招收人数','公司名','公司类型','公司规模')
    for i in range(0,10):
        sheet.write(0,i,col[i])
    for i in range(0,len(datalist)):  # one row per posting instead of a hardcoded 2000
        data = datalist[i]
        for j in range(0,10):
            sheet.write(i+1,j,data[j])

    job.save(savepath)

def saveDbDate(datalist,savedbpath):
    init_db(savedbpath)
    conn = sqlite3.connect(savedbpath)
    cur = conn.cursor()

    sql = """
        insert into job
        (link,jbname,jbsalary,salarytype,experience,education,neednum,cname,ctype,csize)
        values (?,?,?,?,?,?,?,?,?,?)"""  # parameterized insert, so values containing quotes do not break the SQL
    for data in datalist:
        cur.execute(sql, data)
    conn.commit()
    cur.close()
    conn.close()

def init_db(savedbpath):  # initialize the job table
    sql = """
        create table if not exists job
        (
            id integer primary key autoincrement,
            link text,
            jbname varchar ,
            jbsalary text ,
            salarytype text ,
            experience text ,
            education text ,
            neednum text ,
            cname varchar ,
            ctype text ,
            csize text
        )
    """
    conn = sqlite3.connect(savedbpath)
    cur = conn.cursor()
    cur.execute(sql)
    conn.commit()
    conn.close()
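
After a run, the saved rows can be inspected straight from sqlite3. A small sketch using the table and column names created by init_db above (query_db is an extra helper, not called by main()):

def query_db(savedbpath='job.db'):
    # Print a row count and a few sample rows as a quick check of what was stored.
    conn = sqlite3.connect(savedbpath)
    cur = conn.cursor()
    cur.execute('select count(*) from job')
    print('rows saved:', cur.fetchone()[0])
    for row in cur.execute('select jbname, jbsalary, salarytype, education, cname from job limit 5'):
        print(row)
    cur.close()
    conn.close()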

if __name__ == '__main__':
    main()
    # init_db('job.db')
    print("爬取完毕")
