【Scrapy】Scraping OSChina Job Listings

python 2.7 
scrapy 1.3.0
sqlalchemy
Navicat (MySQL GUI client)
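
Note: the pipeline below connects via a mysql+mysqldb engine URL, so the MySQL-python (MySQLdb) driver is also required.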

JobSpider.py

# -*- coding:utf-8 -*-
from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapy.http import Request
from miao.items import JobDetail, CompanyDetail
import sys

# Python 2 hack: make UTF-8 the default encoding so the Chinese search
# keywords and page text do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

class JobSpider(Spider):
	name = 'job'
	# start_urls = ['https://job.oschina.net/search?type=%E8%81%8C%E4%BD%8D%E6%90%9C%E7%B4%A2&key=web%E5%89%8D%E7%AB%AF&exp=0&edu=0&nat=1&city=%E5%85%A8%E5%9B%BD&p=1']
	# Search keywords, sent verbatim as the key= query parameter.
	keys = ['Java', 'Python', 'Hadoop', '自然语言处理', '搜索算法', '全栈工程师', '数据挖掘', '后端其他', 'web前端',
			'HTML5', 'JavaScript', '前端开发工程师', '前端其他']

	def start_requests(self):
		# One search request per keyword and result page (pages 1-11).
		for k in self.keys:
			for i in range(1, 12):
				url = 'https://job.oschina.net/search?type=职位搜索&key=%s&exp=0&edu=0&nat=1&city=全国&p=%s' % (k, i)
				yield Request(url=url, callback=self.parse)

	def parse(self, response):
		sel = Selector(response)
		# Each search result row contains both a job link and a company link.
		list_jobdivs = sel.xpath('//div[@class="box clear layout"]')
		for div in list_jobdivs:
			job_url = div.xpath('./div[@class="flex-item-6 "]/div[@class="layout"]/div[@class="layout-left title"]/a/@href').extract()[0]
			company_url = div.xpath('./div[@class="flex-item-6 com-info"]/div[@class="layout clear"]/div[@class="layout-column"]/div[@class="layout"]/a/@href').extract()[0]
			# Many jobs share one company; Scrapy's built-in duplicate request
			# filter makes sure each company page is still fetched only once.
			yield Request(url=job_url, callback=self.parse_job)
			yield Request(url=company_url, callback=self.parse_company)


	def parse_company(self, response):
		sel = Selector(response)
		item = CompanyDetail()
		item['companyurl'] = response.url  # company page URL
		item['companyname'] = sel.xpath("//small/text()").extract()[0]  # company name
		# The introduction is split across several <p> tags; join them and
		# strip the <br> line breaks.
		companydescs = sel.xpath("//div[@class='panel-body']")[0].re(r'<p>(.*?)</p>')
		item['companydesc'] = ''.join(companydescs).replace("<br>", "")  # company introduction
		# companyguimo = sel.xpath("//div[@class='col-xs-7']/ul[@class='lists text']/li")[1].xpath("./span[@class='size']/text()").extract()[0]
		# item['companyguimo'] = companyguimo # 公司规模
		# companyguanwang  = sel.xpath("//div[@class='col-xs-7']/ul[@class='lists text']/li")[2].xpath("./span[@class='page']/a/@href").extract()[0]
		# item['companyguanwang'] = companyguanwang
		# companyjieduan = sel.xpath("//div[@class='col-xs-7']/ul[@class='lists text']/li")[3].xpath("./span[@class='stage']").extract()[0]
		# item['companyjieduan'] = companyjieduan
		yield item

	def parse_job(self, response):
		sel = Selector(response)
		item = JobDetail()
		item['joburl'] = response.url  # job page URL
		item['jobcompany'] = sel.xpath("//div[@class='col-xs-12']/h3[@class='text-left']/strong/a/@title").extract()[0]  # company name
		item['jobcontent'] = sel.xpath("//h1/@title").extract()[0]  # job title
		item['jobmoney'] = sel.xpath("//div[@class='left']/div[@class='basic']/div[@class='clearfix row lh-md']/div[@class='col-xs-9']/div/b/text()").extract()[0]  # salary
		# Requirements (location / education / experience) are separate <a> tags.
		locations = sel.xpath("//div[@class='left']/div[@class='basic']/div[@class='clearfix row lh-md']/div[@class='col-xs-9']/div/a")
		item['jobneed'] = '/'.join(l.xpath("./text()").extract()[0].strip() for l in locations)
		# Required skills are the <a> tags inside span#ex-position-skills.
		skills = sel.xpath("//div[@class='left']/div[@class='basic']/div[@class='clearfix row lh-md']/div[@class='col-xs-9']/div/span[@id='ex-position-skills']/a")
		item['skillneed'] = '/'.join(s.xpath("./text()").extract()[0].strip() for s in skills)
		item['pubtime'] = sel.xpath("//div[@class='left']/div[@class='basic']/p/text()").extract()[0]  # publish time
		# The description is split across several <p> tags; join them and
		# strip the <br> line breaks.
		jobdescs = sel.xpath("//div[@class='panel']/div[@class='panel-body']/div[@class='position-description']")[0].re(r'<p>(.*?)</p>')
		item['jobdesc'] = ''.join(jobdescs).replace('<br>', '')  # job description
		yield item
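
The spider can also be run from a plain Python script instead of the scrapy CLI. A minimal runner sketch (the miao.spiders.JobSpider module path is an assumption based on the imports above):

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from miao.spiders.JobSpider import JobSpider  # assumed module path

process = CrawlerProcess(get_project_settings())  # picks up settings.py, including ITEM_PIPELINES
process.crawl(JobSpider)
process.start()  # blocks until the crawl finishes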

pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from sqlalchemy import create_engine, Column, String, Integer
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql import LONGTEXT
from miao.items import JobDetail

# Adjust the user/password/database to your own MySQL instance.
engine = create_engine('mysql+mysqldb://root:1234@127.0.0.1:3306/iproxypool?charset=utf8')
DBSession = sessionmaker(bind=engine)
Base = declarative_base()

class Job(Base):
	__tablename__ = 'job'

	id = Column(Integer,primary_key = True,autoincrement = True)
	joburl = Column(String(255))
	jobcompany = Column(String(255))
	jobcontent = Column(String(255))
	jobmoney = Column(String(255))
	jobneed = Column(String(255))
	skillneed = Column(String(255))
	pubtime = Column(String(255))
	jobdesc = Column(LONGTEXT)
class Company(Base):
	__tablename__ = "company"

	id = Column(Integer,primary_key = True,autoincrement = True)
	companyurl = Column(String(255))
	companyname = Column(String(255))
	companydesc = Column(LONGTEXT)
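
# Note: the tables must already exist before the pipeline writes to them.
# If you do not create them by hand (e.g. in Navicat), SQLAlchemy can emit
# the CREATE TABLE statements from the models above:
Base.metadata.create_all(engine)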

class JobPipeline(object):

	def open_spider(self, spider):
		self.session = DBSession()

	def process_item(self, item, spider):
		if isinstance(item, JobDetail):
			# Upsert keyed on the job URL: refresh the existing row if this
			# job was crawled before, otherwise insert a new one.
			exists = self.session.query(Job).filter(Job.joburl == item['joburl']).all()
			if exists:
				self.session.query(Job).filter(Job.joburl == item['joburl']).update({
					Job.jobcompany: item['jobcompany'],
					Job.jobcontent: item['jobcontent'],
					Job.jobmoney: item['jobmoney'],
					Job.jobneed: item['jobneed'],
					Job.skillneed: item['skillneed'],
					Job.pubtime: item['pubtime'],
					Job.jobdesc: item['jobdesc']})
			else:
				self.session.add(Job(joburl=item['joburl'],
					jobcompany=item['jobcompany'],
					jobcontent=item['jobcontent'],
					jobmoney=item['jobmoney'],
					jobneed=item['jobneed'],
					skillneed=item['skillneed'],
					pubtime=item['pubtime'],
					jobdesc=item['jobdesc']))
			self.session.commit()
		else:
			# Same upsert for companies, keyed on the company name.
			exists = self.session.query(Company).filter(Company.companyname == item['companyname']).all()
			if exists:
				self.session.query(Company).filter(Company.companyname == item['companyname']).update({
					Company.companyurl: item['companyurl'],
					Company.companydesc: item['companydesc']})
			else:
				self.session.add(Company(companyurl=item['companyurl'],
					companyname=item['companyname'],
					companydesc=item['companydesc']))
			self.session.commit()
		return item
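
	def close_spider(self, spider):
		# Not in the original post: release the DB session when the crawl ends.
		self.session.close()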

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

from scrapy import Field, Item

class JobDetail(Item):
	joburl = Field()  # job page URL
	jobcompany = Field()  # company name
	jobcontent = Field()  # job title
	jobmoney = Field()  # salary
	jobneed = Field()  # requirements (location / education / experience)
	skillneed = Field()  # required skills
	pubtime = Field()  # publish time
	jobdesc = Field()  # job description

class CompanyDetail(Item):
	companyurl = Field()  # company page URL
	companyname = Field()  # company name
	companydesc = Field()  # company description

settings.py

ITEM_PIPELINES = {
   # 'miao.pipelines.BaiduPipeline': 300,
   # 'miao.pipelines.XiciPipeline': 300,
   # 'miao.pipelines.ChinaPipeline': 300,
   'miao.pipelines.JobPipeline': 300,
}
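
With the pipeline enabled in settings.py, the crawl is started from the project root with "scrapy crawl job" (matching the spider's name attribute).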

Reposted from: https://my.oschina.net/whitejavadog/blog/1036960
