A simple Scrapy crawler example

1. Installing Scrapy on CentOS

Install the required development packages:

[root@localhost ~]# yum groupinstall development
[root@localhost ~]# yum install libffi-devel python-devel openssl-devel  libxslt-devel

If the pip installer is not available, install it. First add a new yum repository, since CentOS 7's stock repositories do not ship pip:

[root@localhost ~]# yum install epel-release
[root@localhost ~]# yum install python-pip

Install Scrapy (use the second form, with the Douban mirror, if the default PyPI index is slow):

[root@localhost ~]# pip install scrapy
[root@localhost ~]# pip install scrapy -i http://pypi.douban.com/simple --trusted-host pypi.douban.com
[root@localhost ~]# scrapy version  # check the Scrapy version
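
A quick way to confirm the installation actually imports, independent of the scrapy command being on PATH:

[root@localhost ~]# python -c "import scrapy; print(scrapy.__version__)"
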
2. A Scrapy crawler example

We will crawl with the Scrapy framework, taking 51CTO as the example. Open a terminal (cmd on Windows), change to a working directory, and create the crawler project with 'scrapy startproject <project name>':

scrapy startproject spiderCTO

Next, to generate a spider file in one step:

cd spiderCTO
scrapy genspider spiders                  # invalid: a start URL/domain is required
scrapy genspider spiders '<start url to crawl>'
# for example:
scrapy genspider spiders example.com
Created spider 'spiders' using template 'basic' in module:
  spiderCTO.spiders.spiders
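
For reference, the 'basic' template produces a stub roughly like the following (exact contents vary slightly between Scrapy versions):

# -*- coding: utf-8 -*-
import scrapy

class SpidersSpider(scrapy.Spider):
    name = 'spiders'
    allowed_domains = ['example.com']
    start_urls = ['http://example.com/']

    def parse(self, response):
        pass
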
Then open the items file (items.py); the code is as follows:

# -*- coding: utf-8 -*-
import scrapy

class SpiderctoItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    url = scrapy.Field()
    score = scrapy.Field()
    hour = scrapy.Field()
    couse_long = scrapy.Field()
    student = scrapy.Field()
    price = scrapy.Field()
    updata = scrapy.Field()
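
A scrapy.Item behaves like a dict restricted to its declared fields, so a quick sanity check (with hypothetical values) looks like this:

from spiderCTO.items import SpiderctoItem

item = SpiderctoItem(title='Python tutorial', price='99')
print(dict(item))     # {'title': 'Python tutorial', 'price': '99'}
item['other'] = 1     # raises KeyError: only declared fields may be set
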

Next, create a MySpider file (the spider itself) in the spiders directory; the code is as follows:

# -*- coding: utf-8 -*-
import scrapy

from spiderCTO.items import SpiderctoItem

class MySpider(scrapy.Spider):
    name = 'MySpider'
    start_urls = ['http://edu.51cto.com/center/course/index/list?edunav=&page=1']

    def parse(self, response):
        # Each cList_Item block is one course card on the listing page.
        for box in response.xpath('.//div[@class="Page CourseList"]/div[@class="cList"]/div[@class="cList_Item"]'):
            # Create a fresh item per course; sharing one instance across
            # requests would let later iterations overwrite earlier data.
            item = SpiderctoItem()
            item['title'] = box.xpath('div[@class="main"]/h3/a/text()').extract()[0]
            item['url'] = box.xpath('div[@class="pic"]/a/@href').extract()[0]
            item['score'] = box.xpath('div[@class="main"]/div[@class="stars02"]/text()').extract()[0].strip()
            item['student'] = box.xpath('div[@class="main"]/div/p[@class="fr"]/text()').extract()[0]
            item['price'] = box.xpath('div[@class="main"]/div[@class="price"]/h4/text()').extract()[0]
            # Follow the course detail page, carrying the item along in meta.
            yield scrapy.Request(item['url'], callback=self.parseNext, meta={'item': item})

    def parseNext(self, response):
        item = response.meta['item']
        # The three CourseTime spans hold duration, hours and last-update date.
        spans = response.xpath('.//div[@class="Info"]/div[@class="main fr"]/div[@class="CourseTime"]/span/text()').extract()
        item['couse_long'] = spans[0]
        item['hour'] = spans[1]
        item['updata'] = spans[2]
        yield item

        # Queue the remaining listing pages; Scrapy's duplicate filter keeps
        # pages that were already requested from being fetched twice.
        for x in range(2, 162):
            page = 'http://edu.51cto.com/center/course/index/list?edunav=&page=' + str(x)
            yield scrapy.Request(page, callback=self.parse)
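
Because 51CTO's page layout may have changed since this was written, it is worth testing the XPath expressions interactively in scrapy shell before running the full crawl; the selectors below are the same ones parse() uses:

scrapy shell 'http://edu.51cto.com/center/course/index/list?edunav=&page=1'
>>> boxes = response.xpath('.//div[@class="Page CourseList"]/div[@class="cList"]/div[@class="cList_Item"]')
>>> len(boxes)        # number of course cards found on the page
>>> boxes[0].xpath('div[@class="main"]/h3/a/text()').extract_first()
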

Next, write the output pipeline (pipelines.py); the code is as follows:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

# Write items out as JSON:

# import codecs
# import json
# from scrapy.exceptions import DropItem

# class SpiderctoPipeline(object):
#     def __init__(self):
#         # plain open() works too, but codecs.open() makes the UTF-8 encoding explicit
#         # self.file = open('data.json', 'w')
#         self.file = codecs.open('data.json', 'w', encoding='utf-8')
#
#     def process_item(self, item, spider):
#         line = json.dumps(dict(item), ensure_ascii=False) + '\n'
#         self.file.write(line)
#         return item
# Write items into the database:

from twisted.enterprise import adbapi
import pymysql
import pymysql.cursors

class SpiderctoPipeline(object):

    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        # Twisted's adbapi runs the blocking pymysql calls in a thread pool,
        # so database writes do not block the crawl. The connection values
        # are hardcoded here; they could instead be read from the settings
        # argument.
        dbpool = adbapi.ConnectionPool('pymysql', host='127.0.0.1',
                                       db='cto', user='root', password='123456',
                                       charset='utf8',
                                       cursorclass=pymysql.cursors.DictCursor,
                                       use_unicode=True)
        return cls(dbpool)

    def process_item(self, item, spider):
        self.dbpool.runInteraction(self.do_insert, item)
        return item

    def do_insert(self, cursor, item):
        insert_info = """
            insert into ctolist(title,url,score,hour,student,couse_long,price,updata)
            values (%s,%s,%s,%s,%s,%s,%s,%s)
            """
        params = (item['title'], item['url'], item['score'], item['hour'],
                  item['student'], item['couse_long'], item['price'], item['updata'])
        cursor.execute(insert_info, params)
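
The pipeline assumes the ctolist table already exists in the cto database. A minimal one-off setup sketch (the column types are assumptions; adjust them to your data):

import pymysql

conn = pymysql.connect(host='127.0.0.1', user='root', password='123456',
                       db='cto', charset='utf8')
with conn.cursor() as cursor:
    # id is a surrogate key; all scraped fields are stored as text
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS ctolist (
            id INT AUTO_INCREMENT PRIMARY KEY,
            title VARCHAR(255),
            url VARCHAR(255),
            score VARCHAR(32),
            hour VARCHAR(32),
            student VARCHAR(32),
            couse_long VARCHAR(64),
            price VARCHAR(32),
            updata VARCHAR(64)
        ) DEFAULT CHARSET=utf8
    """)
conn.commit()
conn.close()
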

Finally, enable the pipeline in the settings file (settings.py):

ITEM_PIPELINES = {'spiderCTO.pipelines.SpiderctoPipeline': 1}
To run the crawler on a schedule, create a scheduling script. The file below (timeout.py) is one such script; run it in a terminal and it will kick off the crawl on a timer:

python timeout.py
# -*- coding: utf-8 -*-
import schedule
import time
import subprocess

def job():
    print("Updating data...")
    # The spider is named 'MySpider'; passing the command as a list
    # avoids needing a shell.
    subprocess.Popen(['scrapy', 'crawl', 'MySpider'])

# Run the crawl every minute
schedule.every().minutes.do(job)
# Every hour
# schedule.every().hour.do(job)
# Every day at 10:40
# schedule.every().day.at("10:40").do(job)
# Every Monday
# schedule.every().monday.do(job)
# Every Wednesday at 13:15
# schedule.every().wednesday.at("13:15").do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
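
Note that subprocess.Popen returns immediately rather than waiting for the crawl to finish, and the script must be started from the project directory so that the scrapy crawl command can find scrapy.cfg.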

3. Installing Scrapy on Ubuntu

sudo apt-get install python-dev
sudo apt-get install libevent-dev
# here python3.6 should match the Python version installed on your machine
sudo apt-get install python-twisted-web python3.6-dev
pip install Scrapy

Or run the following commands instead:

sudo apt-get update
sudo apt-get install python-pip python-lxml python-crypto python-cssselect python-openssl python-w3lib python-twisted python-dev libxml2-dev libxslt1-dev zlib1g-dev libffi-dev libssl-dev
sudo pip install scrapy

Along the way, if print debugging produces no output in the console, the requests are probably being filtered out by the robots.txt middleware (so your callbacks never run); set ROBOTSTXT_OBEY to False in settings.py:

ROBOTSTXT_OBEY = False