A crawler written with Scrapy
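
The spider below crawls the galleries under http://www.4j4j.cn/beauty/ in three stages: parse() reads the tag (category) index, read_tag() walks each category's listing pages and queues every album it finds while following the "next page" link, and ReadAlbum.read_page() extracts the image URL from each picture page and saves the file into a per-category directory with wget. Download failures are counted so the crawl can back off periodically and abort once they pile up.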

import os
import sys
import time
import logging

import scrapy
import wget
from bs4 import BeautifulSoup

DIR_NAME = r'e:\meinv\images'  # local root directory for downloaded images

ATTR_HREF = 'href'
ATTR_SRC = 'src'

DOWNLOAD_FAIL_LST = []  # URLs that failed to download
MAX_FAIL_COUNT = 50     # abort the crawl once this many downloads have failed
FAIL_WAIT_COUNT = 5     # pause for a minute after every 5 failures

def init_dir(category):
    """Create (if needed) and return the image directory for one category."""
    path = DIR_NAME + os.sep + category + os.sep
    if not os.path.exists(path):
        os.makedirs(path)
    return path

def download(category, src):
    """Download one image into its category directory, skipping existing files."""
    path = init_dir(category)
    filename = path + src.split('/')[-1]
    try:
        if not os.path.isfile(filename):
            wget.download(src, filename)
    except Exception:
        DOWNLOAD_FAIL_LST.append(src)

        if len(DOWNLOAD_FAIL_LST) > MAX_FAIL_COUNT:
            print('too many download failures, aborting the crawl!')
            sys.exit(0)

        # Crude throttle: pause for a minute after every FAIL_WAIT_COUNT failures.
        if (len(DOWNLOAD_FAIL_LST) % FAIL_WAIT_COUNT) == 0:
            print('sleeping for a while before the next download ...')
            time.sleep(60)

class MeinvSpider(scrapy.Spider):
    name = 'meinv'
    allowed_domains = ['www.4j4j.cn']
    start_urls = [
        'http://www.4j4j.cn/beauty/',
    ]

    class ReadAlbum(object):
        """Per-album callback holder that remembers which category the album belongs to."""

        def __init__(self, category):
            self.category = category

        # Download the picture on a single page.
        def read_page(self, response):
            b = BeautifulSoup(response.body, 'html.parser')
            r = b.find_all(name='div', attrs={'class': 'pic-image-wrapper'})
            if not r:
                logging.warning('read page error!')
                return

            img = r[0].find_all('img')[0].get(ATTR_SRC)
            if img:
                download(self.category, img)

        # Walk the album's thumbnail list and request every picture page.
        def read_album(self, response):
            b = BeautifulSoup(response.body, 'html.parser')
            r = b.find_all(name='ul', attrs={'class': 'pic-thumb-list'})
            if not r:
                logging.warning('read album error!')
                return
            for a in r[0].find_all('a'):
                href = a.get(ATTR_HREF)
                if href:
                    yield scrapy.Request(response.urljoin(href),
                                         callback=self.read_page)

    # Parse one tag (category) listing page: request every album on it,
    # then follow the "next page" link if one exists.
    def read_tag(self, response):
        b = BeautifulSoup(response.body, 'html.parser')
        r = b.find_all(name='ul', attrs={'id': 'pic-list'})
        if len(r) == 0:
            self.logger.warning('read tag error!')
            return

        tag_t = b.find_all(name='li', attrs={'class': 'current'})
        if not tag_t:
            self.logger.warning('read tag error: no current tag!')
            return
        category = tag_t[0].find_all('a')[0].string

        for mag in r[0].find_all(name='span', attrs={'class': 'magnifier'}):
            a = mag.find_all('a')[0]
            href = a.get(ATTR_HREF)
            if href:
                ra = self.ReadAlbum(category)
                yield scrapy.Request(response.urljoin(href),
                                     callback=ra.read_album)

        # Jump to the next tag page, if it exists.
        after = b.find_all('a', attrs={'class': 'after'})
        if after:
            href = after[0].get(ATTR_HREF)
            if href:
                yield scrapy.Request(response.urljoin(href),
                                     callback=self.read_tag)

    # Collect the tag index: a mapping of tag name -> tag listing URL.
    def read_tags(self, body):
        b = BeautifulSoup(body, 'html.parser')
        r = b.find_all(name='ul', attrs={'class': 'tags clearfix'})
        if len(r) != 1:
            self.logger.warning('read tags error!')
            return {}

        ret = {}
        for a in r[0].find_all(name='a'):
            if a.string and a.get(ATTR_HREF):
                ret[a.string] = a[ATTR_HREF]
        return ret

    def parse(self, response):
        tags = self.read_tags(response.body)
        for name in tags:
            yield scrapy.Request(response.urljoin(tags[name]),
                                 callback=self.read_tag)
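
To try the spider without scaffolding a full Scrapy project, the file can be run standalone (the file name meinv_spider.py is illustrative):

scrapy runspider meinv_spider.py

If the site throttles aggressive clients, Scrapy's per-spider custom_settings attribute can slow the crawl down. A minimal sketch, assuming it is placed inside MeinvSpider (the values are illustrative):

custom_settings = {
    'DOWNLOAD_DELAY': 1.0,                # seconds to wait between requests
    'CONCURRENT_REQUESTS_PER_DOMAIN': 2,  # limit parallel requests to the site
}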
