Using Django on GAE to Implement a Small Web Crawler

Google App Engine and Django can both run applications through the WSGI standard, so almost the entire Django stack, including the middleware, can be used on Google App Engine. The only adjustment a developer needs to make is to rewrite the Django data models against the Google App Engine datastore API so that they work with the fast, scalable App Engine datastore. The model concepts of Django and App Engine are similar, so as a Django developer you can adapt an application to the GAE datastore quickly.
Google App Engine ships with Django 0.96 by default, although you can also upgrade to 1.0. To run on GAE, Django's own models cannot be used, so I make do with Google's; the views and the URLconf, on the other hand, remain perfectly usable. Below is part of my code, with explanations.
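To make that model adjustment concrete, here is a rough comparison sketch (not taken from the attachment; the Listing class and its fields are made-up illustration names): the same entity written first as a plain Django ORM model and then against the App Engine datastore API.

# Plain Django ORM model -- this is what cannot be used on App Engine:
#
#   from django.db import models
#
#   class Listing(models.Model):
#       title = models.CharField(max_length=200)
#       price = models.FloatField()

# A rough App Engine equivalent, written with the datastore API:
from google.appengine.ext import db

class Listing(db.Model):
    title = db.StringProperty()
    price = db.FloatProperty()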
app.yaml:

application: mydjango-test
version: 2
runtime: python
api_version: 1
handlers:
- url: /.*
  script: bootstrap.py

Note that application must match the application name you registered on GAE; otherwise the upload will not go through.
Next comes a very important module, bootstrap.py. We do not need to know exactly how it works internally: once a request reaches the site, it maps the URL over to Django automatically, so nothing else has to be configured here. From that point on, our Django URLconf takes over.
The code of my views module:
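For reference, a typical bootstrap.py for this kind of setup (a minimal sketch of the standard way to run Django on the App Engine Python runtime, not copied from the attachment) looks like this:

import os

# Point Django at the settings module before anything imports it.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

from google.appengine.ext.webapp import util
import django.core.handlers.wsgi


def main():
    # Wrap the Django project in a WSGI application and hand it to
    # App Engine; from here on Django's URLconf does the routing.
    application = django.core.handlers.wsgi.WSGIHandler()
    util.run_wsgi_app(application)


if __name__ == '__main__':
    main()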

# -*- coding: utf-8 -*-
from google.appengine.ext import db
from django import http
from django import shortcuts
from datetime import datetime
from google.appengine.api import urlfetch
import re
from BeautifulSoup import BeautifulSoup

# Mapping from Chinese numerals (number of rooms) to integers.
# The name is kept from the original code even though it shadows the built-in dict.
dict = {u'一': 1, u'两': 2, u'三': 3, u'四': 4, u'五': 5}


class Sources(db.Model):
    #id = db.IntegerProperty(required=True)
    name = db.StringProperty(required=True)


class Articles(db.Model):
    #id = db.IntegerProperty()
    titles = db.StringProperty()      # required=True
    link = db.LinkProperty()          # required=True
    source_id = db.IntegerProperty()
    content = db.TextProperty()
    price = db.FloatProperty()
    region = db.StringProperty()
    subregion = db.StringProperty()
    area = db.IntegerProperty()
    area_name = db.StringProperty()
    community = db.StringProperty()
    room_layout = db.IntegerProperty()
    pub_date = db.DateTimeProperty()
    crawl_date = db.DateTimeProperty()
    original_id = db.IntegerProperty()

def ReturnText(element):
    """Recursively collect the text content of a BeautifulSoup node."""
    text = ""
    try:
        elements = element.contents
        for e in elements:
            text += ReturnText(e).strip()
    except AttributeError:
        # A leaf node (NavigableString) has no .contents; it is the text itself.
        text = element
    return text


def parse(data):
    """Extract the listing rows from the 58.com index page."""
    data = data.decode("utf8")
    re_s = u"""<tr>\s+<td class="center" style="width:85px"><b>(\d+)</b></td>\s+<td class="center" style="width:85px">(\d)室(\d)厅(\d)卫</td>\s+<td class="t"><a href="(.{1,300}?)" target="_blank" class="t">(.{1,300}?)</a>.{1,300}?<td class="center" style="width:100px">(.{1,300}?)</td>\s+</tr>"""
    items = re.compile(re_s, re.M | re.S).findall(data)
    return items

def sub_parse(data):
    """Parse one listing's detail page and return its fields."""
    soup = BeautifulSoup(data)
    sTag = soup.find('span', {'class': 'updatedate'})
    p = soup.find('span', {'class': 'rednum'})
    price = 0.0
    if p:
        p = p.string.strip()
        try:
            price = float(p)
        except ValueError:
            price = 0.0

    str_date = sTag.string.strip().split(u":")[-1]
    pub_date = datetime.strptime(str_date, '%Y-%m-%d %H:%M')
    cTag = soup.find('code')
    content = ReturnText(cTag)
    table = soup.find('table', {'class': 'boxleft'})
    trs = table.findAll('tr')
    aTag = trs[0].findAll('a')
    if len(aTag) == 1:
        subregion = aTag[0].string.strip()
        area_name = ''
    else:
        subregion = aTag[0].string.strip()
        area_name = aTag[1].string.strip()
    try:
        community = trs[1].find('a').string.strip()
    except AttributeError:
        dTag = trs[1].findAll('td')[-1]
        community = dTag.string.strip()

    # Room layout, e.g. u'两室一厅' -> 2 rooms and 1 living room,
    # stored as a single combined integer.
    room_layout = 0
    for tr in trs[2:]:
        txt = tr.find('td').string.strip()
        if txt == u'居室:' or txt == u'房型:':
            tmp = tr.findAll('td')[-1].string.strip()
            num = tmp.split(u'室')[0]
            value = 0
            for key in dict.keys():
                if key == num:
                    value = dict[key]
                    break
            n = re.sub('\D', '', tmp)
            room_layout = int(str(value) + str(n))
            break

    # Floor area in square metres; 0 if the page does not list one.
    for tr in trs[2:]:
        txt = tr.find('td').string.strip()
        if txt == u'建筑面积:' or txt == u'合租情况:':
            tmp = tr.findAll('td')[-1].string.strip()
            area = re.sub('\D', '', tmp)
            if area == u'':
                area = 0
            else:
                area = int(area)
            break
    else:
        area = 0

    return (price, pub_date, content, subregion, area_name, community, area, room_layout)

def deal_func(item):
    """Fetch one listing's detail page and save it as an Articles entity."""
    try:
        price = float(item[0])
    except ValueError:
        price = 0.0  # FloatProperty only accepts float values
    link = item[4]
    tmp = link.split('/')[-1]
    original = re.sub('\D', '', tmp)
    original = int(original)
    title = item[5]

    data = open_page(link)
    result = sub_parse(data)

    source_id = 1
    article = Articles()
    article.price = price
    article.titles = title
    article.link = link
    article.pub_date = result[1]
    article.source_id = source_id
    article.content = result[2]
    article.region = 'Beijing'
    article.subregion = result[3]
    article.area = result[6]
    article.area_name = result[4]
    article.community = result[5]
    article.room_layout = result[7]
    article.crawl_date = datetime.now()
    article.original_id = original
    article.put()


def open_page(url):
    """Fetch a URL through the App Engine urlfetch service."""
    res = urlfetch.fetch(url)
    data = res.content
    return data

def home_page(request):
    """Crawl the 58.com Haidian rental listing page and store every item."""
    url = "http://bj.58.com/haidian/zufang/0/"
    data = open_page(url)
    items = parse(data)
    for item in items:
        deal_func(item)
    variable = 'If you want to check the data, please click the link'
    return shortcuts.render_to_response('index.html', {'variable': variable})


def result_page(request):
    """Show everything that has been crawled so far."""
    temp = db.GqlQuery("SELECT * FROM Articles")
    results = temp.fetch(1000)
    return shortcuts.render_to_response('result.html', {'results': results})
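The views above render two templates, index.html and result.html, which are not shown in this post (they are in the attachment). As a purely hypothetical sketch, a minimal result.html that lists the stored entities could look something like the following; the markup is mine, only the results context variable comes from result_page:

<html>
  <body>
    <h1>Crawled articles</h1>
    <ul>
    {% for article in results %}
      <li>
        <a href="{{ article.link }}">{{ article.titles }}</a>
        - {{ article.price }} ({{ article.subregion }} {{ article.community }})
      </li>
    {% endfor %}
    </ul>
  </body>
</html>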

The urls module:

from django.conf.urls.defaults import *
from views import *

urlpatterns = patterns('',
    (r'^$', home_page),
    (r'^result$', result_page),
)

The settings module:

import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Chongqing'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html

LANGUAGE_CODE = 'en-US'
FILE_CHARSET = 'utf-8'
DEFAULT_CHARSET = 'utf-8'
SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = '^ab3&g(clwew7@#=*&-$4i$09kirn&q$1$h6)^*8^9pdblpm&f'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    #'django.template.loaders.app_directories.load_template_source',
    #'django.template.loaders.eggs.load_template_source',
)

MIDDLEWARE_CLASSES = (
    #'django.middleware.common.CommonMiddleware',
    #'django.contrib.sessions.middleware.SessionMiddleware',
    #'django.contrib.auth.middleware.AuthenticationMiddleware',
)

ROOT_URLCONF = 'urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(os.path.dirname(__file__), 'templates'),
)

INSTALLED_APPS = (
    #'django.contrib.auth',
    'django.contrib.contenttypes',
    #'django.contrib.sessions',
    'django.contrib.sites',
    #'website.mywebsite',
)

Note that because this runs on GAE, many of the admin components and some of the middleware cannot be used, so I have commented them out.
A few small details: I originally planned to write all the parsing with plain regular expressions, but that turned out to be far too painful; once I discovered BeautifulSoup, the workload dropped considerably. The attachment contains all of the source code, including the BeautifulSoup-based crawler for 58.com, so feel free to download it and browse at your leisure. If you are not familiar with BeautifulSoup, the documentation at http://www.crummy.com/software/BeautifulSoup/documentation.html is a good place to start.