# 2.[代码]Python 知乎日报爬虫 (Zhihu Daily crawler)
# -*- coding:utf-8 -*-
import urllib2
import re
import HTMLParser
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# Fetch the raw HTML of a URL via an HTTP request
def getHtml(url):
header={'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:14.0) Gecko/20100101 Firefox/14.0.1','Referer' : '******'}
request=urllib2.Request(url,None,header)
response=urllib2.urlopen(request)
text=response.read()
return text
# Parse the link of each daily story out of the index HTML
def getUrls(html):
pattern = re.compile('http://daily.zhihu.com/story/(.*?)" >',re.S)
items = re.findall(pattern,html)
urls = []
for item in items:
urls.append('http://daily.zhihu.com/story/' + item)
return urls
# Parse and print the content of a single daily story
""" www.iplaypy.com """
def getContent(url):
html = getHtml(url)
#先取出标题打印出来
pattern = re.compile('
(.*?)
')items = re.findall(pattern,html)
print '********************************************************************************************************************************************'
print '****************************************************'+items[0]+'****************************************************'
print '********************************************************************************************************************************************'
#开始取文章内容
pattern = re.compile('
\n(.*?)