Python 2.7 Crawler Example: Scraping Jianshu Articles (A Beginner's Guide)

Original article referenced:

The crawler is split into five modules: a main module, a URL manager, a downloader, a parser, and an outputer.

The main module first asks the URL manager for a URL and hands it to the downloader, which sends a request to that URL and receives the response. In this example, every request after the first one must send two parameters to the server. The response is passed to the parser, which extracts the data we actually need, and the data is finally handed to the outputer for printing.
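In outline, the control flow looks like this (a condensed sketch; the class and method names match the full listing later in this post):

# Condensed control-flow sketch; see the full jianshu.py below for the
# actual classes (Manager, Download, Parse, Output).
manager.add_new_url(root_url)                     # seed with the first page
while manager.has_new_url():
    url = manager.get_new_url()                   # next URL to fetch
    html = downloader.download(url)               # GET request, returns HTML
    new_url, data = parser.parse(root_url, html)  # extract articles + next URL
    manager.add_new_url(new_url)                  # queue the follow-up request
    outputer.output(data)                         # print the results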

We scrape Jianshu's newly trending articles at http://www.jianshu.com/recommendations/notes. When you scroll to the bottom of that page and click "load more" (加载更多), the page sends a GET request to the server. You can observe this with Firefox's developer tools: open Network -> XHR, and a new GET request appears:

[Screenshot: the Network -> XHR panel before clicking "load more"]

[Screenshot: the Network -> XHR panel after clicking, showing the new GET request]

Click that GET request to inspect the request URL, the request parameters, and the response data:

[Screenshot: the request URL]

[Screenshot: the request parameters]

[Screenshot: the response data]

Inspecting the page source shows that max_id is simply the data-recommended-at value of the last article in the previous response, minus 1.

[Screenshot: the page source showing the data-recommended-at and data-category-id attributes]

The value of the other parameter, data-category-id, can be read directly from the screenshot above: it is an attribute on the note list's ul element.

In other words, the first visit to Jianshu's newly trending articles can simply request http://www.jianshu.com/recommendations/notes. To load more data, we must parse the previous response to build the next URL; we use the third-party BeautifulSoup library to pull the needed values out of the HTML, as the sketch below shows.
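A minimal sketch of that step (assuming html holds the body of the previous response; the selectors match the full listing below):

from bs4 import BeautifulSoup

def next_url(root_url, html):
    # Locate the note list in the previous response.
    soup = BeautifulSoup(html, "html.parser", from_encoding="utf-8")
    ul = soup.find(id="list-container").find("ul", {"class": "note-list"})
    lis = ul.find_all("li", {"class": "have-img"})
    # max_id = the last article's data-recommended-at minus 1;
    # data_category_id comes straight off the ul element.
    max_id = int(lis[-1]["data-recommended-at"]) - 1
    category_id = ul["data-category-id"]
    return root_url + "?data_category_id=" + category_id + "&max_id=" + str(max_id)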

I have Anaconda2 installed here, and the script uses Anaconda2's Python interpreter (see the shebang line below).

jianshu.py:

#!/Users/xiaoka/anaconda2/bin/python
# coding: utf-8

import urllib2

from bs4 import BeautifulSoup


class Splider:
    """Main module: wires the other four modules together."""

    def __init__(self):
        self.manager = Manager()
        self.downloader = Download()
        self.parser = Parse()
        self.outputer = Output()

    def craw_search_word(self, root_url):
        count = 0
        self.manager.add_new_url(root_url)
        while self.manager.has_new_url():
            try:
                if count >= 10000:
                    break
                print "Loading items " + str(count) + " to " + str(count + 15)
                current_url = self.manager.get_new_url()
                html_content = self.downloader.download(current_url)
                new_url, data = self.parser.parse(root_url, html_content)
                self.manager.add_new_url(new_url)
                # self.outputer.collect(data)
                self.outputer.output(data)
                count += 15
            except urllib2.URLError, e:
                if hasattr(e, "reason"):
                    print "crawl failed, reason: " + str(e.reason)


class Manager(object):
    """URL manager: tracks pending and already-crawled URLs."""

    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, new_url):
        if new_url is None:
            return
        elif new_url not in self.new_urls and new_url not in self.old_urls:
            self.new_urls.add(new_url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url


class Download(object):
    """Downloader: fetches a URL and returns the response body."""

    def download(self, url):
        if url is None:
            return None
        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:52.0) Gecko/20100101 Firefox/52.0",
            "Cookie": "Hm_lvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1466075280; __utma=194070582.826403744.1466075281.1466075281.1466075281.1; __utmv=194070582.|2=User%20Type=Visitor=1; signin_redirect=http%3A%2F%2Fwww.jianshu.com%2Fsearch%3Fq%3D%25E7%2594%259F%25E6%25B4%25BB%26page%3D1%26type%3Dnote; _session_id=ajBLb3h5SDArK05NdDY2V0xyUTNpQ1ZCZjNOdEhvNUNicmY0b0NtMnVuUUdkRno2emEyaFNTT3pKWTVkb3ZKT1dvbTU2c3c0VGlGS0wvUExrVW1wbkg1cDZSUTFMVVprbTJ2aXhTcTdHN2lEdnhMRUNkM1FuaW1vdFpNTDFsQXgwQlNjUnVRczhPd2FQM2sveGJCbDVpQUVWN1ZPYW1paUpVakhDbFVPbEVNRWZzUXh5R1d0LzE2RkRnc0lJSHJEOWtnaVM1ZE1yMkt5VC90K2tkeGJQMlVOQnB1Rmx2TFpxamtDQnlSakxrS1lxS0hONXZnZEx0bDR5c2w4Mm5lMitESTBidWE4NTBGNldiZXVQSjhjTGNCeGFOUlpESk9lMlJUTDVibjNBUHdDeVEzMGNaRGlwYkg5bHhNeUxJUVF2N3hYb3p5QzVNTDB4dU4zODljdExnPT0tLU81TTZybUc3MC9BZkltRDBiTEsvU2c9PQ%3D%3D--096a8e4707e00b06b996e8722a58e25aa5117ee9; CNZZDATA1258679142=1544596149-1486533130-https%253A%252F%252Fwww.baidu.com%252F%7C1486561790; _ga=GA1.2.826403744.1466075281; _gat=1",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
        }
        content = ""
        try:
            request = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(request)
            content = response.read()
        except urllib2.URLError, e:
            # HTTPError carries both a code and a reason; a plain URLError
            # only has a reason.
            if hasattr(e, "reason") and hasattr(e, "code"):
                print e.code
                print e.reason
            else:
                print "request failed"
        return content


class Parse(object):
    """Parser: extracts article data and builds the next request URL."""

    def get_new_data(self, root_url, ul):
        data = set()
        lis = ul.find_all("li", {"class": "have-img"})
        for li in lis:
            cont = li.find("div", {"class": "content"})
            title = cont.find("a", {"class": "title"}).get_text()
            # Article hrefs are site-relative (/p/...), so join them to the
            # site root rather than to root_url.
            title_url = "http://www.jianshu.com" + cont.a["href"]
            data.add((title, title_url))
        return data

    def get_new_url(self, root_url, ul):
        lis = ul.find_all("li", {"class": "have-img"})
        data_category_id = ul["data-category-id"]
        # max_id is the last article's data-recommended-at value minus 1.
        max_id = int(lis[-1]["data-recommended-at"]) - 1
        new_url = root_url + "?data_category_id=" + data_category_id + "&max_id=" + str(max_id)
        return new_url

    def parse(self, root_url, content):
        soup = BeautifulSoup(content, "html.parser", from_encoding="utf-8")
        div = soup.find(id="list-container")
        ul = div.find("ul", {"class": "note-list"})
        new_url = self.get_new_url(root_url, ul)
        new_data = self.get_new_data(root_url, ul)
        return new_url, new_data


class Output(object):
    """Outputer: deduplicates collected items and prints results."""

    def __init__(self):
        self.datas = set()

    def collect(self, data):
        if data is None:
            return
        for item in data:
            if item is None or item in self.datas:
                continue
            self.datas.add(item)

    def output(self, data):
        for item in data:
            title, url = item
            print title + " " + url


if __name__ == "__main__":
    root_url = "http://www.jianshu.com/recommendations/notes"
    splider = Splider()
    splider.craw_search_word(root_url)

Give jianshu.py execute permission, then run ./jianshu.py from a terminal:
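The output then looks roughly like this (the progress lines come from craw_search_word and the title/URL pairs from Output.output; the titles and URLs below are placeholders, since the real output depends on what is currently trending):

$ chmod +x jianshu.py
$ ./jianshu.py
Loading items 0 to 15
<article title> http://www.jianshu.com/p/<slug>
...
Loading items 15 to 30
...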
