Reference post: "Python爬虫实战一之爬取糗事百科段子" (Python Crawler in Action, Part 1: Scraping Qiushibaike Jokes)
The article list to be crawled
Code:
# -*- coding: utf-8 -*-
# Python 2 code: urllib2 does not exist in Python 3
import urllib2
import re
# Send the request and return the response HTML (None on failure)
def getHtml(url):
    html = None
    try:
        req = urllib2.Request(url)
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.36')
        resp = urllib2.urlopen(req)
        html = resp.read()
    except urllib2.URLError, e:
        if hasattr(e, 'code'):
            print e.code
        if hasattr(e, 'reason'):
            print e.reason
    return html
# Walk the per-page link lists and crawl each article
def parserLinks(links):
    for ls in links:  # one list of article links per listing page
        for l in ls:
            getContent(l)
# Fetch an article by its relative link and print its title, date,
# source, digest and body
def getContent(l):
    url = 'http://www.xinnong.com' + l
    html = getHtml(url)
    if html is None:
        return
    # re.S makes '.' match newlines too; the non-greedy '.*?' groups
    # capture title, date, source, digest and article body in order
    pattern = re.compile('<div.*?class="arctit">.*?<h1>(.*?)</h1>.*?<div.*?class="arcinfo">(.*?) (.*?)</div>'
                         + '.*?<div.*?class="arcdes">(.*?)</div>.*?</div>.*?<div.*?class="arcont" id="article">(.*?)</div>', re.S)
    items = pattern.findall(html)
    for item in items:
        print "url =", url, "title =", item[0], "date =", item[1], "from =", item[2], "digest =", item[3], item[4]
# Collect article links from the paginated listing pages [startPage, endPage)
def getLinks(startPage, endPage):
    my_links = []
    for i in range(startPage, endPage):
        url = 'http://www.xinnong.com/jishu/liangyou/xiaomai/'
        url += 'p' + str(i) + '.shtml'
        html = getHtml(url)
        if html is None:
            continue
        # Keep only the block between the 'newslist' container and the
        # 'lstpage' pager; the -12/-18 offsets account for the surrounding
        # markup and are tied to this page's exact layout
        newslist_index = html.find('newslist')
        lstpage_index = html.find('lstpage', newslist_index)
        html = html[newslist_index - 12 : lstpage_index - 18]
        linkPattern = re.compile('href="(.+?)"')
        links = linkPattern.findall(html)
        my_links.append(links)
    return my_links
# range(1, 2) yields only page 1; widen the range to crawl more pages
my_links = getLinks(1, 2)
parserLinks(my_links)
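
Note that the listing above is Python 2 only: urllib2 was split into urllib.request and urllib.error in Python 3, and print became a function. A minimal Python 3 sketch of getHtml, assuming the page decodes as UTF-8 (the site may actually serve GBK; adjust the codec if so):

import urllib.request
import urllib.error

def get_html(url):
    # Python 3 counterpart of getHtml: Request/urlopen now live in
    # urllib.request, URLError in urllib.error
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) '
               'AppleWebKit/537.36 (KHTML, like Gecko) '
               'Chrome/38.0.2125.101 Safari/537.36'}
    req = urllib.request.Request(url, headers=headers)
    try:
        with urllib.request.urlopen(req) as resp:
            # utf-8 is an assumption; a GBK page would need .decode('gbk')
            return resp.read().decode('utf-8', errors='replace')
    except urllib.error.URLError as e:
        if hasattr(e, 'code'):
            print(e.code)
        if hasattr(e, 'reason'):
            print(e.reason)
        return None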
Crawl results: one line per article, in the format printed by getContent (url, title, date, source and digest, followed by the article body).
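
A closing note on robustness: slicing the page with hard-coded offsets (the -12 and -18 above) and regex matching both break as soon as the site's markup changes. Below is a minimal Python 3 sketch of the link-extraction step using the standard library's html.parser instead; the <div class="newslist"> container is assumed from the original find('newslist') call, not verified against the live site.

from html.parser import HTMLParser

class NewslistLinkParser(HTMLParser):
    # Collects href values of <a> tags inside a <div class="newslist">
    def __init__(self):
        super().__init__()
        self.depth = 0    # nesting depth inside the newslist container
        self.links = []

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if tag == 'div':
            if self.depth > 0:
                self.depth += 1          # nested div inside the container
            elif 'newslist' in (attrs.get('class') or ''):
                self.depth = 1           # entered the container
        elif tag == 'a' and self.depth > 0 and 'href' in attrs:
            self.links.append(attrs['href'])

    def handle_endtag(self, tag):
        if tag == 'div' and self.depth > 0:
            self.depth -= 1

# Usage: feed it the listing page's HTML and read .links
# parser = NewslistLinkParser(); parser.feed(html); print(parser.links)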