系列 —— Python爬虫实战
题目 —— 抓取百度贴吧帖子
语言 —— Python
目标 —— 1、抓取帖子
2、指定是否只看楼主
3、将抓取的内容保存文件
步骤
1、踩点,分析目标URL
此处选取明信片吧的一个帖子:http://tieba.baidu.com/p/3939688645?see_lz=1&pn=1
分析一下:
http:// 代表资源传输使用http协议
tieba.baidu.com 是百度的二级域名,指向百度贴吧的服务器。
/p/3939688645 是服务器某个资源,即这个帖子的地址定位符
see_lz和pn是该URL的两个参数,分别代表了只看楼主和帖子页码,等于1表示该条件为真
所以我们可以把URL分为两部分,一部分为基础部分,一部分为参数部分。
2、页面抓取
编写代码:
=====================================================
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
class BDTB:
def __init__(self,baseURL,seeLZ):
self.baseURL = baseURL
self.seeLZ = '?see_lz='+str(seeLZ)
def getPage(self,pageNum):
try:
url = self.baseURL+self.seeLZ + '&pn=' + str(pageNum)
request = urllib2.Request(url)
response = urllib2.urlopen(request)
print response.read()
return response
except urllib2.URLError,e:
if hasattr(e,"reason"):
print "fail to connect,reason:",e.reason
return None
# Demo driver: fetch page 1 of a fixed thread in author-only mode.
baseURL = 'http://tieba.baidu.com/p/3939688645'
bdtb = BDTB(baseURL,1)
bdtb.getPage(1)
=====================================================
结果:运行成功,返回指定页面HTML代码
3、提取相关信息
1)提取标题
所以增加一个获取标题的方法:
=======================================================
def getTitle(self):
    """Return the thread title scraped from page 1, or None if not found."""
    # NOTE(review): assumes getPage returns the page HTML as a string — confirm.
    html = self.getPage(1)
    titlePattern = re.compile('<h1 class="core_title_txt.*?>(.*?)</h1>', re.S)
    match = re.search(titlePattern, html)
    if match:
        return match.group(1).strip()
    return None
=========================================================
2)提取页码
增加一个获取总页数的方法:
===========================================================
#获取帖子一共有多少页
def getPageNum(self):
    """Return the thread's total page count (as a string), or None if not found."""
    # NOTE(review): assumes getPage returns the page HTML as a string — confirm.
    html = self.getPage(1)
    numPattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>', re.S)
    match = re.search(numPattern, html)
    return match.group(1).strip() if match else None
============================================================
3)提取正文
先增加一个去除各种多余的标签的方法:
===========================================================
class Tool:
    """Strip/convert HTML markup in a post body, yielding plain text."""
    # Image tags and 7-space runs are removed outright.
    removeImg = re.compile('<img.*?>| {7}|')
    # Anchor open/close tags are removed, keeping the link text.
    removeAddr = re.compile('<a.*?>|</a>')
    # Block-level boundaries become newlines.
    replaceLine = re.compile('<tr>|<div>|</div>|</p>')
    # Table cells become tabs.
    replaceTD = re.compile('<td>')
    # Paragraph opens become a newline plus indent space.
    replacePara = re.compile('<p.*?>')
    # Line breaks become newlines; a double <br> collapses to a single one.
    # (fixed: first alternative was misspelled '<br><brr>' and could never match)
    replaceBR = re.compile('<br><br>|<br>')
    # Any leftover tag is stripped.
    removeExtraTag = re.compile('<.*?>')

    def replace(self, x):
        """Return *x* with markup removed/converted per the patterns above."""
        x = re.sub(self.removeImg, "", x)
        x = re.sub(self.removeAddr, "", x)
        x = re.sub(self.replaceLine, "\n", x)
        x = re.sub(self.replaceTD, "\t", x)
        x = re.sub(self.replacePara, "\n ", x)
        x = re.sub(self.replaceBR, "\n", x)
        # fixed: was re.sun(...), an AttributeError at runtime
        x = re.sub(self.removeExtraTag, "", x)
        return x.strip()
===========================================================
然后获取正文
========================================================
# Print every post body found in *page*, numbered by floor (1-based).
def getContent(self,page):
# Each post body sits in a <div id="post_content_...">...</div>.
pattern = re.compile('<div id="post_content_.*?>(.*?)</div>',re.S)
items = re.findall(pattern,page)
floor = 1
for item in items:
# u"楼" means "floor"; a separator header is printed before each post.
print floor,u"楼------------------------------------------------------------------------------------------------------------------------------------\n"
# self.tool is presumably a Tool instance that strips HTML markup — confirm.
print self.tool.replace(item)
floor += 1
========================================================
至此,测试代码,目前成功抓取到想要的内容
4、完善代码,并写入文件
=========================================================
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
class Tool:
    """Strip/convert HTML markup in a post body, yielding plain text."""
    # Image tags and 7-space runs are removed outright.
    removeImg = re.compile('<img.*?>| {7}|')
    # Anchor open/close tags are removed, keeping the link text.
    removeAddr = re.compile('<a.*?>|</a>')
    # Block-level boundaries become newlines.
    replaceLine = re.compile('<tr>|<div>|</div>|</p>')
    # Table cells become tabs.
    replaceTD = re.compile('<td>')
    # Paragraph opens become a newline plus indent space.
    replacePara = re.compile('<p.*?>')
    # Line breaks become newlines; a double <br> collapses to a single one.
    # (fixed: first alternative was misspelled '<br><brr>' and could never match)
    replaceBR = re.compile('<br><br>|<br>')
    # Any leftover tag is stripped.
    removeExtraTag = re.compile('<.*?>')

    def replace(self, x):
        """Return *x* with markup removed/converted per the patterns above."""
        x = re.sub(self.removeImg, "", x)
        x = re.sub(self.removeAddr, "", x)
        x = re.sub(self.replaceLine, "\n", x)
        x = re.sub(self.replaceTD, "\t", x)
        x = re.sub(self.replacePara, "\n ", x)
        x = re.sub(self.replaceBR, "\n", x)
        x = re.sub(self.removeExtraTag, "", x)
        return x.strip()
class BDTB:
def __init__(self,baseURL,seeLZ,floorTag):
self.baseURL = baseURL
self.seeLZ = '?see_lz='+str(seeLZ)
self.tool = Tool()
self.file = None
self.floor = 1
self.sefaultTitle = u"百度贴吧" #默认的标题,如果没有成功获取到标题的话则会用这个标题
self.floorTag = floorTag
def getPage(self,pageNum):
try:
url = self.baseURL+self.seeLZ + '&pn=' + str(pageNum)
request = urllib2.Request(url)
response = urllib2.urlopen(request)
return response.read().decode('utf-8')
except urllib2.URLError,e:
if hasattr(e,"reason"):
print "fail to connect,reason:",e.reason
return None
def getTitle(self,page):
pattern = re.compile('<h1 class="core_title_txt.*?>(.*?)</h1>',re.S)
result = re.search(pattern,page)
if result:
return result.group(1).strip()
else:
return None
def getPageNum(self,page):
pattern = re.compile('<li class="l_reply_num".*?>*?</span>.*?<span.*?>(.*?)</span>')
result = re.search(pattern,page)
if result:
return result.group(1).strip()
else:
return None
def getContent(self,page):
pattern = re.compile('<div id="post_content_.*?>(.*?)</div>',re.S)
items = re.findall(pattern,page)
contents = []
for item in items:
content = "\n"+self.tool.replace(item)+"\n"
contents.append(content.encode('utf-8'))
return contents
def setFileTitle(self,title):
if title is not None:
self.file = open(title + ".txt","w+")
else:
self.file = open(self.defaultTitle + ".txt","w+")
def writeData(self,contents):
for item in contents:
if self.floorTag == '1':
floorLine = "\n" + str(self.floor) + u"-----------------------------------------------------------------------------------------\n"
self.file.write(floorLine)
self.file.write(item)
self.floor += 1
def start(self):
indexPage = self.getPage(1)
pageNum = self.getPageNum(indexPage)
title = self.getTitle(indexPage)
self.setFileTitle(title)
if pageNum == None:
print "URL已失效,请重试"
return
try:
print "该帖子共有" + str(pageNum) + "页"
for i in range(1,int(pageNum)+1):
print "正在写入第" + str(i) + "页数据"
page = self.getPage(i)
contents = self.getContent(page)
self.writeData(contents)
except IOError,e:
print "写入异常,原因:" + e.message
finally:
print "写入任务完成!!!"
# Interactive entry point: prompt for the thread id and options, then scrape.
print u"请输入帖子代号"
# The thread id is appended to the fixed base URL (the prompt shows the prefix).
baseURL = 'http://tieba.baidu.com/p/' + str(raw_input(u'http://tieba.baidu.com/p/'))
# '1' => only the original poster's posts; '0' => everyone's posts.
seeLZ = raw_input("是否只获取楼主发言,是输入1,否输入0\n")
# '1' => write a numbered separator line before each floor.
floorTag = raw_input("是否写入楼层信息,是输入1,否输入0\n")
bdtb = BDTB(baseURL,seeLZ,floorTag)
bdtb.start()
================================================================================
测试结果如下:
题目 —— 抓取百度贴吧帖子
语言 —— Python
目标 —— 1、抓取帖子
2、指定是否只看楼主
3、将抓取的内容保存文件
步骤
1、踩点,分析目标URL
此处选取明信片吧的一个帖子:http://tieba.baidu.com/p/3939688645?see_lz=1&pn=1
分析一下:
http:// 代表资源传输使用http协议
tieba.baidu.com 是百度的二级域名,指向百度贴吧的服务器。
/p/3939688645 是服务器某个资源,即这个帖子的地址定位符
see_lz和pn是该URL的两个参数,分别代表了只看楼主和帖子页码,等于1表示该条件为真
所以我们可以把URL分为两部分,一部分为基础部分,一部分为参数部分。
2、页面抓取
编写代码:
=====================================================
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
class BDTB:
def __init__(self,baseURL,seeLZ):
self.baseURL = baseURL
self.seeLZ = '?see_lz='+str(seeLZ)
def getPage(self,pageNum):
try:
url = self.baseURL+self.seeLZ + '&pn=' + str(pageNum)
request = urllib2.Request(url)
response = urllib2.urlopen(request)
print response.read()
return response
except urllib2.URLError,e:
if hasattr(e,"reason"):
print "fail to connect,reason:",e.reason
return None
# Demo driver: fetch page 1 of a fixed thread in author-only mode.
baseURL = 'http://tieba.baidu.com/p/3939688645'
bdtb = BDTB(baseURL,1)
bdtb.getPage(1)
=====================================================
结果:运行成功,返回指定页面HTML代码
3、提取相关信息
1)提取标题
分析源代码可知标题所在代码段如下:
所以增加一个获取标题的方法:
=======================================================
def getTitle(self):
    """Return the thread title scraped from page 1, or None if not found."""
    # NOTE(review): assumes getPage returns the page HTML as a string — confirm.
    html = self.getPage(1)
    titlePattern = re.compile('<h1 class="core_title_txt.*?>(.*?)</h1>', re.S)
    match = re.search(titlePattern, html)
    if match:
        return match.group(1).strip()
    return None
=========================================================
2)提取页码
分析源代码,代码段如下:
增加一个获取总页数的方法:
===========================================================
#获取帖子一共有多少页
def getPageNum(self):
    """Return the thread's total page count (as a string), or None if not found."""
    # NOTE(review): assumes getPage returns the page HTML as a string — confirm.
    html = self.getPage(1)
    numPattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>', re.S)
    match = re.search(numPattern, html)
    return match.group(1).strip() if match else None
============================================================
3)提取正文
分析源代码,代码段如下
先增加一个去除各种多余的标签的方法:
===========================================================
class Tool:
    """Strip/convert HTML markup in a post body, yielding plain text."""
    # Image tags and 7-space runs are removed outright.
    removeImg = re.compile('<img.*?>| {7}|')
    # Anchor open/close tags are removed, keeping the link text.
    removeAddr = re.compile('<a.*?>|</a>')
    # Block-level boundaries become newlines.
    replaceLine = re.compile('<tr>|<div>|</div>|</p>')
    # Table cells become tabs.
    replaceTD = re.compile('<td>')
    # Paragraph opens become a newline plus indent space.
    replacePara = re.compile('<p.*?>')
    # Line breaks become newlines; a double <br> collapses to a single one.
    # (fixed: first alternative was misspelled '<br><brr>' and could never match)
    replaceBR = re.compile('<br><br>|<br>')
    # Any leftover tag is stripped.
    removeExtraTag = re.compile('<.*?>')

    def replace(self, x):
        """Return *x* with markup removed/converted per the patterns above."""
        x = re.sub(self.removeImg, "", x)
        x = re.sub(self.removeAddr, "", x)
        x = re.sub(self.replaceLine, "\n", x)
        x = re.sub(self.replaceTD, "\t", x)
        x = re.sub(self.replacePara, "\n ", x)
        x = re.sub(self.replaceBR, "\n", x)
        # fixed: was re.sun(...), an AttributeError at runtime
        x = re.sub(self.removeExtraTag, "", x)
        return x.strip()
===========================================================
然后获取正文
========================================================
# Print every post body found in *page*, numbered by floor (1-based).
def getContent(self,page):
# Each post body sits in a <div id="post_content_...">...</div>.
pattern = re.compile('<div id="post_content_.*?>(.*?)</div>',re.S)
items = re.findall(pattern,page)
floor = 1
for item in items:
# u"楼" means "floor"; a separator header is printed before each post.
print floor,u"楼------------------------------------------------------------------------------------------------------------------------------------\n"
# self.tool is presumably a Tool instance that strips HTML markup — confirm.
print self.tool.replace(item)
floor += 1
========================================================
至此,测试代码,目前成功抓取到想要的内容
4、完善代码,并写入文件
=========================================================
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
class Tool:
    """Strip/convert HTML markup in a post body, yielding plain text."""
    # Image tags and 7-space runs are removed outright.
    removeImg = re.compile('<img.*?>| {7}|')
    # Anchor open/close tags are removed, keeping the link text.
    removeAddr = re.compile('<a.*?>|</a>')
    # Block-level boundaries become newlines.
    replaceLine = re.compile('<tr>|<div>|</div>|</p>')
    # Table cells become tabs.
    replaceTD = re.compile('<td>')
    # Paragraph opens become a newline plus indent space.
    replacePara = re.compile('<p.*?>')
    # Line breaks become newlines; a double <br> collapses to a single one.
    # (fixed: first alternative was misspelled '<br><brr>' and could never match)
    replaceBR = re.compile('<br><br>|<br>')
    # Any leftover tag is stripped.
    removeExtraTag = re.compile('<.*?>')

    def replace(self, x):
        """Return *x* with markup removed/converted per the patterns above."""
        x = re.sub(self.removeImg, "", x)
        x = re.sub(self.removeAddr, "", x)
        x = re.sub(self.replaceLine, "\n", x)
        x = re.sub(self.replaceTD, "\t", x)
        x = re.sub(self.replacePara, "\n ", x)
        x = re.sub(self.replaceBR, "\n", x)
        x = re.sub(self.removeExtraTag, "", x)
        return x.strip()
class BDTB:
def __init__(self,baseURL,seeLZ,floorTag):
self.baseURL = baseURL
self.seeLZ = '?see_lz='+str(seeLZ)
self.tool = Tool()
self.file = None
self.floor = 1
self.sefaultTitle = u"百度贴吧" #默认的标题,如果没有成功获取到标题的话则会用这个标题
self.floorTag = floorTag
def getPage(self,pageNum):
try:
url = self.baseURL+self.seeLZ + '&pn=' + str(pageNum)
request = urllib2.Request(url)
response = urllib2.urlopen(request)
return response.read().decode('utf-8')
except urllib2.URLError,e:
if hasattr(e,"reason"):
print "fail to connect,reason:",e.reason
return None
def getTitle(self,page):
pattern = re.compile('<h1 class="core_title_txt.*?>(.*?)</h1>',re.S)
result = re.search(pattern,page)
if result:
return result.group(1).strip()
else:
return None
def getPageNum(self,page):
pattern = re.compile('<li class="l_reply_num".*?>*?</span>.*?<span.*?>(.*?)</span>')
result = re.search(pattern,page)
if result:
return result.group(1).strip()
else:
return None
def getContent(self,page):
pattern = re.compile('<div id="post_content_.*?>(.*?)</div>',re.S)
items = re.findall(pattern,page)
contents = []
for item in items:
content = "\n"+self.tool.replace(item)+"\n"
contents.append(content.encode('utf-8'))
return contents
def setFileTitle(self,title):
if title is not None:
self.file = open(title + ".txt","w+")
else:
self.file = open(self.defaultTitle + ".txt","w+")
def writeData(self,contents):
for item in contents:
if self.floorTag == '1':
floorLine = "\n" + str(self.floor) + u"-----------------------------------------------------------------------------------------\n"
self.file.write(floorLine)
self.file.write(item)
self.floor += 1
def start(self):
indexPage = self.getPage(1)
pageNum = self.getPageNum(indexPage)
title = self.getTitle(indexPage)
self.setFileTitle(title)
if pageNum == None:
print "URL已失效,请重试"
return
try:
print "该帖子共有" + str(pageNum) + "页"
for i in range(1,int(pageNum)+1):
print "正在写入第" + str(i) + "页数据"
page = self.getPage(i)
contents = self.getContent(page)
self.writeData(contents)
except IOError,e:
print "写入异常,原因:" + e.message
finally:
print "写入任务完成!!!"
# Interactive entry point: prompt for the thread id and options, then scrape.
print u"请输入帖子代号"
# The thread id is appended to the fixed base URL (the prompt shows the prefix).
baseURL = 'http://tieba.baidu.com/p/' + str(raw_input(u'http://tieba.baidu.com/p/'))
# '1' => only the original poster's posts; '0' => everyone's posts.
seeLZ = raw_input("是否只获取楼主发言,是输入1,否输入0\n")
# '1' => write a numbered separator line before each floor.
floorTag = raw_input("是否写入楼层信息,是输入1,否输入0\n")
bdtb = BDTB(baseURL,seeLZ,floorTag)
bdtb.start()
================================================================================
测试结果如下: