Baidu Tieba Crawler

#!/usr/bin/env python
#coding=utf-8
import httplib2
import json
from lxml import etree

def replace(s):
	# Turn a relative thread link such as '/p/123456' into an absolute URL.
	s = s.replace('/p/', 'http://tieba.baidu.com/p/')
	return s

def openhttp(url):
	# Fetch a page with httplib2, caching responses in the '.cache' directory.
	h2 = httplib2.Http('.cache')
	(resp2, html) = h2.request(url, 'GET')
	return html

def store_file(reply_sum):
	# Append one thread to the global output file: the topic on its own line,
	# then one tab-separated line per floor.
	filehandle.write(reply_sum['topic'])
	filehandle.write('\n')
	for i in reply_sum['every_floor']:
		try:
			filehandle.write(str(i['floor']))
			filehandle.write('\t')
			filehandle.write(str(i['id']))
			filehandle.write('\t')
			filehandle.write(i['name'])
			filehandle.write('\t')
			filehandle.write(i['content'])
			filehandle.write('\t')
			filehandle.write(i['time'])
			filehandle.write('\n')
		except KeyError:
			# Floors that failed to parse are stored as empty dicts; skip them.
			continue

def parse_link(topic, link):
	# Crawl every page of one thread and collect each floor's author id,
	# author name, content and post time.
	original_link = link
	sub_html = openhttp(link)
	now_page = etree.HTML(sub_html.decode('gbk'))
	total_page = int(now_page.xpath(u'//*[@class="l_reply_num"]/span')[0].text)
	print "Total pages: %d" % total_page
	print 'start=========================='
	print "Topic: %s" % topic.encode('utf8')
	floor = 0
	reply_list = []
	reply_sum = {}
	reply_sum['topic'] = topic.encode('utf8')
	n = 1
	while n <= total_page:
		# Reply pages of a thread are paginated with ?pn=1, 2, 3 ...
		link = original_link + '?pn=' + str(n)
		print 'Fetching:'
		print link
		sub_html = openhttp(link)
		now_page = etree.HTML(sub_html.decode('gbk'))
		# Every floor is a div whose class is "l_post " or "l_post noborder".
		replies = now_page.xpath(u'//*[@class="l_post "]|//*[@class="l_post noborder"]')
		for reply in replies:
			reply_dict = {}
			try:
				print "Floor: %d" % (floor + 1)
				contents = reply.xpath(u'descendant::div[@class="d_post_content j_d_post_content"]')
				# Author and post metadata live as JSON in the div's data-field attribute.
				json_str = reply.attrib['data-field']
				author_data = json.loads(json_str)
				reply_dict['floor'] = floor + 1
				reply_dict['id'] = author_data["author"]["id"]
				reply_dict['name'] = author_data["author"]["name"].encode('utf8')
				reply_dict['time'] = author_data["content"]["date"].encode('utf8')
				reply_dict['content'] = contents[0].text.encode('utf8')
			except (KeyError, IndexError, ValueError, AttributeError):
				# Ads and deleted posts lack the expected fields; record an empty floor.
				pass
			reply_list += [reply_dict]
			floor = floor + 1
		n = n + 1
	# Debug dump of everything collected for this thread.
	for i in reply_list:
		for m in i:
			print i[m]
	reply_sum['every_floor'] = reply_list
	store_file(reply_sum)


# Example thread page: http://tieba.baidu.com/p/2259628273?pn=2
# (post content divs use class "d_post_content j_d_post_content")
def main():
	# Forum list pages are paginated with pn=0, 50, 100, ... (50 threads per
	# page); as written the loop only fetches the first page.
	pn = 0
	while pn < 50:
		# kw=%B0%CD%C0%E5%B5%BA is the GBK-encoded forum name (巴厘岛).
		url = 'http://tieba.baidu.com/f?kw=%B0%CD%C0%E5%B5%BA&pn=' + str(pn)
		print url
		main_html = openhttp(url)
		page = etree.HTML(main_html.decode('gbk'))
		# Thread links on the list page are <a class="j_th_tit" target="_blank">.
		p = page.xpath(u'//a[@target="_blank"][@class="j_th_tit"]')
		for h in p:
			# Read the attributes by name instead of relying on their order.
			link = replace(h.get('href'))
			topic = h.get('title')
			parse_link(topic, link)
			print topic.encode('utf8')
		pn = pn + 50
	

if __name__=='__main__':
	filehandle = open('aaaaaaaaa.txt','w')
	main()
	filehandle.close()
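
For reference, below is a minimal, self-contained sketch of the extraction step the crawler relies on: each floor's div carries its metadata as JSON in the data-field attribute, which json.loads() turns into the author id, author name and post date used above. The HTML snippet and the values in it are made up purely for illustration; only the attribute name and class names come from the script itself.

#!/usr/bin/env python
# Illustrative only: a made-up floor div with the same attribute layout
# that parse_link() above reads from real Tieba pages.
import json
from lxml import etree

sample_html = '''
<div class="l_post " data-field='{"author": {"id": 12345, "name": "some_user"}, "content": {"date": "2013-05-01 12:00"}}'>
	<div class="d_post_content j_d_post_content">hello from floor 1</div>
</div>
'''

page = etree.HTML(sample_html)
for reply in page.xpath(u'//*[@class="l_post "]'):
	# The JSON blob in data-field holds the author and post metadata.
	data = json.loads(reply.attrib['data-field'])
	content = reply.xpath(u'descendant::div[@class="d_post_content j_d_post_content"]')[0]
	print data["author"]["id"], data["author"]["name"], data["content"]["date"]
	print content.text

Running this prints the sample id, name and date on one line and the floor's text on the next, which is exactly the per-floor data the crawler writes to its output file.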





