Python: multithreaded scraping of Tianya forum posts (example)
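The script below is Python 2 (print statements, urllib.urlopen). It reads the total page count from the first page of a Tianya BBS thread, spawns one downloader thread per page, collects each page's posts into a shared dict keyed by page number, and writes everything out in page order once all threads have joined.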

#coding:utf-8

import urllib
import re
import threading
import os, time

class Down_Tianya(threading.Thread):
    """Multithreaded downloader: one thread per page of the post."""

    def __init__(self, url, num, dt):
        threading.Thread.__init__(self)
        self.url = url
        self.num = num        # page number, used as the key in the shared dict
        self.txt_dict = dt    # shared dict collecting each page's posts

    def run(self):
        print 'downloading from %s' % self.url
        self.down_text()

    def down_text(self):
        """Scrape each post from self.url and store the page's posts in
        the shared dict, keyed by page number."""
        html_content = urllib.urlopen(self.url).read()
        # Capture each post's timestamp and body from the page markup
        text_pattern = re.compile(
            '<span>时间:(.*?)</span>.*?<div class="bbs-content.*?>\s*(.*?)</div>',
            re.DOTALL)
        text = text_pattern.findall(html_content)
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join

def page(url):
    """Fetch the first page and read the total page count from the pager
    (the link text '下页' is Tianya's "next page")."""
    html_page = urllib.urlopen(url).read()
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        page_num = int(page_result.group(1))
        return page_num
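As a sanity check, the pager regex can be exercised against a hand-written snippet of markup. The sample HTML here is an assumption about the pager's shape, not captured from Tianya:

import re

sample = ('<a href="/post-16-996521-4.shtml">4</a> '
          '<a href="/post-16-996521-2.shtml" class="next-page">下页</a>')
pat = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
m = pat.search(sample)
if m:
    print m.group(1)   # -> 4, the last page number listed before the '下页' link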

def write_text(page_dict, fn):
    """Write the dict out in page order; each value is the list of post
    texts for that page."""
    tx_file = open(fn, 'w+')
    pn = len(page_dict)
    for i in range(1, pn + 1):
        tx_list = page_dict[i]
        for tx in tx_list:
            # Convert HTML line breaks to CRLF and strip non-breaking spaces
            tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace('&nbsp;', '')
            tx_file.write(tx.strip() + '\r\n' * 4)
    tx_file.close()
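A note on the shared dict: each thread writes only its own key (self.num), and in CPython a single dict assignment is atomic under the GIL, so the script gets away without a lock. A variant that does a read-modify-write on shared state would need one; a minimal sketch, where the store helper is illustrative and not part of the script above:

import threading

results = {}
results_lock = threading.Lock()

def store(num, lines):
    # setdefault + extend is a read-modify-write, so it needs the lock;
    # the plain results[num] = lines in the script above does not.
    with results_lock:
        results.setdefault(num, []).extend(lines)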

def main():
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    my_page = page(url)
    my_dict = {}
    print 'page num is : %s' % my_page
    threads = []
    # Build one URL per page and download all pages in parallel
    for num in range(1, my_page + 1):
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)
    # Wait for every download to finish before writing the file
    for t in threads:
        t.join()
    write_text(my_dict, file_name)
    print 'All downloads finished. File saved in directory: %s' % os.getcwd()

if __name__ == '__main__':
    main()
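Since the script targets Python 2, here is a minimal Python 3 sketch of the same one-thread-per-page approach, assuming Tianya's markup is unchanged; it swaps in urllib.request, decodes the response bytes, and the download_all helper name is an assumption of this sketch:

# Python 3 sketch of the same one-thread-per-page approach.
import re
import urllib.request
from threading import Thread

def down_text(url, num, txt_dict):
    # urllib.urlopen became urllib.request.urlopen; the bytes need decoding
    html = urllib.request.urlopen(url).read().decode('utf-8', errors='ignore')
    pattern = re.compile(
        r'<span>时间:(.*?)</span>.*?<div class="bbs-content.*?>\s*(.*?)</div>',
        re.DOTALL)
    txt_dict[num] = ['\r\n'.join(item) for item in pattern.findall(html)]

def download_all(first_page_url, page_count):
    results = {}
    threads = [Thread(target=down_text,
                      args=('%s%s.shtml' % (first_page_url[:-7], n), n, results))
               for n in range(1, page_count + 1)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results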
