python大文件排序_Python实现大文件排序的方法

作者:Sephiroth 字体: 类型:转载

这篇文章主要介绍了Python大文件排序的方法,涉及Python针对文件、缓存及日期等操作的相关技巧,具有一定参考借鉴价值,需要的朋友可以参考下

本文实例讲述了Python实现大文件排序的方法。分享给大家供大家参考。具体实现方法如下:

# --- splitting stage -------------------------------------------------------
# Read a big (possibly gzipped) text file in large bulks, sort/deduplicate
# each bulk in worker processes, and write the sorted bulks into a directory
# named after the input file (one numbered file per bulk).
#
# NOTE(review): reconstructed from a web-scraped Python 2 listing in which
# list literals and subscripts (`[]`, `[i]`, `[0]`) were stripped and smart
# quotes replaced `'`.  Where the original was ambiguous the assumption is
# marked below.
import gzip
import os
from datetime import datetime
from multiprocessing import Process, Queue


def sort_worker(input, output):
    """Worker loop: pull text bulks from `input`, emit sorted unique keys.

    Each bulk is a newline-joined batch of lines.  The key of a line is its
    first space-separated field (assumption — the scrape dropped the
    subscript on `line.split(' ')`; TODO confirm against the data format).
    A line equal to 'STOP' is the poison pill that terminates the worker.
    """
    while True:
        lines = input.get().splitlines()
        element_set = {}
        for line in lines:
            if line.strip() == 'STOP':
                return
            try:
                element = line.split(' ')[0]
                if not element_set.get(element):
                    element_set[element] = ''
            except Exception:
                # best-effort: skip malformed lines (original swallowed all
                # errors with a bare except)
                pass
        output.put('\n'.join(sorted(element_set)))


def write_worker(input, pre):
    """Worker loop: write each sorted bulk into directory `pre` as 0, 1, 2, ...

    A bulk equal to 'STOP' is the poison pill that terminates the worker.
    """
    os.makedirs(pre, exist_ok=True)  # was os.system('mkdir %s' % pre)
    i = 0
    while True:
        content = input.get()
        if content.strip() == 'STOP':
            return
        write_sorted_bulk(content, os.path.join(pre, str(i)))
        i += 1


def write_sorted_bulk(content, filename):
    """Write one sorted bulk to `filename` (text mode, overwrite)."""
    # was Python 2 `f = file(filename, 'w')` with a manual close
    with open(filename, 'w') as f:
        f.write(content)


def split_sort_file(filename, num_sort=3, buf_size=65536 * 64 * 4):
    """Split `filename` into sorted bulks under a same-named directory.

    Reads the file `buf_size` bytes at a time, cuts each buffer at the last
    newline so no line is split across bulks, feeds the bulks to `num_sort`
    sorting processes and one writing process.  Returns the bulk count.
    """
    t = datetime.now()
    pre, ext = os.path.splitext(filename)
    if ext == '.gz':
        # text mode so the workers receive str, like the plain-file branch
        file_file = gzip.open(filename, 'rt')
    else:
        file_file = open(filename)

    bulk_queue = Queue(10)
    sorted_queue = Queue(10)

    sort_worker_pool = []
    for i in range(num_sort):
        p = Process(target=sort_worker, args=(bulk_queue, sorted_queue))
        sort_worker_pool.append(p)
        p.start()

    NUM_WRITE = 1
    write_worker_pool = []
    for i in range(NUM_WRITE):
        p = Process(target=write_worker, args=(sorted_queue, pre))
        write_worker_pool.append(p)
        p.start()

    buf = file_file.read(buf_size)
    sorted_count = 0
    while len(buf):
        end_line = buf.rfind('\n')
        if end_line == -1:
            # no newline at all: ship the whole buffer as one bulk
            bulk_queue.put(buf)
            buf = file_file.read(buf_size)
        else:
            # ship up to the last complete line; carry the partial tail
            # over into the next buffer (the scraped original garbled this
            # carry logic — reconstructed to keep lines intact)
            bulk_queue.put(buf[:end_line])
            buf = buf[end_line + 1:] + file_file.read(buf_size)
        sorted_count += 1
    file_file.close()

    for i in range(num_sort):
        bulk_queue.put('STOP')
    for p in sort_worker_pool:
        p.join()
    for i in range(NUM_WRITE):
        sorted_queue.put('STOP')
    for p in write_worker_pool:
        p.join()
    print('elasped ', datetime.now() - t)
    return sorted_count
# --- merging / diffing stage ----------------------------------------------
# K-way merge of the sorted bulk files produced by split_sort_file, plus a
# two-file "diff" built on top of it.
#
# NOTE(review): reconstructed from a web-scraped Python 2 listing that had
# stripped every `[]`/`[i]`/`[0]` subscript and contained real bugs
# (`ld` vs `old` typo in diff_file; merge_file writing `element + '\n'`
# after `element` became None).  Fixes are marked below.
import os
from datetime import datetime
from heapq import heappush, heappop
from multiprocessing import Process, Queue


class file_heap:
    """K-way merge over the sorted bulk files in directory `dir`.

    Only files whose name-hash falls into partition `idx` of `count` are
    opened, so several processes can each merge a disjoint subset.
    NOTE(review): str hashing is randomized per interpreter run, so the
    partitioning differs between runs — TODO confirm this is acceptable.
    """

    def __init__(self, dir, idx=0, count=1):
        names = os.listdir(dir)
        self.heap = []        # heap of (next_element, stream_index)
        self.files = {}       # stream_index -> open file (None once exhausted)
        self.bulks = {}       # stream_index -> buffered, unconsumed text
        self.pre_element = None
        for i in range(len(names)):
            name = names[i]
            if hash(name) % count != idx:
                continue
            self.files[i] = open(os.path.join(dir, name))
            self.bulks[i] = ''
            heappush(self.heap, (self.get_next_element_buffered(i), i))

    def get_next_element_buffered(self, i):
        """Return the next line of stream `i`; '' once the stream is exhausted."""
        if len(self.bulks[i]) < 256:          # refill the small look-ahead buffer
            if self.files[i] is not None:
                buf = self.files[i].read(65536)
                if buf:
                    self.bulks[i] += buf
                else:
                    self.files[i].close()
                    self.files[i] = None
        end_line = self.bulks[i].find('\n')
        if end_line == -1:
            end_line = len(self.bulks[i])
        element = self.bulks[i][:end_line]
        self.bulks[i] = self.bulks[i][end_line + 1:]
        return element

    def poppush_uniq(self):
        """Like poppush() but skips consecutive duplicates; None when done."""
        while True:
            element = self.poppush()
            if element is None:
                return None
            if element != self.pre_element:
                self.pre_element = element
                return element

    def poppush(self):
        """Pop the globally smallest element and advance its source stream."""
        try:
            element, index = heappop(self.heap)
        except IndexError:                    # heap empty: merge finished
            return None
        new_element = self.get_next_element_buffered(index)
        if new_element:                       # '' means that stream is done
            heappush(self.heap, (new_element, index))
        return element


def heappoppush(dir, queue, idx=0, count=1):
    """Stream partition `idx`/`count` of `dir`'s merged unique keys into `queue`.

    A trailing None marks end-of-stream for the consumer.
    """
    heap = file_heap(dir, idx, count)
    while True:
        d = heap.poppush_uniq()
        queue.put(d)
        if d is None:
            return


def heappoppush2(dir, queue, count=1):
    """Merge `count` partition streams of `dir` into `queue`, deduplicated.

    Spawns one heappoppush process per partition and k-way-merges their
    outputs; ends by putting None.  Rewritten to never push None onto the
    heap (Python 3 cannot order None against str).
    """
    heap = []
    procs = []
    bufs = []
    pre_element = None
    for i in range(count):
        q_buf = queue_buffer(Queue(1024))
        bufs.append(q_buf)
        p = Process(target=heappoppush, args=(dir, q_buf, i, count))
        procs.append(p)
        p.start()
    for i in range(count):
        first = bufs[i].get()
        if first is not None:      # an empty partition yields None at once
            heappush(heap, (first, i))
    while heap:
        d, i = heappop(heap)
        nxt = bufs[i].get()
        if nxt is not None:
            heappush(heap, (nxt, i))
        if d != pre_element:       # drop duplicates across partitions
            pre_element = d
            queue.put(d)
    queue.put(None)                # end-of-stream marker
    for p in procs:
        p.join()


def merge_file(dir):
    """Merge all sorted bulks under `dir` into a deduplicated `dir + '.merge'`."""
    heap = file_heap(dir)
    out = dir + '.merge'
    if os.path.exists(out):        # was os.system('rm -f ...')
        os.remove(out)
    with open(out, 'a') as fmerge:
        element = heap.poppush_uniq()
        # fix: the original wrote `element + '\n'` once more after the
        # final poppush_uniq() returned None (TypeError in Python 3)
        while element is not None:
            fmerge.write(element + '\n')
            element = heap.poppush_uniq()


class queue_buffer:
    """Batches queue traffic: items travel through the queue in lists of <=1025.

    put(None) flushes the write buffer immediately, so the None end-of-stream
    marker is always delivered.
    """

    def __init__(self, queue):
        self.q = queue
        self.rbuf = []   # items already received, not yet handed out
        self.wbuf = []   # items accumulated, not yet shipped

    def get(self):
        if len(self.rbuf) == 0:
            self.rbuf = self.q.get()
        r = self.rbuf[0]
        del self.rbuf[0]
        return r

    def put(self, d):
        self.wbuf.append(d)
        if d is None or len(self.wbuf) > 1024:
            self.q.put(self.wbuf)
            self.wbuf = []


def diff_file(file_old, file_new, file_diff, buf=268435456):
    """Write a comm(1)-style diff of the keys of two huge files to `file_diff`.

    Keys only in `file_new` are written as '< key', keys only in `file_old`
    as '> key'.  Both inputs are first split/sorted via split_sort_file,
    then streamed back in order through heappoppush2 worker processes.
    """
    print('buffer size', buf)
    from file_split import split_sort_file   # project-local module
    # original concatenated the (root, ext) tuple to a str — take [0]
    os.system('rm -rf ' + os.path.splitext(file_old)[0])
    os.system('rm -rf ' + os.path.splitext(file_new)[0])
    t = datetime.now()
    split_sort_file(file_old, 5, buf)
    split_sort_file(file_new, 5, buf)
    print('split elasped ', datetime.now() - t)
    os.system('cat %s/* | wc -l' % os.path.splitext(file_old)[0])
    os.system('cat %s/* | wc -l' % os.path.splitext(file_new)[0])
    os.system('rm -f ' + file_diff)
    t = datetime.now()
    zdiff = open(file_diff, 'a')
    old_q = Queue(1024)
    new_q = Queue(1024)
    old_queue = queue_buffer(old_q)
    new_queue = queue_buffer(new_q)
    h1 = Process(target=heappoppush2,
                 args=(os.path.splitext(file_old)[0], old_queue, 3))
    h2 = Process(target=heappoppush2,
                 args=(os.path.splitext(file_new)[0], new_queue, 3))
    h1.start()
    h2.start()
    old = old_queue.get()   # fix: original assigned to `ld` but compared `old`
    new = new_queue.get()
    old_count, new_count = 0, 0
    while old is not None or new is not None:
        # None-safe ordering: Python 3 cannot compare None with str
        if old is None or (new is not None and old > new):
            zdiff.write('< ' + new + '\n')
            new = new_queue.get()
            new_count += 1
        elif new is None or old < new:
            zdiff.write('> ' + old + '\n')
            old = old_queue.get()
            old_count += 1
        else:                       # equal: key present in both files
            old = old_queue.get()
            new = new_queue.get()
    zdiff.close()
    print('new_count:', new_count)
    print('old_count:', old_count)
    print('diff elasped ', datetime.now() - t)
    h1.join()
    h2.join()

希望本文所述对大家的Python程序设计有所帮助。

免责申明:本栏目所发资料信息部分来自网络,仅供大家学习、交流。我们尊重原创作者和单位,支持正版。若本文侵犯了您的权益。请点击这里

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值