1、Linux
#先排序,后去重
sort file.txt | uniq
2、c++
3、python
- 单列文本去重
#!/usr/bin/python
#coding:utf-8
import sys

# Python 2 only: force the interpreter's default string encoding to UTF-8 so
# mixed str/unicode operations on Chinese text do not raise UnicodeDecodeError.
# reload() and sys.setdefaultencoding() do not exist on Python 3 (str is
# already Unicode there), so guard them to keep the script importable.
if sys.version_info[0] == 2:
    reload(sys)  # noqa: F821 -- Python 2 builtin
    sys.setdefaultencoding('utf-8')
def text_duplicate_byset(sourcepath, destpath):
    """Deduplicate the lines of a single-column text file using a set.

    Reads every line of *sourcepath*, keeps one copy of each distinct line,
    and writes the unique lines to *destpath* (overwritten). Output order is
    arbitrary because a set is used.

    Args:
        sourcepath: path of the input text file.
        destpath: path of the output file.

    Returns:
        (sum_pre, sum_post): line counts before and after deduplication.
    """
    sum_pre = 0
    unique_lines = set()
    with open(sourcepath, 'r') as scan_file:
        # Iterate the file directly instead of readlines() to avoid
        # materializing the whole file in memory.
        for line in scan_file:
            sum_pre += 1
            # Strip the trailing newline so a final line without '\n'
            # still matches its newline-terminated duplicates (the
            # original code treated them as distinct).
            unique_lines.add(line.rstrip('\n'))
    # `with` closes the file; no explicit close() needed.
    sum_post = 0
    with open(destpath, 'w') as out_file:
        for text in unique_lines:
            sum_post += 1
            out_file.write(text + '\n')
    print(u"去重之前文本条数: " + str(sum_pre))
    print(u"去重之后文本条数: " + str(sum_post))
    return sum_pre, sum_post
# example
if __name__ == "__main__":
    sourcepath = 'log.txt'
    destpath = 'log_du.txt'
    # BUG FIX: the original snippet called text_duplicate_bylist, which is
    # not defined here -- the set-based function above is the one intended.
    text_duplicate_byset(sourcepath, destpath)
input & output:
#input:
你好你好
你好你好
你好你好
#output:
你好你好
- 多列文本去重
#!/usr/bin/python
#coding:utf-8
def text_duplicate_bylist(sourcepath, destpath, row=0):
    """Deduplicate tab-separated lines, keyed on one column.

    Lines sharing the same value in column *row* (0-based; default the first
    column) are duplicates; the last occurrence wins. Lines with fewer than
    two tab-separated columns are counted but silently skipped, matching the
    original behavior.

    Args:
        sourcepath: path of the input text file, columns separated by tabs.
        destpath: path of the output file (overwritten).
        row: index of the key column used for deduplication, default 0.

    Returns:
        (sum_pre, sum_post): line counts before and after deduplication.
    """
    sum_pre = 0
    dedup = {}
    with open(sourcepath, 'r') as scan_file:
        for raw_line in scan_file:
            sum_pre += 1
            fields = raw_line.split('\t')
            if len(fields) < 2:
                continue
            # BUG FIX: keep the whole original line, so columns before
            # `row` are not lost when row > 0 (the original rebuilt the
            # line from fields[row:] only, dropping the leading columns).
            # For row == 0 the written output is byte-identical to before.
            dedup[fields[row]] = raw_line
    # `with` closes the file; no explicit close() needed.
    sum_post = 0
    with open(destpath, 'w') as out_file:
        for kept_line in dedup.values():
            sum_post += 1
            out_file.write(kept_line)
    print(u"去重之前文本条数: " + str(sum_pre))
    print(u"去重之后文本条数: " + str(sum_post))
    return sum_pre, sum_post
# example
if __name__ == "__main__":
    # Guarded so the example does not run (and try to open log.txt)
    # when this module is imported.
    sourcepath = 'log.txt'
    destpath = 'log_du.txt'
    text_duplicate_bylist(sourcepath, destpath)
input & output:
#input,各列以 \t 分隔
你好你好 one
你好你好 one
你好你好 one
你好你好啊 one
#output:
你好你好 one
你好你好啊 one