# Exercise from Lesson 4 of Udacity's "Intro to Hadoop and MapReduce" course.
# The task is similar to word count, except that here the reducer emits an
# index, whereas word count emits counts.
#mapper
import sys
import csv
import string
def mapper(stream=None):
    """Hadoop-streaming map step: emit one "word<TAB>node_id" line per word.

    Reads tab-delimited forum-post rows from *stream* (defaults to
    ``sys.stdin``).  Rows with exactly 19 columns are processed: the post
    body (column 4) has punctuation replaced by spaces, and each resulting
    word is lowercased and printed together with the post's node id
    (column 0).  Malformed rows are skipped.

    :param stream: iterable of text lines; ``None`` means ``sys.stdin``.
    """
    if stream is None:
        stream = sys.stdin
    reader = csv.reader(stream, delimiter='\t')
    # Punctuation characters that should split words; each one maps to a space.
    specials = ',.!?:;"()<>[]#$=-/'
    # Python 3 fix: string.maketrans was removed; use str.maketrans instead.
    trans = str.maketrans(specials, ' ' * len(specials))
    for row in reader:
        if len(row) != 19:  # skip malformed / continuation rows
            continue
        node_id = row[0]
        body = row[4].translate(trans)
        for word in body.strip().split():
            print("{0}\t{1}".format(word.lower(), node_id))
def main():
    """Entry point for the mapper script.

    Runs the map step over stdin, then restores the interpreter's
    original stdin (Udacity test-harness boilerplate).
    """
    mapper()
    sys.stdin = sys.__stdin__


main()
#reducer
#!/usr/bin/python
import sys
def reducer(stream=None):
    """Hadoop-streaming reduce step: count consecutive occurrences per word.

    Expects *stream* (defaults to ``sys.stdin``) to yield sorted
    "word<TAB>node_id" lines from the mapper.  Behaviour (kept from the
    lesson's debugging version):

    * every line whose key is "fantastically" is echoed as it is read;
    * when a run of "fantastic" lines ends, its count is printed;
    * after the loop, the final key's count is always printed.

    :param stream: iterable of text lines; ``None`` means ``sys.stdin``.
    """
    if stream is None:
        stream = sys.stdin
    count = 0
    old_key = None
    for line in stream:
        data = line.strip().split("\t")
        if len(data) != 2:  # skip malformed lines
            continue
        this_key, this_value = data
        if this_key == "fantastically":
            # Debug trace: echo each occurrence of this word.
            print(this_key, "\t", this_value)
        if old_key and old_key != this_key:
            if old_key == "fantastic":
                # Debug trace: emit the just-finished run's count.
                print(old_key, "\t", count)
            # Key changed: restart the run counter.  (The original also
            # reassigned old_key here, which was redundant — it is set
            # unconditionally below.)
            count = 0
        old_key = this_key
        count += 1
    if old_key is not None:
        # Flush the last key's count (not limited to the debug words).
        print(old_key, "\t", count)
def main():
    """Entry point for the reducer script.

    Runs the reduce step over stdin, then restores the interpreter's
    original stdin (Udacity test-harness boilerplate).
    """
    # Bug fix: this previously called mapper(), which is not defined in the
    # reducer script and would raise NameError.
    reducer()
    sys.stdin = sys.__stdin__


main()