Using Hadoop to count the occurrences of each word across multiple text files

Program source

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount {

    public static class WordCountMap extends
            Mapper<LongWritable, Text, Text, IntWritable> {

        private final IntWritable one = new IntWritable(1); // the output value: 1
        private Text word = new Text();                     // the output key: the word

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // process the <offset, line> pairs produced by TextInputFormat and emit <word, 1>
            String line = value.toString();                    // the current line of text
            StringTokenizer token = new StringTokenizer(line); // split the line into words on whitespace
            while (token.hasMoreTokens()) {
                word.set(token.nextToken()); // the word becomes the output key
                context.write(word, one);    // emit the intermediate <word, 1> pair for the reducer
            }
        }
    }

    public static class WordCountReduce extends
            Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        job.setJarByClass(WordCount.class);
        job.setJobName("wordcount");
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);
        job.setInputFormatClass(TextInputFormat.class);   // produces the key/value pairs the map consumes
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
}
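To make the data flow concrete, here is an illustrative trace of one input line through the job (my own sketch, not actual framework output):

map input:      <0, "Hello Word By Word">
map output:     <Hello, 1>, <Word, 1>, <By, 1>, <Word, 1>
after shuffle:  <By, [1]>, <Hello, [1]>, <Word, [1, 1]>
reduce output:  By 1, Hello 1, Word 2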

1. Compile the source

javac -classpath /opt/hadoop-1.2.1/hadoop-core-1.2.1.jar:/opt/hadoop-1.2.1/lib/commons-cli-1.2.jar -d ./word_count_class/ WordCount.java

This compiles the source into class files under the word_count_class directory in the current folder; note that you need to create that directory first, as shown below.
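For example, with the same directory layout as the command above:

mkdir word_count_class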

2. Package the compiled classes into a jar

Change into the class output directory (word_count_class) and run:

jar -cvf wordcount.jar  *
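To double-check the packaging, jar -tf lists the archive's entries; for the class above you should see WordCount.class plus the two inner classes WordCount$WordCountMap.class and WordCount$WordCountReduce.class:

jar -tf wordcount.jar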

3. Upload the input files

First create a directory in HDFS to hold this job's input files:

hadoop fs -mkdir input_wordcount

Then upload every text file under the local input directory to input_wordcount in HDFS:

hadoop fs -put input/* input_wordcount/
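You can verify the upload before running:

hadoop fs -ls input_wordcount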

Note: do not create the output directory before running; Hadoop creates it itself and the job fails if it already exists.
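If a previous run left the directory behind, delete it first; on Hadoop 1.x the recursive delete is -rmr:

hadoop fs -rmr output_wordcount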

4. Submit the jar and run

hadoop jar word_count_class/wordcount.jar input_wordcount output_wordcount

5. View the results

List the job's output directory:

hadoop fs -ls output_wordcount

Print the job's output:

hadoop fs -cat output_wordcount/part-r-00000
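TextOutputFormat writes one key-value pair per line, tab-separated and sorted by key, so the output looks like this (illustrative values only):

By	1
Hello	2
Word	2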

Version 2: the program from my own hands-on run (old mapred API)

Map program

package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountMap extends MapReduceBase
        implements Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1); // +1 for every word
    private Text word = new Text();

    @Override
    public void map(LongWritable longWritable, Text text,
            OutputCollector<Text, IntWritable> outputCollector, Reporter reporter)
            throws IOException {
        String line = text.toString();
        StringTokenizer tokenizer = new StringTokenizer(line); // split out the words
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            outputCollector.collect(word, one);
        }
    }
}

Reduce program

package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

import java.io.IOException;
import java.util.Iterator;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountReduce extends MapReduceBase
        implements Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    public void reduce(Text text, Iterator<IntWritable> iterator,
            OutputCollector<Text, IntWritable> outputCollector, Reporter reporter)
            throws IOException {
        int sum = 0;
        while (iterator.hasNext()) {
            sum += iterator.next().get();
        }
        outputCollector.collect(text, new IntWritable(sum));
    }
}

Main function

package com.zln.chapter03;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;

import java.io.IOException;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCount {
    public static void main(String[] args) throws IOException {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordCount");

        // set the output key/value classes
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        // set the Mapper and Reducer classes
        conf.setMapperClass(WordCountMap.class);
        conf.setReducerClass(WordCountReduce.class);

        // set the input handling class
        conf.setInputFormat(TextInputFormat.class);
        // set the output handling class
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}

Prepare the input files

file1

Hello Word By Word

Hello Word By zln

file2

Hello Hadoop

Hello GoodBye

Both files live in the same directory: /home/sherry/IdeaProjects/Hadoop/WordCount/输入文件准备

Compile the classes and package them into a jar

I built the jar with IDEA. Be careful not to forget to specify the main class, as the manifest sketch below shows.
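In IDEA that means setting the artifact's Main-Class to the driver; the jar's META-INF/MANIFEST.MF should then contain a line like this (sketch, using the package from the code above):

Main-Class: com.zln.chapter03.WordCount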

Upload the input files

root@sherry:/opt/hadoop-1.2.1# hadoop fs -mkdir /user/root/zln/WordCount/InputFiles

root@sherry:/opt/hadoop-1.2.1# hadoop fs -put /home/sherry/IdeaProjects/Hadoop/WordCount/输入文件准备/* /user/root/zln/WordCount/InputFiles

Submit the jar and run

root@sherry:/opt/hadoop-1.2.1# hadoop jar /home/sherry/IdeaProjects/Hadoop/out/artifacts/WordCount_jar/WordCount.jar /user/root/zln/WordCount/InputFiles /user/root/zln/WordCount/OutputFiles

View the results

root@sherry:/opt/hadoop-1.2.1# hadoop fs -ls /user/root/zln/WordCount/OutputFiles

root@sherry:/opt/hadoop-1.2.1# hadoop fs -text /user/root/zln/WordCount/OutputFiles/part-00000
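Given file1 and file2 above, the counts work out to Hello 4 (twice in each file), Word 3, By 2, and GoodBye, Hadoop, zln once each, so part-00000 should read (keys in Text byte order, tab-separated):

By	2
GoodBye	1
Hadoop	1
Hello	4
Word	3
zln	1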

Version 3: rewriting the Map, Reduce, and main functions with the new API

Map

package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1); // +1 for every word
    private Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        StringTokenizer tokenizer = new StringTokenizer(line); // split out the words
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            context.write(word, one);
        }
    }
}

Reduce

package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable intWritable : values) {
            sum += intWritable.get();
        }
        context.write(key, new IntWritable(sum));
    }
}

Main

package com.zln.chapter03;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCount extends Configured implements Tool {

    public int run(String[] args) throws Exception {
        Job job = new Job(getConf());
        job.setJarByClass(WordCount.class);
        job.setJobName("WordCount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int ret = ToolRunner.run(new WordCount(), args);
        System.exit(ret);
    }
}
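The benefit of Configured/Tool is that ToolRunner strips generic options before the arguments reach run(), so the job can be tuned from the command line without code changes; for example (a sketch, property name per Hadoop 1.x):

hadoop jar /home/sherry/IdeaProjects/Hadoop/out/artifacts/WordCount_jar/WordCount.jar -D mapred.reduce.tasks=2 /user/root/zln/WordCount/InputFiles /user/root/zln/WordCount/OutputFiles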
