I've been learning Hadoop recently; the installed version is Hadoop 2.7.3.
I wanted to work out how to deploy a finished MapReduce program to Hadoop and run it, so the notes below record that hands-on exercise. The code is the classic word-count example found all over the web:

```java
package com.ksy.hadoop;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

/**
 * The classic example: count how many times each word occurs.
 */
public class WordCount {

    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        /**
         * key: the byte offset of the line, which includes the characters
         *      taken by the line ending (so it differs between Windows and
         *      Linux input).
         * value: one line of input.
         * context: collects the map output.
         */
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        /**
         * key: a key emitted by the map phase; Hadoop groups all values that
         *      share the same key into one iterable, which arrives here as
         *      values.
         * context: collects the final results.
         */
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        // Job.getInstance replaces the constructor deprecated in Hadoop 2.x
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
```

Package the code above as hadoop-test.jar; any packaging method works.
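As one concrete option (a minimal sketch, assuming the source file sits under com/ksy/hadoop/ and the compiled classes go into a classes/ directory, both of which are illustrative choices), the JDK's own tools are enough:

```
# compile against the Hadoop jars; `hadoop classpath` prints the client classpath
mkdir -p classes
javac -cp "$(hadoop classpath)" -d classes com/ksy/hadoop/WordCount.java
# bundle the compiled classes into the jar
jar cf hadoop-test.jar -C classes .
```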
- Upload the jar to a machine that has Hadoop deployed; in this example it goes to the /home/hadoop directory.
- Log in to that machine with a tool such as PuTTY or SecureCRT and change into Hadoop's bin directory.
- Run `./hadoop jar ~/hadoop-test.jar com.ksy.hadoop.WordCount /user/hadoopfile output` to execute the job. Afterwards, `./hdfs dfs -ls /user/hadoop/output/` shows that the run produced two files:
```
hadoop@ubuntu-114:/usr/local/hadoop/bin$ ./hdfs dfs -ls /user/hadoop/output/
Found 2 items
-rw-r--r--   1 hadoop supergroup          0 2017-07-25 19:00 /user/hadoop/output/_SUCCESS
-rw-r--r--   1 hadoop supergroup      57649 2017-07-25 19:00 /user/hadoop/output/part-r-00000
```
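The _SUCCESS file is just an empty completion marker (note its size of 0 above); the word counts themselves live in part-r-00000, one tab-separated "word count" pair per line, and can be inspected with `hdfs dfs -cat`, for example:

```
# print the first few lines of the reduce output
./hdfs dfs -cat /user/hadoop/output/part-r-00000 | head
```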
Here `/user/hadoopfile` is the HDFS file to be analyzed (it can be uploaded to HDFS with a shell command, as shown below), and `output` is the output directory; since it is a relative path, it resolves to /user/hadoop/output under the user's HDFS home directory.
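For example (a sketch; the local path /home/hadoop/words.txt is a hypothetical placeholder), the upload is a single `-put`:

```
# copy a local text file into HDFS as the job's input
./hdfs dfs -put /home/hadoop/words.txt /user/hadoopfile
```

One caveat worth remembering: the output directory must not already exist when the job is submitted, or FileOutputFormat will reject it; when re-running the example, remove it first with `./hdfs dfs -rm -r /user/hadoop/output`.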