1. Create a test2 directory in the current directory.
appadmin@hadoop-namenode1:/data/projects$ mkdir test2
2. Create the WordCount.java source file (the full source is in the appendix below).
appadmin@hadoop-namenode1:/data/projects/test2$ vi WordCount.java
3. Package the compiled classes into a jar.
jar -cvf wordcount.jar -C class .
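The jar command above collects compiled classes from the class directory, but the compile step itself was elided. A minimal sketch of it, assuming hadoop-core-1.1.1.jar sits at the root of the Hadoop install used in step 6:

mkdir class
javac -classpath /data/projects/clusters/hadoop-1.1.1/hadoop-core-1.1.1.jar -d class WordCount.java

Because WordCount.java declares package test (see the appendix), javac -d drops the class file at class/test/WordCount.class, which is why step 6 launches the job as test.WordCount.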
4. Create a txt directory and, inside it, a book.txt file holding the input data (its contents are listed in the appendix below).
appadmin@hadoop-namenode1:/data/projects/test2$ mkdir txt
appadmin@hadoop-namenode1:/data/projects/test2$ cd txt/
appadmin@hadoop-namenode1:/data/projects/test2/txt$ vi book.txt
5. Create the input directory on HDFS and upload the file.
hadoop fs -mkdir /user/appadmin1/input02
hadoop fs -put txt/book.txt /user/appadmin1/input02
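To confirm the upload before running the job:

hadoop fs -ls /user/appadmin1/input02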
6. Run the WordCount job to count the words. The two arguments are the HDFS input directory and the job's output directory.
appadmin@hadoop-namenode1:/data/projects/test2$ /data/projects/clusters/hadoop-1.1.1/bin/hadoop jar wordcount.jar test.WordCount hdfs:///user/appadmin1/input02 /tmp/output02
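Note that FileOutputFormat refuses to start a job whose output directory already exists, so a re-run must first delete the old output (Hadoop 1.x shell syntax):

hadoop fs -rmr /tmp/output02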
7. View the result. (Note the lines "va 1" and "java 2": the appendix listing of book.txt shows "java" three times, so the file that was actually uploaded evidently had one of those lines mistyped as "va".)
appadmin@hadoop-namenode1:/data/projects/test2$ hadoop fs -cat /tmp/output02/part-r-00000
Warning: $HADOOP_HOME is deprecated.
c# 1
go 1
java 2
jdbc 1
php 1
python 2
ruby 1
va 1
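This job ran with a single reducer (Launched reduce tasks=1 in the log below), so everything lands in one part-r-00000 file. With several reducers there is one part-r-NNNNN file per reducer; a wildcard cat, or getmerge to pull the parts into one local file, handles that case:

hadoop fs -cat /tmp/output02/part-r-*
hadoop fs -getmerge /tmp/output02 ./result.txt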
Appendix: WordCount.java
package test; // launched as test.WordCount in step 6, so the class must live in package test

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount {

    // Mapper: emit (word, 1) for every whitespace-separated token in a line.
    public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                context.write(word, one);
            }
        }
    }

    // Reducer: sum the counts emitted for each word.
    public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "wordcount");
        job.setJarByClass(WordCount.class); // lets the TaskTrackers find the jar that contains this class
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));   // args[0]: HDFS input directory
        FileOutputFormat.setOutputPath(job, new Path(args[1])); // args[1]: output directory, must not exist yet
        job.waitForCompletion(true);
    }
}
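One tuning note, grounded in the counters in the log below: Combine input records=0 because no combiner is configured. Since this reducer merely sums, it can safely double as a combiner and pre-aggregate map output before the shuffle; one extra line in main() enables it:

job.setCombinerClass(Reduce.class); // safe here: integer addition is associative and commutative

The log also warns that "Applications should implement Tool"; extending Configured, implementing Tool, and launching through ToolRunner.run would silence that warning and add support for generic -D options, but is omitted here to keep the example minimal.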
Appendix: book.txt
java
python
jdbc
c#
ruby
php
java
go
python
java
Full session transcript and job log:
appadmin@hadoop-namenode1:~$ cd /data/projects/test
appadmin@hadoop-namenode1:/data/projects/test$ ls
class txt wordcount.jar WordCount.java
appadmin@hadoop-namenode1:/data/projects/test$ cd txt/
appadmin@hadoop-namenode1:/data/projects/test/txt$ ls
input.txt
appadmin@hadoop-namenode1:/data/projects/test/txt$ vi input.txt
appadmin@hadoop-namenode1:/data/projects/test/txt$ hadoop fs -mkdir /user/appadmin1/input02
Warning: $HADOOP_HOME is deprecated.
appadmin@hadoop-namenode1:/data/projects/test/txt$ cd ..
appadmin@hadoop-namenode1:/data/projects/test$ hadoop fs -put txt/book.txt /user/appadmin1/input02
Warning: $HADOOP_HOME is deprecated.
put: File txt/book.txt does not exist.
appadmin@hadoop-namenode1:/data/projects/test$ hadoop fs -put txt/book.txt^Cuser/appadmin1/input02
appadmin@hadoop-namenode1:/data/projects/test$ ls
class txt wordcount.jar WordCount.java
appadmin@hadoop-namenode1:/data/projects/test$ cd ..
appadmin@hadoop-namenode1:/data/projects$ cd test2/
appadmin@hadoop-namenode1:/data/projects/test2$ ls
class txt wordcount.jar WordCount.java
appadmin@hadoop-namenode1:/data/projects/test2$ cd txt/
appadmin@hadoop-namenode1:/data/projects/test2/txt$ ls
book.txt
appadmin@hadoop-namenode1:/data/projects/test2/txt$ cd ..
appadmin@hadoop-namenode1:/data/projects/test2$ hadoop fs -put txt/book.txt /user/appadmin1/input02
Warning: $HADOOP_HOME is deprecated.
appadmin@hadoop-namenode1:/data/projects/test2$ /data/projects/clusters/hadoop-1.1.1/bin/hadoop jar wordcount.jar test.WordCount hdfs:///user/appadmin1/input02 /tmp/output02
Warning: $HADOOP_HOME is deprecated.
14/11/24 15:52:59 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
14/11/24 15:52:59 INFO input.FileInputFormat: Total input paths to process : 1
14/11/24 15:52:59 WARN snappy.LoadSnappy: Snappy native library is available
14/11/24 15:52:59 INFO util.NativeCodeLoader: Loaded the native-hadoop library
14/11/24 15:52:59 INFO snappy.LoadSnappy: Snappy native library loaded
14/11/24 15:52:59 INFO mapred.JobClient: Running job: job_201405201827_0590
14/11/24 15:53:00 INFO mapred.JobClient: map 0% reduce 0%
14/11/24 15:53:04 INFO mapred.JobClient: map 100% reduce 0%
14/11/24 15:53:12 INFO mapred.JobClient: map 100% reduce 33%
14/11/24 15:53:13 INFO mapred.JobClient: map 100% reduce 100%
14/11/24 15:53:14 INFO mapred.JobClient: Job complete: job_201405201827_0590
14/11/24 15:53:14 INFO mapred.JobClient: Counters: 29
14/11/24 15:53:14 INFO mapred.JobClient: Job Counters
14/11/24 15:53:14 INFO mapred.JobClient: Launched reduce tasks=1
14/11/24 15:53:14 INFO mapred.JobClient: SLOTS_MILLIS_MAPS=4756
14/11/24 15:53:14 INFO mapred.JobClient: Total time spent by all reduces waiting after reserving slots (ms)=0
14/11/24 15:53:14 INFO mapred.JobClient: Total time spent by all maps waiting after reserving slots (ms)=0
14/11/24 15:53:14 INFO mapred.JobClient: Rack-local map tasks=1
14/11/24 15:53:14 INFO mapred.JobClient: Launched map tasks=1
14/11/24 15:53:14 INFO mapred.JobClient: SLOTS_MILLIS_REDUCES=8693
14/11/24 15:53:14 INFO mapred.JobClient: File Output Format Counters
14/11/24 15:53:14 INFO mapred.JobClient: Bytes Written=51
14/11/24 15:53:14 INFO mapred.JobClient: FileSystemCounters
14/11/24 15:53:14 INFO mapred.JobClient: FILE_BYTES_READ=113
14/11/24 15:53:14 INFO mapred.JobClient: HDFS_BYTES_READ=172
14/11/24 15:53:14 INFO mapred.JobClient: FILE_BYTES_WRITTEN=49573
14/11/24 15:53:14 INFO mapred.JobClient: HDFS_BYTES_WRITTEN=51
14/11/24 15:53:14 INFO mapred.JobClient: File Input Format Counters
14/11/24 15:53:14 INFO mapred.JobClient: Bytes Read=47
14/11/24 15:53:14 INFO mapred.JobClient: Map-Reduce Framework
14/11/24 15:53:14 INFO mapred.JobClient: Map output materialized bytes=113
14/11/24 15:53:14 INFO mapred.JobClient: Map input records=10
14/11/24 15:53:14 INFO mapred.JobClient: Reduce shuffle bytes=113
14/11/24 15:53:14 INFO mapred.JobClient: Spilled Records=20
14/11/24 15:53:14 INFO mapred.JobClient: Map output bytes=87
14/11/24 15:53:14 INFO mapred.JobClient: Total committed heap usage (bytes)=803995648
14/11/24 15:53:14 INFO mapred.JobClient: CPU time spent (ms)=1750
14/11/24 15:53:14 INFO mapred.JobClient: Combine input records=0
14/11/24 15:53:14 INFO mapred.JobClient: SPLIT_RAW_BYTES=125
14/11/24 15:53:14 INFO mapred.JobClient: Reduce input records=10
14/11/24 15:53:14 INFO mapred.JobClient: Reduce input groups=8
14/11/24 15:53:14 INFO mapred.JobClient: Combine output records=0
14/11/24 15:53:14 INFO mapred.JobClient: Physical memory (bytes) snapshot=328908800
14/11/24 15:53:14 INFO mapred.JobClient: Reduce output records=8
14/11/24 15:53:14 INFO mapred.JobClient: Virtual memory (bytes) snapshot=2504351744
14/11/24 15:53:14 INFO mapred.JobClient: Map output records=10
appadmin@hadoop-namenode1:/data/projects/test2$ hadoop fs -cat /tmp/output02/part-r-00000
Warning: $HADOOP_HOME is deprecated.
c# 1
go 1
java 2
jdbc 1
php 1
python 2
ruby 1
va 1
Reference: http://blog.csdn.net/huoyunshen88/article/details/12624449