The API changed substantially in 0.20: the new class hierarchy is organized under the org.apache.hadoop.mapreduce package.
package hi;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WordCount {
    public static class M extends Mapper<LongWritable, Text, Text, LongWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split on whitespace and common punctuation.
            String[] words = value.toString().split("[\\s,\\.:;]+");
            for (String word : words) {
                if (word.isEmpty()) {
                    continue; // split() can yield an empty leading token
                }
                context.write(new Text(word), new LongWritable(1));
            }
        }
    }

    public static class R extends Reducer<Text, LongWritable, Text, LongWritable> {
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values,
                Context context) throws IOException, InterruptedException {
            // Sum the per-word counts emitted by the mappers.
            long count = 0L;
            for (LongWritable item : values) {
                count += item.get();
            }
            context.write(key, new LongWritable(count));
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = new Job();
        job.setJarByClass(WordCount.class);
        FileInputFormat.addInputPath(job, new Path("in"));
        FileOutputFormat.setOutputPath(job, new Path("out"));
        job.setMapperClass(M.class);
        job.setReducerClass(R.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        // Exit non-zero if the job fails.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
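Package this into a jar and run it with something like hadoop jar wordcount.jar hi.WordCount (the jar name here is arbitrary). Input is read from the in directory and results are written to out, which must not already exist or the job will fail on startup.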
Some useful features from 0.19 had not yet been ported to 0.20; MultipleOutputs is one example. A sketch of the old-API usage follows.
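For reference, here is a minimal sketch of how MultipleOutputs was wired up under the old org.apache.hadoop.mapred API; the named output "clean" and this reducer are illustrative, not part of the original example:
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.lib.MultipleOutputs;

public class MultiOutReducer extends MapReduceBase
        implements Reducer<Text, LongWritable, Text, LongWritable> {
    private MultipleOutputs mos;

    @Override
    public void configure(JobConf conf) {
        // Job setup elsewhere must declare the named output first:
        // MultipleOutputs.addNamedOutput(conf, "clean",
        //         TextOutputFormat.class, Text.class, LongWritable.class);
        mos = new MultipleOutputs(conf);
    }

    @SuppressWarnings("unchecked")
    public void reduce(Text key, Iterator<LongWritable> values,
            OutputCollector<Text, LongWritable> out, Reporter reporter)
            throws IOException {
        long count = 0L;
        while (values.hasNext()) {
            count += values.next().get();
        }
        // Write the record to the side output named "clean".
        mos.getCollector("clean", reporter).collect(key, new LongWritable(count));
    }

    @Override
    public void close() throws IOException {
        mos.close(); // flush the side outputs
    }
}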
The package structure changed even more in 0.21.
囧 To sidestep this API instability, consider moving up to the higher-level Pig instead:
REGISTER your_own_udf.jar;
raw = LOAD 'data.txt' USING PigStorage(' ');
data = FILTER raw BY NOT your.own.filter.IsValid($0) AND your.own.filter.IsValid2($12);
-- result = FOREACH data GENERATE
STORE data INTO 'result' USING PigStorage();
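A script like this can be tested locally with pig -x local yourscript.pig (the script file name is hypothetical) before running it against the cluster.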