Java Templates for Mapper, Reducer, and Wrapper

http://www.easyigloo.org/?p=1145


Often, when you want to test an idea on Hadoop, you need to create and run a job quickly. Every job consists of at least three components:

The Mapper class
The Reducer class
The Wrapper class

The code below provides empty templates for each. Just replace the placeholder class names (YourMapper, YourReducer, YourWrapper) with your own.

-----------------------------------------------------------------------------------------------------------------------------------
MAPPER
-----------------------------------------------------------------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import java.io.IOException;

/* In case you are using Multiple outputs */
//import org.apache.hadoop.io.NullWritable;
//import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

public class YourMapper extends Mapper<LongWritable, Text, Text, Text> {
    private Configuration conf;
    private Text outputKey = new Text();
    private Text outputValue = new Text();
    private String line = null;

    /* In case you are using Multiple outputs */
    //private NullWritable outputValue = NullWritable.get();
    //private MultipleOutputs<Text, NullWritable> contextMulti = null;

    @Override
    public void setup(Context context) {
        this.conf = context.getConfiguration();

        /* In case you are using Multiple outputs */
        //contextMulti = new MultipleOutputs<Text, NullWritable>(context);
    }

    @Override
    public void map(LongWritable key, Text values, Context context)
            throws IOException, InterruptedException {
    }

    @Override
    public void cleanup(Context context) throws IOException, InterruptedException {

        /* In case you are using Multiple outputs */
        //contextMulti.close();
    }
}
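
To show how the template's fields are meant to be used, here is one way the empty map() above might be filled in. This is only a sketch: it assumes tab-delimited input lines, which is not something the original template specifies.

```java
@Override
public void map(LongWritable key, Text values, Context context)
        throws IOException, InterruptedException {
    // Sketch only: assumes each input line is "key<TAB>value".
    line = values.toString();
    String[] fields = line.split("\t", 2);
    if (fields.length == 2) {
        outputKey.set(fields[0]);
        outputValue.set(fields[1]);
        context.write(outputKey, outputValue);
    }
}
```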

-----------------------------------------------------------------------------------------------------------------------------------
REDUCER
-----------------------------------------------------------------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/* In case you are using Multiple outputs */
//import org.apache.hadoop.io.NullWritable;
//import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

public class YourReducer extends Reducer<Text, Text, Text, Text> {
    private Configuration conf;
    private Text outputKey = new Text();
    private Text outputValue = new Text();
    private String line = null;

    /* In case you are using Multiple outputs */
    //private NullWritable outputValue = NullWritable.get();
    //private MultipleOutputs<Text, NullWritable> contextMulti = null;

    @Override
    public void setup(Context context) {
        this.conf = context.getConfiguration();

        /* In case you are using Multiple outputs */
        //contextMulti = new MultipleOutputs<Text, NullWritable>(context);
    }

    @Override
    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
    }

    @Override
    public void cleanup(Context context) throws IOException, InterruptedException {
        /* In case you are using Multiple outputs */
        //contextMulti.close();
    }
}
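
Likewise, a minimal reduce() body. Counting how many records arrived per key is an illustrative choice here, not part of the original template:

```java
@Override
public void reduce(Text key, Iterable<Text> values, Context context)
        throws IOException, InterruptedException {
    // Sketch only: emits each key with the number of values it received.
    long count = 0;
    for (Text value : values) {
        count++;
    }
    outputValue.set(Long.toString(count));
    context.write(key, outputValue);
}
```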
-----------------------------------------------------------------------------------------------------------------------------------
WRAPPER
This class uses the following two helper classes:

https://sites.google.com/site/hadoopandhive/home/ExtendedFileUtil.java?attredirects=0&d=1

https://sites.google.com/site/hadoopandhive/home/StringUtil.java?attredirects=0&d=1
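
In case those links go stale: the wrapper below only calls a handful of methods on these helpers. The sketch that follows is inferred from the wrapper's call sites, not taken from the linked files, so treat the signatures and behavior as assumptions.

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/* Minimal stand-in for the linked ExtendedFileUtil, inferred from the wrapper's call sites. */
public class ExtendedFileUtil {

    /* Lists the paths of all plain files under a comma-separated list of directories. */
    public String[] getFilesOnly(String inputDirList, boolean recursive) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        List<String> files = new ArrayList<String>();
        for (String dir : inputDirList.split(",")) {
            collect(fs, new Path(dir), recursive, files);
        }
        return files.toArray(new String[files.size()]);
    }

    private void collect(FileSystem fs, Path path, boolean recursive, List<String> out)
            throws IOException {
        for (FileStatus status : fs.listStatus(path)) {
            if (status.isDir()) {
                if (recursive) collect(fs, status.getPath(), recursive, out);
            } else {
                out.add(status.getPath().toString());
            }
        }
    }

    /* Total size in bytes of everything under the given comma-separated directories. */
    public long size(String inputDirList) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        long total = 0;
        for (String dir : inputDirList.split(",")) {
            total += fs.getContentSummary(new Path(dir)).getLength();
        }
        return total;
    }

    /* Recursively deletes an HDFS path. */
    public void removeHdfsPath(String path) throws IOException {
        FileSystem.get(new Configuration()).delete(new Path(path), true);
    }

    /* Deletes zero-byte files (e.g. empty part-* files) from a directory. */
    public void removeAllZeroByteFiles(String dir) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        for (FileStatus status : fs.listStatus(new Path(dir))) {
            if (!status.isDir() && status.getLen() == 0) {
                fs.delete(status.getPath(), false);
            }
        }
    }
}

/* Minimal stand-in for the linked StringUtil. */
class StringUtil {
    /* Joins a string array with the given delimiter. */
    public static String arrayToString(String[] items, String delimiter) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < items.length; i++) {
            if (i > 0) sb.append(delimiter);
            sb.append(items[i]);
        }
        return sb.toString();
    }
}
```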

-----------------------------------------------------------------------------------------------------------------------------------

/* ExtendedFileUtil and StringUtil (linked above) are assumed to live in the
   same package as this class, so they need no import. */
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.text.ParseException;

public class YourWrapper extends Configured implements Tool {
    private Configuration conf = null;
    private Job job = null;
    private String inputDirList = null;
    private String outputDir = null;
    private String[] filesToProcess = null;
    private int totalReducers = 0;
    private int jobRes = 0;
    private ExtendedFileUtil fileUtil = new ExtendedFileUtil();

    public static void main(String[] args) throws Exception {
        YourWrapper ob = new YourWrapper();
        int jobRes = ToolRunner.run(ob, args);
        System.exit(jobRes);
    }

    public int run(String[] args)
            throws ClassNotFoundException, IOException, InterruptedException, ParseException {
        jobRes = readCmdArgs(args);
        if (jobRes == 0) {
            jobRes = readConfig();
        }
        if (jobRes == 0) {
            jobRes = runMrJob();
        }
        return jobRes;
    }

    private int readCmdArgs(String[] args) {
        if (args.length == 2) {
            inputDirList = args[0];
            outputDir = args[1];
        } else {
            printUsage();
            System.exit(1);
        }
        return 0;
    }

    private int readConfig() throws IOException, InterruptedException, ClassNotFoundException {
        conf = new Configuration();
        //conf.set("SET_NEW_CONFIG_NAME", SET_NEW_CONFIG_VALUE);
        job = new Job(conf);
        if ((job.getJar() == null) || (job.getJar().isEmpty())) {
            job.setJarByClass(YourWrapper.class);
        }
        return 0;
    }

    private int runMrJob()
            throws IOException, InterruptedException, ClassNotFoundException {
        filesToProcess = fileUtil.getFilesOnly(inputDirList, true);
        job.setJobName("");  // give the job a meaningful name
        TextInputFormat.addInputPaths(job, StringUtil.arrayToString(filesToProcess, ","));
        TextOutputFormat.setOutputPath(job, new Path(outputDir));
        System.out.println("Input Dir: " + inputDirList);
        System.out.println("Output Dir: " + outputDir);

        job.setMapperClass(YourMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(YourReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // Roughly one reducer per ten 128 MB (134217728-byte) blocks of input, at least one.
        totalReducers = Math.round((fileUtil.size(inputDirList) / 134217728) * 0.1F);
        totalReducers = Math.max(totalReducers, 1);
        job.setNumReduceTasks(totalReducers);
        deleteOutputDirectory(outputDir);
        jobRes = job.waitForCompletion(true) ? 0 : 1;
        deleteLogsDirectory();
        fileUtil.removeAllZeroByteFiles(outputDir);
        return jobRes;
    }

    private int deleteOutputDirectory(String outputDir) throws IOException {
        fileUtil.removeHdfsPath(new Path(outputDir).toString());
        return 0;
    }

    private int printUsage() {
        System.out.println("USAGE: YourWrapper <inputDirList> <outputDir>");
        return 0;
    }

    private int deleteLogsDirectory() throws IOException {
        Path outputLogPath = new Path(new Path(outputDir).toString() + "/" + "_logs");
        fileUtil.removeHdfsPath(outputLogPath.toString());
        return 0;
    }
}
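
Once the templates are compiled and packaged into a jar, a typical invocation would be `hadoop jar yourjob.jar YourWrapper <inputDirList> <outputDir>` (the jar and class names here are placeholders). The first argument is the comma-separated list of input directories and the second is the output directory, matching readCmdArgs() above.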
Below is a simple Mapper/Reducer code example.

Mapper:

```java
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        StringTokenizer tokenizer = new StringTokenizer(line);
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            context.write(word, one);
        }
    }
}
```

This Mapper splits the input text into words and emits each word as a key-value pair, where the key is the word itself and the value is a constant 1.

Reducer:

```java
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get();
        }
        result.set(sum);
        context.write(key, result);
    }
}
```

This Reducer merges the values that share a key in the Mapper's output, summing the counts, and emits each key with its total.

In a MapReduce job, the Mapper and Reducer are the basic data-processing units: the Mapper turns the input data into key-value pairs, those pairs are passed to the Reducer, and the Reducer merges and computes over them to produce the final result.
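
The snippet above stops at the Mapper and Reducer; to actually run this WordCount pair you also need a small driver class. The following is a minimal sketch (the class name WordCountDriver and the job wiring are illustrative, not from the original text):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "word count");
        job.setJarByClass(WordCountDriver.class);
        job.setMapperClass(WordCountMapper.class);
        // Summing counts is associative and commutative, so the reducer can double as a combiner.
        job.setCombinerClass(WordCountReducer.class);
        job.setReducerClass(WordCountReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
```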
