mapreduce-1
MapReduce: principles and processing flow
InputFormat read -> MapTask -> partition -> sort -> combine -> group -> ReduceTask -> OutputFormat write results
Partitioning uses the key to decide which key-value pairs are sent to the same reduce task,
while grouping uses the key to decide which key-value pairs within one reduce task are processed together in a single reduce() call.
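To make the partition step concrete, here is a minimal sketch of a custom partitioner (MyPartitioner is a hypothetical class, not part of the wordcount below): it routes words starting with a-m to one reduce task and the rest to another.

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class MyPartitioner extends Partitioner<Text, IntWritable> {
    @Override
    public int getPartition(Text key, IntWritable value, int numPartitions) {
        // Words starting with a-m go to partition 0, everything else to 1.
        char first = key.toString().isEmpty() ? 'z' : Character.toLowerCase(key.toString().charAt(0));
        return ((first >= 'a' && first <= 'm') ? 0 : 1) % numPartitions;
    }
}

It would be enabled in the driver with job.setPartitionerClass(MyPartitioner.class) together with job.setNumReduceTasks(2); grouping, by contrast, is controlled via job.setGroupingComparatorClass(...).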
Implementing wordcount in code
Mapper class
package pac1_wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Adjust the split condition to match your own input text.
        // Inside a character class, '|' and '.' are literal, so the original
        // "[ |,|.|']" also split on '|'; "[ ,.']+" splits on runs of
        // spaces, commas, periods and apostrophes.
        String[] mes = value.toString().split("[ ,.']+");
        for (String word : mes) {
            if (!word.isEmpty()) { // split can yield an empty leading token
                context.write(new Text(word), new IntWritable(1));
            }
        }
    }
}
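As a quick sanity check of the split condition, here is a standalone snippet (plain Java, no Hadoop needed; the sample line is made up):

public class SplitDemo {
    public static void main(String[] args) {
        String line = "hello, world. it's a test";
        // "[ ,.']+" splits on runs of spaces, commas, periods and apostrophes.
        for (String word : line.split("[ ,.']+")) {
            System.out.println(word);
        }
        // Prints: hello / world / it / s / a / test
    }
}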
Reducer class
package pac1_wordcount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int count = 0;
        for (IntWritable value : values) {
            // Every map-side value is 1, so count++ would also work here,
            // but summing value.get() stays correct if a combiner is added.
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}
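The combine step from the pipeline above can reuse this reducer: summing counts is associative and commutative, so partial map-side sums give the same final totals. Enabling it is one extra driver line (a sketch; the drivers below do not set it):

// MyReducer only sums counts, so it can double as the combiner.
job.setCombinerClass(MyReducer.class);

This is also why the reducer sums value.get() instead of assuming every incoming value is 1.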
Driver (main) class
package pac1_wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WCDriver {
    // Declaring the exceptions here avoids the pattern of catching
    // IOException and then dereferencing a possibly-null job.
    public static void main(String[] args) throws Exception {
        // Get the job object
        Job job = Job.getInstance(new Configuration(), "wordcount");
        // Set the main class
        job.setJarByClass(WCDriver.class);
        // Use our custom mapper and reducer implementations
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        // Mapper output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Reducer output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Input format and path
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///D:\\file\\project\\hadoop\\day0323\\src\\testFiles\\a.txt"));
        // Output format and path
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("file:///d:/out"));
        // Run the job and report the result
        boolean result = job.waitForCompletion(true);
        System.out.println(result ? "success" : "failure");
    }
}
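Note that if file:///d:/out already exists, the job fails with FileAlreadyExistsException. A common convenience (a sketch using the same paths as above; it needs import org.apache.hadoop.fs.FileSystem) is to delete the directory before setting it:

// Delete a leftover output directory so reruns do not fail.
Path outPath = new Path("file:///d:/out");
FileSystem fs = FileSystem.get(outPath.toUri(), job.getConfiguration());
if (fs.exists(outPath)) {
    fs.delete(outPath, true); // true = delete recursively
}
TextOutputFormat.setOutputPath(job, outPath);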
The driver can alternatively be implemented with ToolRunner:
package combiner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class JobMain extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(super.getConf(), "wordCount");
        job.setJarByClass(JobMain.class);
        // The setup in between is the same as in WCDriver above
        boolean completion = job.waitForCompletion(true);
        return completion ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int run = ToolRunner.run(new Configuration(), new JobMain(), args);
        System.exit(run);
    }
}
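ToolRunner runs GenericOptionsParser before calling run(), so generic options such as -D key=value passed on the command line are already reflected in the Configuration returned by super.getConf(). For the elided middle part, one idiomatic variant (a sketch, not the original code) takes the paths from args instead of hard-coding them, assuming args[0] is the input and args[1] the output:

// Hypothetical middle section for run(), mirroring WCDriver but with
// command-line paths; MyMapper/MyReducer and IntWritable would need
// to be imported in this file.
job.setMapperClass(MyMapper.class);
job.setReducerClass(MyReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
TextInputFormat.addInputPath(job, new Path(args[0]));
TextOutputFormat.setOutputPath(job, new Path(args[1]));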
Common errors:
- ClassNotFoundException: often caused by a missing job.setJarByClass(JobMain.class);
- The output directory is created but the files are empty: wrong imports, or key/value types that do not match between the mapper, the reducer, and the driver settings, can cause this;
- The local Hadoop installation and the HADOOP_HOME environment variable also affect whether jobs run, especially on Windows (see the snippet below).
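On Windows, a frequent workaround, assuming a local install at a path like D:\hadoop (adjust to your machine), is to set the property in the driver before creating the Job:

// Hadoop's Shell utility reads "hadoop.home.dir" as a fallback for
// the HADOOP_HOME environment variable; the path is a placeholder.
System.setProperty("hadoop.home.dir", "D:\\hadoop");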