I. Introduction
MapReduce Introduction (Wiki)
II. Eclipse + MapReduce
1. Setting
2. Code:
WordcountMapper.java:
package com.bsr.bigdata.mapreduce;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Mapper.Context;
/**
 * KEYIN: by default, the byte offset of the start of the line read by the MR framework (a long);
 *        here we use Hadoop's LongWritable instead of the plain Java type
 * VALUEIN: by default, the content of the line read by the MR framework (a String); we use Text
 * KEYOUT: the key of the output produced by the user-defined logic (a String); we use Text
 * VALUEOUT: the value of the output produced by the user-defined logic (an Integer); we use IntWritable
 */
public class WordcountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    /**
     * The business logic of the map phase goes into the map() method.
     * The map task calls map() once for every input record (one line of text here).
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // convert the incoming line to a String
        String line = value.toString();              // e.g. "hello world"
        // split the line on spaces
        String[] words = line.split(" ");            // hello, world, hello, feima, ...
        // emit each word as <word, 1>
        for (String word : words) {
            // use the word as the key and 1 as the value, so the framework can group the words for the reducers
            context.write(new Text(word), new IntWritable(1));  // hello 1, world 1, ...
        }
    }
}
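A small optional refinement (not in the original code, just a common Hadoop idiom): because context.write() copies the key and value into the output buffer immediately, the mapper can reuse a single Text and IntWritable instead of allocating new objects for every word. A minimal sketch, with a hypothetical class name so it does not clash with the version above:
package com.bsr.bigdata.mapreduce;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class WordcountMapperReuse extends Mapper<LongWritable, Text, Text, IntWritable> {
    // reused output objects; safe because context.write() serializes them right away
    private final Text outKey = new Text();
    private final IntWritable one = new IntWritable(1);
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        for (String word : value.toString().split(" ")) {
            outKey.set(word);
            context.write(outKey, one);
        }
    }
}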
WordcountReducer.java:
package com.bsr.bigdata.mapreduce;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
 * KEYIN, VALUEIN must match the Mapper's output types (Text, IntWritable)
 * KEYOUT is the word
 * VALUEOUT is its total count
 * @author Administrator
 */
public class WordcountReducer extends Reducer<Text, IntWritable, Text, IntWritable>{
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int count = 0;
        // sum up all the 1s that were emitted for this word
        for (IntWritable value : values) {
            count += value.get();
        }
        context.write(key, new IntWritable(count));
        // a downstream component (the output format) writes the counts to files, one per reduce task, usually on HDFS;
        // the job's input files also have to be specified (see the driver below)
    }
}
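Because the word-count reduce logic is associative and commutative, the same class can also be registered as a combiner, so partial sums are computed on the map side and less data is shuffled. This is optional and not part of the original driver; if you want it, one extra line in WordcountDriver is enough:
// optional: pre-aggregate counts on the map side before the shuffle
job.setCombinerClass(WordcountReducer.class);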
WordcountDriver.java:
package com.bsr.bigdata.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * Acts as the client of the YARN cluster:
 * it packages the parameters of our MR job together with the jar
 * and submits them to YARN (i.e. the Hadoop cluster).
 * @author Administrator
 */
public class WordcountDriver {
    // a main method is required: it is the entry point that sets everything else in motion
    public static void main(String[] args) throws Exception {
        // initial configuration
        Configuration conf = new Configuration();
        //conf.set("yarn.resourcemanager.hostname", "bsrrac3");
        Job job = Job.getInstance(conf);
        // specify the jar that contains this program
        job.setJarByClass(WordcountDriver.class);
        // specify the Mapper class this job uses
        job.setMapperClass(WordcountMapper.class);
        // specify the Reducer class this job uses
        job.setReducerClass(WordcountReducer.class);
        // specify the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // specify the final (reduce) output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // specify the directory of the job's input files
        FileInputFormat.setInputPaths(job, new Path("/wordcount/in"));
        // specify the directory of the job's output
        FileOutputFormat.setOutputPath(job, new Path("/wordcount/out"));
        // submit the job's parameters and the jar containing its classes to YARN
        //job.submit();
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
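The input and output paths are hard-coded in the driver above. A common variant (just a sketch, not part of the original post) is to take them from the command line so the same jar can be reused for different directories:
// read the input and output directories from the command line instead of hard-coding them
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
The job would then be started with, for example: hadoop jar wc.jar com.bsr.bigdata.mapreduce.WordcountDriver /wordcount/in /wordcount/out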
3. Export as a jar (wc.jar) and transfer it to the cluster:
4. Put the input file into HDFS and run the job:
hdfs dfs -mkdir -p /wordcount/in
hdfs dfs -put hello.txt /wordcount/in
hadoop jar wc.jar com.bsr.bigdata.mapreduce.WordcountDriver
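Once the job finishes, the result can be checked with hdfs dfs -cat /wordcount/out/part-r-00000 (part-r-00000 is the default output file name when a single reduce task is used).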
*Note: I ran into a problem here. When putting hello.txt into HDFS, the error reported that the number of available datanodes was 0. If you hit the same issue, see the following blog post:
http://blog.csdn.net/zuiaituantuan/article/details/6533867
III. Eclipse + MapReduce.flow:
FlowBean.java:
package com.bsr.bigdata.mapreduce.flow;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
public class FlowBean implements Writable {
    private long upFlow;
    private long dFlow;
    private long sumFlow;
    // deserialization creates the object reflectively via the no-arg constructor, so one must be defined explicitly
    public FlowBean() {}
    public FlowBean(long upFlow, long dFlow) {
        this.upFlow = upFlow;
        this.dFlow = dFlow;
        this.sumFlow = upFlow + dFlow;
    }
    public long getUpFlow() {
        return upFlow;
    }
    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }
    public long getdFlow() {
        return dFlow;
    }
    public void setdFlow(long dFlow) {
        this.dFlow = dFlow;
    }
    public long getSumFlow() {
        return sumFlow;
    }
    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }
    /**
     * Serialization method
     */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(dFlow);
        out.writeLong(sumFlow);
    }
    /**
     * Deserialization method.
     * Note: fields must be read back in exactly the same order they were written.
     */
    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        dFlow = in.readLong();
        sumFlow = in.readLong();
    }
    @Override
    public String toString() {
        return upFlow + "\t" + dFlow + "\t" + sumFlow;
    }
}
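To convince yourself that write() and readFields() agree on the field order, a quick framework-free round-trip check can be run locally. This is only a sketch; the class name and the sample values are made up:
package com.bsr.bigdata.mapreduce.flow;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
public class FlowBeanRoundTrip {
    public static void main(String[] args) throws Exception {
        // serialize a bean the same way the framework would
        FlowBean original = new FlowBean(100, 200);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));
        // deserialize into a fresh bean created through the no-arg constructor
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy);   // expected: 100  200  300
    }
}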
FlowCount.java:
package com.bsr.bigdata.mapreduce.flow;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class FlowCount{
    static class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // convert the line to a String
            String line = value.toString();
            // split into fields
            String[] fields = line.split("\t");
            // the phone number
            String phoneNbr = fields[1];
            // extract the upstream and downstream traffic
            long upFlow = Long.parseLong(fields[fields.length - 3]);
            long dFlow = Long.parseLong(fields[fields.length - 2]);
            context.write(new Text(phoneNbr), new FlowBean(upFlow, dFlow));
        }
    }
    static class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean> {
        @Override
        protected void reduce(Text key, Iterable<FlowBean> values, Context context)
                throws IOException, InterruptedException {
            long sum_upFlow = 0;
            long sum_dFlow = 0;
            // iterate over all beans for this phone number, accumulating upstream and downstream traffic separately
            for (FlowBean bean : values) {
                sum_upFlow += bean.getUpFlow();
                sum_dFlow += bean.getdFlow();
            }
            // emit the totals for this phone number
            FlowBean resultBean = new FlowBean(sum_upFlow, sum_dFlow);
            context.write(key, resultBean);
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // conf.set("mapreduce.framework.name", "yarn");
        // conf.set("yarn.resourcemanager.hostname", "hadoop");
        // conf.set("mapreduce.framework.name", "local");
        // conf.set("yarn.resourcemanager.hostname", "local");
        // conf.set("fs.defaultFS", "file:///");
        Job job = Job.getInstance(conf);
        /*job.setJar("/home/hadoop/wc.jar");*/
        // specify the local path of the jar that contains this program
        job.setJarByClass(FlowCount.class);
        // specify the Mapper/Reducer classes this job uses
        job.setMapperClass(FlowCountMapper.class);
        job.setReducerClass(FlowCountReducer.class);
        // specify the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        // specify the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // specify the directory of the job's input files
        FileInputFormat.setInputPaths(job, new Path("d:/flow/in"));
        // specify the directory of the job's output
        FileOutputFormat.setOutputPath(job, new Path("d:/flow/out"));
        // submit the job's parameters and the jar containing its classes to YARN (or run locally)
        /*job.submit();*/
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
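The mapper assumes tab-separated records in which the phone number is field 1 and the upstream/downstream byte counts are the third-from-last and second-from-last fields. A standalone sketch showing how those indices line up; the sample record is made up for illustration:
public class FlowLineParseDemo {
    public static void main(String[] args) {
        // hypothetical record: timestamp, phone number, IP, a few counters, upFlow, dFlow, status code
        String line = "1363157985066\t13726230503\t120.196.100.82\t24\t27\t2481\t24681\t200";
        String[] fields = line.split("\t");
        String phoneNbr = fields[1];                              // 13726230503
        long upFlow = Long.parseLong(fields[fields.length - 3]);  // 2481
        long dFlow = Long.parseLong(fields[fields.length - 2]);   // 24681
        System.out.println(phoneNbr + "\t" + upFlow + "\t" + dFlow + "\t" + (upFlow + dFlow));
    }
}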