最近在学Hadoop,写了一个类似wordcount的mapreduce程序,看一下job的提交过程。程序有四个类:driver(驱动)类、map类、reduce类、自定义封装数据的bean类,除了bean类,其他基本和wordcount结构差不多。
话不多说,先把代码贴上。
自定义bean类:
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;import org.apache.hadoop.io.Writable;
public class FlowBlean implements Writable{
private long uFlow;
private long dFlow;
private long sFlow;
public FlowBlean() {
super();
}
public FlowBlean(long uFlow, long dFlow) {
super();
this.uFlow = uFlow;
this.dFlow = dFlow;
this.sFlow = uFlow+dFlow;
}public void set(long uFlow, long dFlow) {
// TODO Auto-generated method stub
this.uFlow = uFlow;
this.dFlow = dFlow;
this.sFlow = uFlow+dFlow;
}@Override
/**
 * Serializes this bean for Hadoop. The field order written here
 * (uFlow, dFlow, sFlow) MUST mirror the read order in readFields().
 *
 * @param out destination stream provided by the framework
 * @throws IOException if the underlying stream fails
 */
public void write(DataOutput out) throws IOException {
    long[] fields = {uFlow, dFlow, sFlow};
    for (long value : fields) {
        out.writeLong(value);
    }
}
@Override
public void readFields(DataInput in) throws IOException {
// TODO Auto-generated method stub