1. Create a Maven project
Add the following dependencies to pom.xml:
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.1.3</version>
    </dependency>
    <!-- JUnit test framework -->
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.13.2</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-nop -->
    <!-- SLF4J logging binding (no-op) -->
    <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-nop</artifactId>
        <version>1.7.35</version>
    </dependency>
</dependencies>
<!-- Build plugins for packaging the jar (by default, dependencies are NOT bundled into it) -->
<build>
    <plugins>
        <plugin>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>3.6.1</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
            </configuration>
        </plugin>
        <!-- Uncomment the plugin below when you want to bundle the dependencies into the jar -->
        <!--
        <plugin>
            <artifactId>maven-assembly-plugin</artifactId>
            <configuration>
                <descriptorRefs>
                    <descriptorRef>jar-with-dependencies</descriptorRef>
                </descriptorRefs>
            </configuration>
            <executions>
                <execution>
                    <id>make-assembly</id>
                    <phase>package</phase>
                    <goals>
                        <goal>single</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
        -->
    </plugins>
</build>
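With this build section in place, packaging is just the standard Maven lifecycle; a minimal sketch (the artifact names depend on your own artifactId and version, so treat them as placeholders):

mvn clean package

This produces target/<artifactId>-<version>.jar; with the assembly plugin uncommented, Maven also emits target/<artifactId>-<version>-jar-with-dependencies.jar.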
2. In the project's resources directory, create a file named log4j.properties and add the following content:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
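With this ConversionPattern, a console line comes out roughly like the following (an illustrative line, not real job output):

2024-01-01 12:00:00,000 INFO [org.apache.hadoop.mapreduce.Job] - Running job: job_local1234567890_0001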
3. Define a custom class that implements the Writable interface
import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/*
 * Steps for a custom Hadoop value type:
 * 1. Implement the Writable interface.
 * 2. Provide a no-arg constructor.
 * 3. Override the serialization and deserialization methods.
 * 4. Override toString so the final output is readable.
 */
public class FlowBean implements Writable {

    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic

    // No-arg constructor
    public FlowBean() {
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow() {
        this.sumFlow = this.downFlow + this.upFlow;
    }

    // Serialization method
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // Note: fields must be read back in exactly the same order they were written.
    // Deserialization method
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
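Because write and readFields must mirror each other, a quick way to catch ordering mistakes is a round-trip test. Below is a minimal sketch using the JUnit dependency already declared above; the test class and the sample values are hypothetical, not part of the original tutorial:

import org.junit.Assert;
import org.junit.Test;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanTest {

    @Test
    public void testSerializationRoundTrip() throws IOException {
        // Populate a bean with arbitrary sample values.
        FlowBean original = new FlowBean();
        original.setUpFlow(1116);
        original.setDownFlow(954);
        original.setSumFlow();

        // Serialize to an in-memory byte buffer.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // Deserialize into a fresh bean.
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        // If the field order in write and readFields matches, all values survive.
        Assert.assertEquals(original.getUpFlow(), copy.getUpFlow());
        Assert.assertEquals(original.getDownFlow(), copy.getDownFlow());
        Assert.assertEquals(original.getSumFlow(), copy.getSumFlow());
    }
}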
4. Mapper class
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/*
 * Input key:    byte offset of the line, so LongWritable
 * Input value:  one line of text, so Text
 * Output key:   phone number, so Text
 * Output value: traffic totals, so FlowBean (the class we defined above)
 */
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    // Reused output key
    Text outK = new Text();
    // Reused output value
    FlowBean outV = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Get the line as a String (from the value, not the key)
        String line = value.toString();

        // 2. Split on tabs
        String[] split = line.split("\t");

        // 3. Pick out the fields we need
        String phone = split[1];
        String up = split[split.length - 3];
        String down = split[split.length - 2];

        // 4. Populate the outputs
        outK.set(phone);
        outV.setUpFlow(Long.parseLong(up));
        outV.setDownFlow(Long.parseLong(down));
        outV.setSumFlow();

        // 5. Emit
        context.write(outK, outV);
    }
}
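The index arithmetic above assumes tab-separated input roughly like the two hypothetical lines below (this layout is inferred from the code, not taken from a file shown here); the flow fields are addressed from the end of the array because a visited-URL column may be missing on some lines:

1	13736230513	192.196.100.1	www.example.com	2481	24681	200
2	13846544121	192.196.100.2	264	0	200

Either way, split[1] is the phone number, while split[split.length - 3] and split[split.length - 2] are the up and down flow.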
5. Reducer class
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

    FlowBean outV = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        long sumUp = 0;
        long sumDown = 0;

        // 1. Accumulate across all values for this phone number
        //    (must be +=, not =, or only the last record would survive)
        for (FlowBean value : values) {
            sumUp += value.getUpFlow();
            sumDown += value.getDownFlow();
        }

        // 2. Populate the output bean
        outV.setUpFlow(sumUp);
        outV.setDownFlow(sumDown);
        outV.setSumFlow();

        // 3. Emit
        context.write(key, outV);
    }
}
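Each output line is the phone number followed by FlowBean.toString(), i.e. up flow, down flow, and total separated by tabs. With the hypothetical input above, the first phone number would come out as:

13736230513	2481	24681	27162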
6. Driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FlowDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. Get the job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. Set the jar
        job.setJarByClass(FlowDriver.class);

        // 3. Associate the Mapper and Reducer
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);

        // 4. Set the Mapper's output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7. Submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
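For local debugging, FlowDriver can be run straight from the IDE by supplying two program arguments, e.g. the hypothetical paths D:\input\phone_data.txt and D:\output. Note that FileOutputFormat fails the job if the output directory already exists, so remove it between runs.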
Finally, build the jar, upload it to the Hadoop directory on the Linux machine, and run it as follows:
hadoop jar <jar-name> <fully-qualified-Driver-class> <input-path> <output-path>
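For example, with the hypothetical names from this walkthrough (the jar name and HDFS paths are placeholders):

hadoop jar flow-1.0-SNAPSHOT.jar FlowDriver /input/phone_data.txt /output

If the classes are declared in a package, pass the fully qualified class name instead (e.g. com.example.FlowDriver).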