1. Objectives
- Master basic MapReduce programming through hands-on practice;
- Learn to solve common data-processing problems with MapReduce, including deduplicated counting and data sorting.
2. Platform
- Operating system: Linux
- Hadoop version: 3.3.1
3. Procedure
Data file used in the experiment:
Link: https://pan.baidu.com/s/16zyA_DZwu9anxjwdHnbMOw
Extraction code: 57ky
(1) Count the distinct users that visit each website.
Note: in the file userurl_20150911, fields are separated by "\t"; the user's phone number is in column 3 and the website's main domain is in column 17.
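The column numbers in the note are 1-based, while the array produced by split() is 0-based, so column 3 lands at index 2 and column 17 at index 16. A minimal standalone sketch of that mapping (the sample record below is hypothetical, shortened to just the fields of interest):
public class ColumnIndexDemo {
    public static void main(String[] args) {
        // Hypothetical 17-field tab-separated record; only columns 3 and 17 carry real-looking values
        String line = "a\tb\t13800000000\td\te\tf\tg\th\ti\tj\tk\tl\tm\tn\to\tp\texample.com";
        String[] fields = line.split("\t");
        System.out.println(fields[2]);   // column 3  -> index 2  -> phone number
        System.out.println(fields[16]);  // column 17 -> index 16 -> main domain
    }
}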
package com.user.mapreduce.homework;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class UserCountDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Create the job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2. Set the jar by the driver class
        job.setJarByClass(UserCountDriver.class);
        // 3. Attach the mapper and reducer
        job.setMapperClass(UserCountMapper.class);
        job.setReducerClass(UserCountReducer.class);
        // 4. Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("E:\\BigData\\homework\\hadoop作业\\userurl_20150911"));
        FileOutputFormat.setOutputPath(job, new Path("C:\\Users\\lenovo\\Desktop\\answer"));
        // 7. Submit the job and exit with its status
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
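The hardcoded Windows paths above only work when the driver is run locally from an IDE. To run on the Linux cluster named in the platform section, the input and output would live on HDFS and the packaged jar would be submitted in the usual way; a sketch, where homework.jar and the HDFS paths are hypothetical placeholders (the driver's paths would also need to be changed to match):
hdfs dfs -mkdir -p /input
hdfs dfs -put userurl_20150911 /input
hadoop jar homework.jar com.user.mapreduce.homework.UserCountDriver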
package com.user.mapreduce.homework;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class UserCountMapper extends Mapper<Object, Text, Text, Text> {
    private Text outk = new Text();
    private Text outv = new Text();

    @Override
    protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        String[] split = line.split("\t");
        // Skip malformed records that do not have at least 17 columns
        if (split.length < 17) return;
        // Key: main domain (column 17); value: user phone number (column 3)
        outk.set(split[16]);
        outv.set(split[2]);
        context.write(outk, outv);
    }
}
package com.user.mapreduce.homework;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

public class UserCountReducer extends Reducer<Text, Text, Text, IntWritable> {
    private IntWritable outv = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // Collect the phone numbers into a set so each duplicate is counted only once
        Set<String> phones = new HashSet<>();
        for (Text value : values) {
            phones.add(value.toString());
        }
        outv.set(phones.size());
        context.write(key, outv);
    }
}
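Every duplicate phone number is currently shipped across the network to the reducer. Shuffle volume could be cut with a map-side combiner that de-duplicates phones per domain within each map task; the reducer still de-duplicates globally, so the result is unchanged. A minimal sketch, where UserCountCombiner is a class introduced here and not part of the original code:
package com.user.mapreduce.homework;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

// Hypothetical combiner: emits each (domain, phone) pair at most once per map task.
// Its input and output types must both match the map output types (Text, Text).
public class UserCountCombiner extends Reducer<Text, Text, Text, Text> {
    private Text outv = new Text();

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        Set<String> seen = new HashSet<>();
        for (Text value : values) {
            if (seen.add(value.toString())) {  // true only the first time this phone appears
                outv.set(value.toString());
                context.write(key, outv);
            }
        }
    }
}
It would be registered in the driver with job.setCombinerClass(UserCountCombiner.class); because de-duplication is idempotent, the combiner is safe even though Hadoop may run it zero or more times.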
(2) For each user, sum the upstream and downstream traffic over all of that user's records, then output the results in sorted order.
Note: upstream traffic is in column 25 and downstream traffic in column 26.
package com.user.mapreduce.homework;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FlowDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Create the job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2. Set the jar by the driver class
        job.setJarByClass(FlowDriver.class);
        // 3. Attach the mapper and reducer
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        // 4. Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path("E:\\BigData\\homework\\hadoop作业\\userurl_20150911"));
        FileOutputFormat.setOutputPath(job, new Path("C:\\Users\\lenovo\\Desktop\\answer"));
        // 7. Submit the job and exit with its status
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
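Note that FileOutputFormat refuses to start a job whose output directory already exists, so the answer directory written by the first job must be deleted (or a different output path chosen) before this job is run.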
package com.user.mapreduce.homework;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowMapper extends Mapper<Object, Text, Text, IntWritable> {
    private Text outk = new Text();
    private IntWritable outv = new IntWritable();

    @Override
    protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        String[] split = line.split("\t");
        // Skip malformed records that do not have at least 26 columns
        if (split.length < 26) return;
        // Key: user phone number (column 3);
        // value: upstream (column 25) plus downstream (column 26) traffic of this record
        outk.set(split[2]);
        outv.set(Integer.parseInt(split[24]) + Integer.parseInt(split[25]));
        context.write(outk, outv);
    }
}
package com.user.mapreduce.homework;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable outv = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Sum this user's per-record (upstream + downstream) traffic across all records
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        outv.set(sum);
        // The shuffle already sorts reducer input by key, so the output
        // is ordered by user phone number
        context.write(key, outv);
    }
}
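Within a single job, MapReduce only guarantees ordering by the reduce key, i.e. by phone number here. If the results are instead to be ranked by total traffic, the standard pattern is a second job that reads the first job's output and swaps the total into the key position so the shuffle sorts by it. A minimal sketch, assuming the first job's tab-separated output format (phone, total) and using FlowSortMapper as a hypothetical class name:
package com.user.mapreduce.homework;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

// Hypothetical second-stage mapper: emits (total traffic, phone) so the shuffle
// sorts records by total traffic (ascending, IntWritable's natural order).
public class FlowSortMapper extends Mapper<Object, Text, IntWritable, Text> {
    private IntWritable outk = new IntWritable();
    private Text outv = new Text();

    @Override
    protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        String[] split = value.toString().split("\t");  // first job's output: phone \t total
        if (split.length < 2) return;
        outk.set(Integer.parseInt(split[1]));
        outv.set(split[0]);
        context.write(outk, outv);
    }
}
Paired with a pass-through reducer and job.setNumReduceTasks(1) so that all records flow through a single sorted stream, this produces one output file ordered by total traffic.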