MapReduce in Practice
WRITING MAPREDUCE PROGRAMS AND CODING CONVENTIONS
Programming conventions
- A user program consists of three parts: the Mapper, the Reducer, and the Driver (the client that submits and runs the MR job)
- The Mapper's input data comes as KV pairs (the KV types can be customized)
- The Mapper's output data comes as KV pairs (the KV types can be customized)
- The Mapper's business logic goes in the map() method
- The map() method (run by the map task process) is called once for each input KV pair
A simple MapReduce program: WordCount
package com.mingming.bigdata.wordcount;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
/**
* @author Mingming
* @Description
* @Date Created in 21:41 2017/12/19
* @Modified By
*/
public class WordCount {
//define a Mapper class
static class WordCountMapper extends Mapper<LongWritable,Text,Text,IntWritable>{
Text wordbean = new Text();
IntWritable count = new IntWritable(1);
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//lifecycle of map(): the framework calls it once for every record it passes in
//key: the offset, within the file, of the start of this line
//value: the content of this line
//take the content of this line and convert it to a String
String line = value.toString();
//split the line into words (the input here is assumed to be tab-separated)
String[] words = line.split("\t");
//iterate over the array and emit each word
for (String word : words) {
wordbean.set(word);
context.write(wordbean,count);
}
}
}
//define a Reducer class
static class WordCountReducer extends Reducer<Text,IntWritable,Text,IntWritable>{
/**
* the framework calls the reduce method once for every KV group it passes in
* @param key
* @param values
* @param context
* @throws IOException
* @throws InterruptedException
*/
IntWritable counts = new IntWritable();
@Override
protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
int count = 0;
for (IntWritable value:values){
count+=value.get();
}
counts.set(count);
context.write(key,counts);
}
}
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
//describe the business logic (which class is the mapper, which is the reducer, where the input data lives, where the output goes) as a job object,
//then submit the described job to the cluster to run
Configuration conf = new Configuration();
Job wcjob = Job.getInstance(conf);
//specify where the jar of this program is located
//wcjob.setJar("/home/hadoop/wordcount.jar");
wcjob.setJarByClass(WordCount.class);
wcjob.setMapperClass(WordCountMapper.class);
wcjob.setReducerClass(WordCountReducer.class);
//set the KV types of the mapper output
wcjob.setMapOutputKeyClass(Text.class);
wcjob.setMapOutputValueClass(IntWritable.class);
//set the KV types of the reducer output
wcjob.setOutputKeyClass(Text.class);
wcjob.setOutputValueClass(IntWritable.class);
//specify where the input data lives and where the output goes
FileInputFormat.setInputPaths(wcjob,"hdfs://zookeeper1:9000/datainput");
FileOutputFormat.setOutputPath(wcjob,new Path("hdfs://zookeeper1:9000/wordcount/output"));
//submit the job and wait for it to complete
boolean res = wcjob.waitForCompletion(true);
System.exit(res ? 0 : 1);
}
}
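Packaged into a jar, the job above can be launched from a cluster node; a hedged example, where the jar name wordcount.jar is hypothetical and the input/output HDFS paths are the ones hard-coded in main:
$ hadoop jar wordcount.jar com.mingming.bigdata.wordcount.WordCount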
MAPREDUCE PROGRAM RUN MODES
Local run mode:
The MapReduce program is submitted to the LocalJobRunner and runs locally as a single process.
The data it processes, and its results, can live on the local file system or on HDFS.
How do you get local mode? Run the program without the cluster configuration files on the classpath (essentially it comes down to whether your MR program's conf contains mapreduce.framework.name=local and the yarn.resourcemanager.hostname parameter).
Local mode is very convenient for debugging business logic: just set breakpoints in Eclipse.
To run local mode on Windows and test program logic there, configure these environment variables on Windows:
%HADOOP_HOME% = d:/hadoop-2.6.1
%PATH% = %HADOOP_HOME%\bin
and replace the lib and bin directories under d:/hadoop-2.6.1 with versions compiled for the Windows platform.
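A minimal driver-side sketch of forcing local mode; the property values are the standard Hadoop ones, and whether fs.defaultFS points at the local file system or at HDFS is up to you:
Configuration conf = new Configuration();
//run the job in-process with the LocalJobRunner instead of submitting it to YARN
conf.set("mapreduce.framework.name", "local");
//keep the input and output on the local file system (an hdfs:// URI would also work)
conf.set("fs.defaultFS", "file:///");
Job job = Job.getInstance(conf);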
Cluster run mode:
The MapReduce program is submitted to the YARN cluster's ResourceManager, which distributes it to many nodes for parallel execution.
The data it processes and the output results should live on the HDFS file system.
Steps to submit to the cluster:
A. Package the program as a JAR, then launch it on any cluster node with the hadoop command
$ hadoop jar wordcount.jar cn.itcast.bigdata.mrsimple.WordCountDriver inputpath outputpath
B. Run the main method directly from Eclipse on Linux
(the project must carry the parameter mapreduce.framework.name=yarn plus the two basic YARN settings; a sketch follows this list)
C. To submit jobs to the cluster from Eclipse on Windows, the YarnRunner class has to be modified
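A minimal sketch of what item B's driver configuration might look like; the two basic YARN settings are assumed here to be the ResourceManager host and the HDFS address, and the host name is a placeholder:
Configuration conf = new Configuration();
//submit to the YARN cluster instead of running locally
conf.set("mapreduce.framework.name", "yarn");
//assumed ResourceManager host and HDFS namenode address; substitute your own
conf.set("yarn.resourcemanager.hostname", "zookeeper1");
conf.set("fs.defaultFS", "hdfs://zookeeper1:9000");
Job job = Job.getInstance(conf);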
INITIAL LOOK AT SORTING IN MAPREDUCE
Summarize the upstream/downstream traffic in the log data and output the result sorted by total traffic in descending order.
A sample input record:
1363157985066 | 1372623050 | 00-fd-d7-A4-72-bb;CMCC | 120.196.100.82 | 24 | 27 | 2481 | 24681 | 200 |
Analysis:
Basic idea: implement a custom bean that wraps the traffic information, and use that bean as the key of the map output.
While processing the data, the MR framework sorts it (the KV pairs emitted by map are sorted before they reach reduce), and the sort is based on the map output key.
So, to apply our own sort order, we can put the sorting fields into the key, have the key implement the WritableComparable interface,
and override the key's compareTo method.
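A minimal sketch of such a key, assuming we sort by total flow in descending order (FlowSortBean is a hypothetical name; the FlowBean listed below only implements Writable and is used for the flow-summing job):
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class FlowSortBean implements WritableComparable<FlowSortBean> {
private long upFlow;
private long dFlow;
private long sumFlow;
//empty constructor required so the framework can instantiate the bean by reflection
public FlowSortBean() {}
public FlowSortBean(long upFlow, long dFlow) {
this.upFlow = upFlow;
this.dFlow = dFlow;
this.sumFlow = upFlow + dFlow;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(upFlow);
out.writeLong(dFlow);
out.writeLong(sumFlow);
}
@Override
public void readFields(DataInput in) throws IOException {
upFlow = in.readLong();
dFlow = in.readLong();
sumFlow = in.readLong();
}
@Override
public int compareTo(FlowSortBean other) {
//sort by total flow, descending: larger totals come first
return Long.compare(other.sumFlow, this.sumFlow);
}
}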
Implementation
The custom bean
package com.mingming.bigdata.flowsum;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
* @author Mingming
* @Description
* @Date Created in 15:09 2017/12/4
* @Modified By
*/
public class FlowBean implements Writable{
private long upFlow;
private long dFlow;
private long sumFlow;
//deserialization uses reflection to call the no-arg constructor, so it must exist
public FlowBean() {
}
public FlowBean(long upFlow, long dFlow) {
this.upFlow = upFlow;
this.dFlow = dFlow;
this.sumFlow = upFlow + dFlow;
}
public long getUpFlow() {
return upFlow;
}
public void setUpFlow(long upFlow) {
this.upFlow = upFlow;
}
public long getdFlow() {
return dFlow;
}
public void setdFlow(long dFlow) {
this.dFlow = dFlow;
}
public long getSumFlow() {
return sumFlow;
}
public void setSumFlow(long sumFlow) {
this.sumFlow = sumFlow;
}
/**
* Serialization method
* @param dataOutput
* @throws IOException
*/
@Override
public void write(DataOutput dataOutput) throws IOException {
dataOutput.writeLong(upFlow);
dataOutput.writeLong(dFlow);
dataOutput.writeLong(sumFlow);
}
/**
* Deserialization method
* note: fields must be read back in the same order they were written
* @param dataInput
* @throws IOException
*/
@Override
public void readFields(DataInput dataInput) throws IOException {
upFlow = dataInput.readLong();
dFlow = dataInput.readLong();
sumFlow = dataInput.readLong();
}
@Override
public String toString() {
return "FlowBean{" +
"upFlow=" + upFlow +
", dFlow=" + dFlow +
", sumFlow=" + sumFlow +
'}';
}
}
The MapReduce program
package com.mingming.bigdata.flowsum;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
* @author Mingming
* @Description
* @Date Created in 15:08 2017/12/4
* @Modified By
*/
public class FlowCount {
static class FlowCountMapper extends Mapper<LongWritable,Text,Text,FlowBean>{
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//convert the line to a String
String line = value.toString();
//split the fields
String[] fields = line.split("\t");
//take the phone number
String phoneNumber = fields[1];
//take the upstream and downstream flows
long upFlow = Long.parseLong(fields[fields.length-3]);
long dFlow = Long.parseLong(fields[fields.length-2]);
context.write(new Text(phoneNumber),new FlowBean(upFlow,dFlow));
}
}
static class FlowCountReducer extends Reducer<Text,FlowBean,Text,FlowBean>{
@Override
protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
long sumUpFlow = 0;
long sumDFlow = 0;
//iterate over all beans and accumulate their upstream and downstream flows separately
for (FlowBean bean: values){
sumUpFlow += bean.getUpFlow();
sumDFlow += bean.getdFlow();
}
FlowBean result = new FlowBean(sumUpFlow,sumDFlow);
context.write(key,result);
}
}
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Configuration configuration = new Configuration();
Job job = Job.getInstance(configuration);
/*job.setJar("/home/lib/wc.jar");*/
//specify the local path of the jar containing this program
job.setJarByClass(FlowCount.class);
//specify the Mapper class this job uses
job.setMapperClass(FlowCountMapper.class);
//specify the Reducer class this job uses
job.setReducerClass(FlowCountReducer.class);
//specify the KV types of the mapper output
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(FlowBean.class);
//specify the KV types of the final output
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(FlowBean.class);
//specify the directory containing the job's raw input files
FileInputFormat.setInputPaths(job,new Path(args[0]));
//specify the directory for the job's output
FileOutputFormat.setOutputPath(job,new Path(args[1]));
//submit the job's configuration and the jar with the job's classes to YARN for execution
/* job.submit();*/
boolean res = job.waitForCompletion(true);
System.exit(res?0:1);
}
}
Partitioning in MapReduce: the Partitioner
Requirement:
Distribute records into different output files according to different locations.
Analysis:
MapReduce groups the KV pairs emitted by map by key and dispatches them to the different reduce tasks. The default dispatch rule is: key's hashcode % number of reduce tasks. So, to group the data by our own rule, we have to replace the data-dispatch component, the Partitioner: define a CustomPartitioner that extends the abstract class Partitioner, and then register it on the job object: job.setPartitionerClass(CustomPartitioner.class)
Implementation
/**
* Defines our own rule for dispatching (grouping) data between map and reduce: dispatch (group) by the province the phone number belongs to (ProvincePartitioner).
* The default dispatch component is HashPartitioner.
*
* @author
*
*/
public class ProvincePartitioner extends Partitioner<Text, FlowBean> {
static HashMap<String, Integer> provinceMap = new HashMap<String, Integer>();
static {
provinceMap.put("135", 0);
provinceMap.put("136", 1);
provinceMap.put("137", 2);
provinceMap.put("138", 3);
provinceMap.put("139", 4);
}
@Override
public int getPartition(Text key, FlowBean value, int numPartitions) {
Integer code = provinceMap.get(key.toString().substring(0, 3));
return code == null ? 5 : code;
}
}
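For the partitioner to take effect, a driver such as the FlowCount main above also has to register it and start enough reduce tasks; a minimal sketch (six reduce tasks because getPartition returns values 0 through 5):
job.setPartitionerClass(ProvincePartitioner.class);
//one reduce task per partition: the five known prefixes plus the default bucket
job.setNumReduceTasks(6);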
Custom InputFormat
Requirement:
Both HDFS and MapReduce lose efficiency on small files, yet in practice we often have to process large numbers of them, so a corresponding solution is needed.
Analysis:
Small-file optimization basically comes down to the following approaches:
- merge small files, or small batches of data, into large files at data-collection time, before uploading to HDFS
- before the business processing, merge the small files on HDFS with a MapReduce program
- at MapReduce processing time, use CombineFileInputFormat to improve efficiency
Implementation:
This section implements the second approach above:
Define a custom InputFormat
Override the RecordReader so that it reads one whole file at a time and packages it as a single KV pair
Use SequenceFileOutputFormat to write out the merged file.
The code follows:
A custom InputFormat
public class WholeFileInputFormat extends
FileInputFormat<NullWritable, BytesWritable> {
//mark every small file as non-splittable, so that each small file produces exactly one key-value pair
@Override
protected boolean isSplitable(JobContext context, Path file) {
return false;
}
@Override
public RecordReader<NullWritable, BytesWritable> createRecordReader(
InputSplit split, TaskAttemptContext context) throws IOException,
InterruptedException {
WholeFileRecordReader reader = new WholeFileRecordReader();
reader.initialize(split, context);
return reader;
}
}
A custom RecordReader
class WholeFileRecordReader extends RecordReader<NullWritable, BytesWritable> {
private FileSplit fileSplit;
private Configuration conf;
private BytesWritable value = new BytesWritable();
private boolean processed = false;
@Override
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
this.fileSplit = (FileSplit) split;
this.conf = context.getConfiguration();
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
if (!processed) {
byte[] contents = new byte[(int) fileSplit.getLength()];
Path file = fileSplit.getPath();
FileSystem fs = file.getFileSystem(conf);
FSDataInputStream in = null;
try {
in = fs.open(file);
IOUtils.readFully(in, contents, 0, contents.length);
value.set(contents, 0, contents.length);
} finally {
IOUtils.closeStream(in);
}
processed = true;
return true;
}
return false;
}
@Override
public NullWritable getCurrentKey() throws IOException,
InterruptedException {
return NullWritable.get();
}
@Override
public BytesWritable getCurrentValue() throws IOException,
InterruptedException {
return value;
}
@Override
public float getProgress() throws IOException {
return processed ? 1.0f : 0.0f;
}
@Override
public void close() throws IOException {
// do nothing
}
}
Define the MapReduce processing flow
public class SmallFilesToSequenceFileConverter extends Configured implements
Tool {
static class SequenceFileMapper extends
Mapper<NullWritable, BytesWritable, Text, BytesWritable> {
private Text filenameKey;
@Override
protected void setup(Context context) throws IOException,
InterruptedException {
InputSplit split = context.getInputSplit();
Path path = ((FileSplit) split).getPath();
filenameKey = new Text(path.toString());
}
@Override
protected void map(NullWritable key, BytesWritable value,
Context context) throws IOException, InterruptedException {
context.write(filenameKey, value);
}
}
@Override
public int run(String[] args) throws Exception {
Configuration conf = new Configuration();
System.setProperty("HADOOP_USER_NAME", "hdfs");
String[] otherArgs = new GenericOptionsParser(conf, args)
.getRemainingArgs();
if (otherArgs.length != 2) {
System.err.println("Usage: combinefiles <in> <out>");
System.exit(2);
}
Job job = Job.getInstance(conf,"combine small files to sequencefile");
//use the whole-file input format so that every small file arrives as a single KV pair
job.setInputFormatClass(WholeFileInputFormat.class);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(BytesWritable.class);
job.setMapperClass(SequenceFileMapper.class);
//input and output directories from the command line
FileInputFormat.setInputPaths(job, new Path(otherArgs[0]));
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
return job.waitForCompletion(true) ? 0 : 1;
}
public static void main(String[] args) throws Exception {
int exitCode = ToolRunner.run(new SmallFilesToSequenceFileConverter(),
args);
System.exit(exitCode);
}
}
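Once packaged, the converter might be launched roughly like this (the jar name and the HDFS paths are hypothetical):
$ hadoop jar combinefiles.jar SmallFilesToSequenceFileConverter /smallfiles /smallfiles_seq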
Custom OutputFormat
Requirement:
Some raw logs need "enhancement" processing. The flow:
- read records from the raw log files
- use a URL field in the log to fetch information from an external knowledge base and append it to the raw record
- if the enhancement succeeds, write the record to the enhanced-log directory; if it fails, extract the URL field from the raw record and write it to the to-crawl list directory.
Analysis:
The key point is that, depending on the data, one MapReduce program has to write two kinds of results to different directories. This kind of flexible output requirement can be met with a custom OutputFormat.
Implementation:
Key points:
- access external resources from within MapReduce
- define a custom OutputFormat, override its RecordWriter, and override write(), the method that actually writes the data
The code follows:
Fetching the data from the database:
public class DBLoader {
public static void dbLoader(HashMap<String, String> ruleMap) {
Connection conn = null;
Statement st = null;
ResultSet res = null;
try {
Class.forName("com.mysql.jdbc.Driver");
conn = DriverManager.getConnection("jdbc:mysql://hdp-node01:3306/urlknowledge", "root", "root");
st = conn.createStatement();
res = st.executeQuery("select url,content from urlcontent");
while (res.next()) {
ruleMap.put(res.getString(1), res.getString(2));
}
} catch (Exception e) {
e.printStackTrace();
} finally {
try{
if(res!=null){
res.close();
}
if(st!=null){
st.close();
}
if(conn!=null){
conn.close();
}
}catch(Exception e){
e.printStackTrace();
}
}
}
public static void main(String[] args) {
DBLoader db = new DBLoader();
HashMap<String, String> map = new HashMap<String,String>();
db.dbLoader(map);
System.out.println(map.size());
}
}
A custom OutputFormat
public class LogEnhancerOutputFormat extends FileOutputFormat<Text, NullWritable>{
@Override
public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
FileSystem fs = FileSystem.get(context.getConfiguration());
Path enhancePath = new Path("hdfs://hdp-node01:9000/flow/enhancelog/enhanced.log");
Path toCrawlPath = new Path("hdfs://hdp-node01:9000/flow/tocrawl/tocrawl.log");
FSDataOutputStream enhanceOut = fs.create(enhancePath);
FSDataOutputStream toCrawlOut = fs.create(toCrawlPath);
return new MyRecordWriter(enhanceOut,toCrawlOut);
}
static class MyRecordWriter extends RecordWriter<Text, NullWritable>{
FSDataOutputStream enhanceOut = null;
FSDataOutputStream toCrawlOut = null;
public MyRecordWriter(FSDataOutputStream enhanceOut, FSDataOutputStream toCrawlOut) {
this.enhanceOut = enhanceOut;
this.toCrawlOut = toCrawlOut;
}
@Override
public void write(Text key, NullWritable value) throws IOException, InterruptedException {
//given the data, this writer is responsible for writing it to the destination: HDFS
//if the incoming content carries the "tocrawl" marker, write it to the to-crawl output stream toCrawlOut
if(key.toString().contains("tocrawl")){
toCrawlOut.write(key.toString().getBytes());
}else{
enhanceOut.write(key.toString().getBytes());
}
}
@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
if(toCrawlOut!=null){
toCrawlOut.close();
}
if(enhanceOut!=null){
enhanceOut.close();
}
}
}
}
Develop the MapReduce processing flow
/**
* This program enhances the user browsing logs that are produced continuously every hour (the content-analysis result of the url in each log line is appended to the end of that raw line)
*
* @author
*
*/
public class LogEnhancer {
static class LogEnhancerMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
HashMap<String, String> knowledgeMap = new HashMap<String, String>();
/**
* The map task calls the setup method once during initialization; we use this hook to load the external knowledge base into the memory of the machine running the map task
*/
@Override
protected void setup(Context context) throws IOException, InterruptedException {
DBLoader.dbLoader(knowledgeMap);
}
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String line = value.toString();
String[] fields = StringUtils.split(line, "\t");
try {
String url = fields[26];
// look up the content-analysis information for this line's url in the knowledge base
String content = knowledgeMap.get(url);
// build one of two kinds of output depending on whether a match was found
String result = "";
if (null == content) {
// content destined for the to-crawl list
result = url + "\t" + "tocrawl\n";
} else {
// content destined for the enhanced log
result = line + "\t" + content + "\n";
}
context.write(new Text(result), NullWritable.get());
} catch (Exception e) {
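// malformed lines (e.g. with too few fields) are silently skipped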
}
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
job.setJarByClass(LogEnhancer.class);
job.setMapperClass(LogEnhancerMapper.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(NullWritable.class);
// register the custom output format component with the job
job.setOutputFormatClass(LogEnhancerOutputFormat.class);
FileInputFormat.setInputPaths(job, new Path(args[0]));
// although we defined our own OutputFormat, it extends FileOutputFormat,
// and FileOutputFormat wants to write a _SUCCESS file, so an output directory still has to be specified here
FileOutputFormat.setOutputPath(job, new Path(args[1]));
job.waitForCompletion(true);
System.exit(0);
}
}
Custom GroupingComparator
Requirement:
Given orders such as:
Order id | Product id | Amount |
---|---|---|
Order_0000001 | pdt_01 | 222.8 |
find, for each order, the single transaction with the largest amount.
Analysis:
- use a bean carrying "order id and amount" as the key, so that all order records read in the map phase can be partitioned by order id and sorted by amount before being sent to reduce
- on the reduce side, use a GroupingComparator to aggregate the kv pairs with the same order id into one group, then take the first one, which is the maximum.
Implementation:
A custom GroupingComparator
/**
* Controls how the reduce side groups kv pairs during the shuffle
* @author duanhaitao@itcast.cn
*
*/
public class ItemidGroupingComparator extends WritableComparator {
protected ItemidGroupingComparator() {
super(OrderBean.class, true);
}
@Override
public int compare(WritableComparable a, WritableComparable b) {
OrderBean abean = (OrderBean) a;
OrderBean bbean = (OrderBean) b;
//beans with the same item_id are treated as equal, so they are aggregated into one group
return abean.getItemid().compareTo(bbean.getItemid());
}
}
Define the order-information bean:
/**
* Order-information bean, implementing Hadoop's serialization mechanism
* @author duanhaitao@itcast.cn
*
*/
public class OrderBean implements WritableComparable<OrderBean>{
private Text itemid;
private DoubleWritable amount;
public OrderBean() {
}
public OrderBean(Text itemid, DoubleWritable amount) {
set(itemid, amount);
}
public void set(Text itemid, DoubleWritable amount) {
this.itemid = itemid;
this.amount = amount;
}
public Text getItemid() {
return itemid;
}
public DoubleWritable getAmount() {
return amount;
}
@Override
public int compareTo(OrderBean o) {
int cmp = this.itemid.compareTo(o.getItemid());
if (cmp == 0) {
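// negate the comparison so that larger amounts sort first (descending within the same itemid)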
cmp = -this.amount.compareTo(o.getAmount());
}
return cmp;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeUTF(itemid.toString());
out.writeDouble(amount.get());
}
@Override
public void readFields(DataInput in) throws IOException {
String readUTF = in.readUTF();
double readDouble = in.readDouble();
this.itemid = new Text(readUTF);
this.amount= new DoubleWritable(readDouble);
}
@Override
public String toString() {
return itemid.toString() + "\t" + amount.get();
}
}
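The driver below also registers an ItemIdPartitioner, which is not listed in this section; a minimal sketch, assuming it partitions purely on the itemid so that all records of the same order reach the same reduce task:
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Partitioner;
public class ItemIdPartitioner extends Partitioner<OrderBean, NullWritable> {
@Override
public int getPartition(OrderBean key, NullWritable value, int numPartitions) {
//partition only on the order id, so every record of one order goes to the same reducer
return (key.getItemid().hashCode() & Integer.MAX_VALUE) % numPartitions;
}
}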
Write the MapReduce flow:
/**
* Uses the secondary-sort mechanism to output, for each item, the order record with the largest amount
* @author duanhaitao@itcast.cn
*
*/
public class SecondarySort {
static class SecondarySortMapper extends Mapper<LongWritable, Text, OrderBean, NullWritable>{
OrderBean bean = new OrderBean();
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String line = value.toString();
String[] fields = StringUtils.split(line, "\t");
bean.set(new Text(fields[0]), new DoubleWritable(Double.parseDouble(fields[1])));
context.write(bean, NullWritable.get());
}
}
static class SecondarySortReducer extends Reducer<OrderBean, NullWritable, OrderBean, NullWritable>{
//once the GroupingComparator is set, the kv data received here looks like: <1001 87.6>,null <1001 76.5>,null ....
//the key parameter of the reduce method is then the key of the first kv in that group: <1001 87.6>
//to output, for each item, the order with the largest amount, it is enough to write out just this key
@Override
protected void reduce(OrderBean key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
context.write(key, NullWritable.get());
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
job.setJarByClass(SecondarySort.class);
job.setMapperClass(SecondarySortMapper.class);
job.setReducerClass(SecondarySortReducer.class);
job.setOutputKeyClass(OrderBean.class);
job.setOutputValueClass(NullWritable.class);
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
//specify the GroupingComparator class used in the shuffle
job.setGroupingComparatorClass(ItemidGroupingComparator.class);
//specify the Partitioner class used in the shuffle
job.setPartitionerClass(ItemIdPartitioner.class);
job.setNumReduceTasks(3);
job.waitForCompletion(true);
}
}
Using DistributedCache in MapReduce
Map-side join example
Requirement:
Join two "tables", one of which is small while the other is very large. This situation is extremely common in practice, for example joining an "order log" with "product information".
Analysis:
Principle:
This applies when one of the joined tables is small.
The small table can be shipped to every map node, so each map node can join the large-table data it reads against the small table locally and emit the final result.
This greatly increases the parallelism of the join and speeds up processing.
Example: pre-load the small table in the Mapper class and do the join there,
using the DistributedCache mechanism to distribute the small table's data to every map task node, so that each map task can load the small table from its local disk and perform the join locally.
Implementation
public class TestDistributedCache {
static class TestDistributedCacheMapper extends Mapper<LongWritable, Text, Text, Text>{
FileReader in = null;
BufferedReader reader = null;
HashMap<String,String> b_tab = new HashMap<String, String>();
String localpath =null;
String uirpath = null;
//called once when the map task is initialized
@Override
protected void setup(Context context) throws IOException, InterruptedException {
//these lines retrieve the local absolute path of the cache file, for testing and verification
Path[] files = context.getLocalCacheFiles();
localpath = files[0].toString();
URI[] cacheFiles = context.getCacheFiles();
//how to use the cache file: just read it with ordinary local file IO
//the data read here is a small file in the local working directory of the machine running this map task
in = new FileReader("b.txt");
reader =new BufferedReader(in);
String line =null;
while(null!=(line=reader.readLine())){
String[] fields = line.split(",");
b_tab.put(fields[0],fields[1]);
}
IOUtils.closeStream(reader);
IOUtils.closeStream(in);
}
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//what is read here is the input split (on hdfs) that this map task is responsible for
String[] fields = value.toString().split("\t");
String a_itemid = fields[0];
String a_amount = fields[1];
String b_name = b_tab.get(a_itemid);
// output result, e.g. 1001 98.9 banana
context.write(new Text(a_itemid), new Text(a_amount + "\t" + ":" + localpath + "\t" +b_name ));
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
job.setJarByClass(TestDistributedCache.class);
job.setMapperClass(TestDistributedCacheMapper.class);
job.setOutputKeyClass(Text.class);
//the mapper emits Text values, so the output value class is Text
job.setOutputValueClass(Text.class);
//this is the path of the normal data to be processed
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
//no reducer needed
job.setNumReduceTasks(0);
//ship a file to the task process's working directory
job.addCacheFile(new URI("hdfs://hadoop-server01:9000/cachefile/b.txt"));
//ship an archive to the task process's working directory
// job.addArchiveToClassPath(archive);
//ship a jar to the classpath of the task nodes
// job.addFileToClassPath(jarfile);
job.waitForCompletion(true);
}
}
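For reference, setup() above splits each line of the cached b.txt on a comma and stores fields[0] as the key and fields[1] as the value, so the small table is assumed to look something like this (the values are made up):
1001,banana
1002,apple
1003,orange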