Computing each department's total salary, with the department name, using a MapReduce map-side join

Load the assignment data files dept.txt (department table) and emp.txt (employee table) into Hadoop (a small upload sketch follows the expected result below).
Compute the total salary of each department; either a map-side join or a reduce-side join may be used.
The expected result:
ACCOUNTING 8750
RESEARCH 6775
SALES 9400
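
Before running the job, both files need to be in Hadoop. This can be done from the command line, or programmatically; below is a minimal sketch using the FileSystem API, assuming fs.defaultFS points at the cluster, both files sit in the current local directory, and the relative target paths match the paths the job below uses (the Upload class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class Upload {
	public static void main(String[] args) throws Exception {
		FileSystem fs = FileSystem.get(new Configuration());
		// copy each local file into the user's HDFS working directory
		fs.copyFromLocalFile(new Path("dept.txt"), new Path("dept.txt"));
		fs.copyFromLocalFile(new Path("emp.txt"), new Path("emp.txt"));
		fs.close();
	}
}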

dept.txt data (DEPTNO, DNAME, LOC):
10,ACCOUNTING,NEW YORK
20,RESEARCH,DALLAS
30,SALES,CHICAGO
40,OPERATIONS,BOSTON

emp.txt data (EMPNO, ENAME, JOB, MGR, HIREDATE, SAL, COMM, DEPTNO; note that not every row has every field — see the note after the data):
7369,SMITH,CLERK,7902,17-12月-80,800,20
7499,ALLEN,SALESMAN,7698,20-2月-81,1600,300,30
7521,WARD,SALESMAN,7698,22-2月-81,1250,500,30
7566,JONES,MANAGER,7839,02-4月-81,2975,20
7654,MARTIN,SALESMAN,7698,28-9月-81,1250,1400,30
7698,BLAKE,MANAGER,7839,01-5月-81,2850,30
7782,CLARK,MANAGER,7839,09-6月-81,2450,10
7839,KING,PRESIDENT,17-11月-81,5000,10
7844,TURNER,SALESMAN,7698,08-9月-81,1500,0,30
7900,JAMES,CLERK,7698,03-12月-81,950,30
7902,FORD,ANALYST,7566,03-12月-81,3000,20
7934,MILLER,CLERK,7782,23-1月-82,1300,10
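
Note that the emp.txt rows do not all have the same number of fields: the SALESMAN rows carry an extra COMM value between SAL and DEPTNO (8 fields), KING's row has no MGR (6 fields), and the remaining rows have 7 fields. DEPTNO is therefore always the last field, while SAL is only at a fixed index (5) when COMM is present; otherwise it is the second-to-last field. A small sketch of the extraction rule the mapper below relies on (the helper name is illustrative):

// Illustrative helper: pull DEPTNO and SAL out of one emp.txt line.
static String[] deptAndSal(String line) {
	String[] f = line.split(",");
	String deptNo = f[f.length - 1];                       // DEPTNO: always the last field
	String sal = (f.length == 8) ? f[5] : f[f.length - 2]; // SAL precedes COMM when COMM exists
	return new String[] { deptNo, sal };
}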

package com.bw.project;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;



public class MR {
	public static class DMapper extends Mapper<LongWritable, Text, Text, IntWritable>{
		// deptno -> dname at first; employee salaries are then appended,
		// so each value becomes "dname:sal1:sal2:..."
		Map<String, String> map = new HashMap<>();
		@Override
		protected void setup(Mapper<LongWritable, Text, Text, IntWritable>.Context context)
				throws IOException, InterruptedException {
			// read the distributed-cache copy of dept.txt from the task's local filesystem
			FileSystem fs = FileSystem.getLocal(context.getConfiguration());
			Path[] paths = context.getLocalCacheFiles();
			InputStream in = fs.open(paths[0]);
			BufferedReader bf = new BufferedReader(new InputStreamReader(in));
			String read;
			while ((read = bf.readLine()) != null) {
				String[] split = read.split(",");
				map.put(split[0], split[1]); // deptno -> dname
			}
			bf.close();
			in.close();
			// note: do not close fs here; FileSystem instances are cached and shared
		}
		
		@Override
		protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, IntWritable>.Context context)
				throws IOException, InterruptedException {
			String[] split = value.toString().split(",");
			if (split.length < 6) {
				return; // skip blank or malformed lines
			}
			// DEPTNO is always the last field; SAL is at index 5 when a COMM
			// column is present (8 fields), otherwise the second-to-last field
			String deptNo = split[split.length - 1];
			String sal = (split.length == 8) ? split[5] : split[split.length - 2];
			if (map.get(deptNo) != null) {
				// append this employee's salary to the department's entry
				map.put(deptNo, map.get(deptNo) + ":" + sal);
			}
		}
		@Override
		protected void cleanup(Mapper<LongWritable, Text, Text, IntWritable>.Context context)
				throws IOException, InterruptedException {
			// each value looks like "dname:sal1:sal2:..."; sum the salary parts
			Collection<String> values = map.values();
			for (String s : values) {
				int sum = 0;
				String[] split = s.split(":");
				for (int i = 1; i < split.length; i++) {
					sum += Integer.parseInt(split[i]);
				}
				// departments with no employees (e.g. OPERATIONS) are skipped
				if (sum > 0) {
					context.write(new Text(split[0]), new IntWritable(sum));
				}
			}
		}
	}
	
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		Job job = Job.getInstance(conf);
		job.setJarByClass(MR.class);
		// ship dept.txt (the small table) to every map task via the distributed cache
		job.addCacheFile(new URI("dept.txt"));
		
		job.setMapperClass(DMapper.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);
		
		// map-only job: the join and the aggregation both happen in the mapper
		job.setNumReduceTasks(0);
		
		FileInputFormat.addInputPath(job, new Path("emp.txt"));
		FileOutputFormat.setOutputPath(job, new Path("dept1"));
		
		job.waitForCompletion(true);
	}
	
	
}
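
Since this is a map-only job, the result ends up in part-m-00000 under the dept1 output directory. For comparison, the assignment also allows a reduce-side join: there the mapper would read both files, tagging each record by source, e.g. emitting (deptno, "dept:"+dname) for dept.txt lines and (deptno, "emp:"+sal) for emp.txt lines, and a reducer would join and sum them. A minimal sketch of such a reducer (it would also need import org.apache.hadoop.mapreduce.Reducer; the class name DReduce and the tag prefixes are assumptions, not from a tested job):

// Hypothetical reduce-side join reducer: for one DEPTNO, the values hold
// the department name (tagged "dept:") and the salaries (tagged "emp:").
public static class DReduce extends Reducer<Text, Text, Text, IntWritable> {
	@Override
	protected void reduce(Text key, Iterable<Text> values, Context context)
			throws IOException, InterruptedException {
		String dname = null;
		int sum = 0;
		for (Text v : values) {
			String s = v.toString();
			if (s.startsWith("dept:")) {
				dname = s.substring("dept:".length());
			} else {
				sum += Integer.parseInt(s.substring("emp:".length()));
			}
		}
		// only departments that actually have employees produce output
		if (dname != null && sum > 0) {
			context.write(new Text(dname), new IntWritable(sum));
		}
	}
}

The map-only version above avoids the shuffle entirely, which is why a map-side join is usually preferred when one table is small enough to cache on every mapper.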

