GroupOpration
The fields, in order, are:
No (record number)
Name
Subject
Score
Function: for each user (Name), compute count(1) and sum(Score).
Equivalent SQL:

select Name, count(1), sum(Score)
from table
where Name in ('zyl', 'lyy')
group by Name
Input:

1 zyl English 80
2 zyl Math 50
3 lyy English 90
4 lyy Chinese 80
Output:

zyl 2 130
lyy 2 170
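The Java MapReduce version below follows the same shape: the mapper plays the role of the WHERE clause, parsing each input line and emitting a (Name, Score) pair only for names in the filter list, while the reducer performs the GROUP BY, counting and summing the scores delivered for each Name.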
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class GroupOpration {
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
        // Names to keep, mirroring the WHERE Name IN ('zyl', 'lyy') clause.
        private final List<String> filterList = Arrays.asList("zyl", "lyy");
        private final Text name = new Text();
        private final IntWritable score = new IntWritable();

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            // Each call receives one line of the form "No Name Subject Score".
            StringTokenizer tokens = new StringTokenizer(value.toString(), " ");
            if (tokens.countTokens() < 4) {
                return; // skip malformed lines
            }
            tokens.nextToken(); // No (unused)
            String nameField = tokens.nextToken();
            tokens.nextToken(); // Subject (unused)
            String scoreField = tokens.nextToken();
            if (filterList.contains(nameField)) {
                name.set(nameField);
                score.set(Integer.parseInt(scoreField));
                context.write(name, score);
            }
        }
    }
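    // Between map and reduce, the framework shuffles and sorts the map output,
    // so the reducer sees each distinct Name exactly once, together with an
    // Iterable over all of its scores, e.g. ("zyl", [80, 50]) for the sample
    // input (value order is not guaranteed).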
    public static class GroupReducer extends Reducer<Text, IntWritable, Text, Text> {
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            // count(1) and sum(Score) over all rows sharing this Name.
            int sum = 0;
            int count = 0;
            for (IntWritable val : values) {
                sum += val.get();
                count++;
            }
            context.write(key, new Text(count + " " + sum));
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Pick up the cluster configuration from $HADOOP_HOME/etc/hadoop.
        String HADOOP_HOME = System.getenv("HADOOP_HOME");
        String HADOOP_CONF_DIR = HADOOP_HOME + "/etc/hadoop";
        conf.addResource(new Path(HADOOP_CONF_DIR + "/core-site.xml"));
        conf.addResource(new Path(HADOOP_CONF_DIR + "/hdfs-site.xml"));
        conf.addResource(new Path(HADOOP_CONF_DIR + "/yarn-site.xml"));
        // Run this small job in a single JVM (uber mode).
        conf.set("mapreduce.job.ubertask.enable", "true");
        // Separate key and value with a space instead of the default tab.
        // Note: with the new (mapreduce) API, the old key
        // "mapred.textoutputformat.separator" is silently ignored.
        conf.set("mapreduce.output.textoutputformat.separator", " ");
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: GroupOpration <in> [<in>...] <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "GroupOpration");
        job.setJarByClass(GroupOpration.class);
        job.setMapperClass(TokenizerMapper.class);
        // GroupReducer cannot double as a combiner: its output value type (Text)
        // does not match the map output value type (IntWritable), and re-counting
        // combined partial results would break count(1).
        //job.setCombinerClass(GroupReducer.class);
        job.setReducerClass(GroupReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // All arguments but the last are input paths; the last is the output path.
        for (int i = 0; i < otherArgs.length - 1; ++i) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[otherArgs.length - 1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
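The map and reduce logic can be exercised locally before submitting to a cluster. Below is a minimal sketch, assuming MRUnit 1.x (org.apache.hadoop.mrunit) and JUnit 4 are on the test classpath; the test class name is illustrative, not part of the original code.

import java.util.Arrays;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mrunit.mapreduce.MapDriver;
import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
import org.junit.Test;

public class GroupOprationTest {
    @Test
    public void mapperFiltersAndEmitsScore() throws Exception {
        // A line whose Name is in the filter list yields one (Name, Score) pair.
        MapDriver.newMapDriver(new GroupOpration.TokenizerMapper())
                .withInput(new LongWritable(0), new Text("1 zyl English 80"))
                .withOutput(new Text("zyl"), new IntWritable(80))
                .runTest();
    }

    @Test
    public void reducerCountsAndSums() throws Exception {
        // Two scores for "zyl" should produce "2 130" (count, then sum).
        ReduceDriver.newReduceDriver(new GroupOpration.GroupReducer())
                .withInput(new Text("zyl"),
                        Arrays.asList(new IntWritable(80), new IntWritable(50)))
                .withOutput(new Text("zyl"), new Text("2 130"))
                .runTest();
    }
}

Once packaged into a jar, the job is submitted in the usual way (hadoop jar <jar> GroupOpration <input> <output>); with uber-task mode enabled as above, a job this small runs entirely in a single container.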