Field Filtering

Filter

A field-filtering operation needs only the map phase; no reduce logic is required.

```java
/*
 * Fields, in order: No Name Subject Score.
 *
 * Goal: select the students whose Name is "zyl"; keep only the fields Name, Subject, Score.
 *
 * input:
 * 1 zyl English 80
 * 2 zyl Math 50
 * 3 lyy English 90
 * 4 lyy Chinese 80
 *
 * output:
 * zyl English 80
 * zyl Math 50
 * */

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;


public class Filter {
	// Only a Mapper is implemented; there is no Reducer.
	public static class TokenizerMapper extends Mapper<Object, Text, Text, Text> {

		// Names to keep; records with any other name are dropped.
		private static final List<String> FILTER_LIST = Arrays.asList("zyl");

		private final Text outKey = new Text();
		private final Text emptyValue = new Text("");

		@Override
		public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
			// Each input line has the form: No Name Subject Score
			String[] fields = value.toString().split(" ");
			// Emit only Name, Subject, and Score for matching names;
			// malformed lines are skipped rather than throwing.
			if (fields.length == 4 && FILTER_LIST.contains(fields[1])) {
				outKey.set(fields[1] + " " + fields[2] + " " + fields[3]);
				context.write(outKey, emptyValue);
			}
		}
	}

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		String HADOOP_HOME = System.getenv("HADOOP_HOME");
		String HADOOP_CONF_DIR = HADOOP_HOME + "/etc/hadoop";
		conf.addResource(new Path(HADOOP_CONF_DIR + "/core-site.xml"));
		conf.addResource(new Path(HADOOP_CONF_DIR + "/hdfs-site.xml"));
		conf.addResource(new Path(HADOOP_CONF_DIR + "/yarn-site.xml"));
		conf.set("mapreduce.job.ubertask.enable", "true");
		String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
		if (otherArgs.length < 2) {
			System.err.println("Usage: Filter <in> [<in>...] <out>");
			System.exit(2);
		}
		Job job = Job.getInstance(conf, "Filter");
		job.setJarByClass(Filter.class);
		job.setMapperClass(TokenizerMapper.class);
		// No Reducer class is set here; by default Hadoop then runs the
		// identity Reducer, which passes the map output through unchanged.
		//job.setReducerClass(IntSumReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);
		for (int i = 0; i < otherArgs.length - 1; ++i) {
			FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
		}
		FileOutputFormat.setOutputPath(job, new Path(otherArgs[otherArgs.length - 1]));
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
```

You can also set the number of reduce tasks to 0 with `job.setNumReduceTasks(0);`, which achieves the same effect.
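
As a minimal sketch of that variant (reusing the `TokenizerMapper` above; the class name `MapOnlyFilter` and the argument handling are illustrative, not from the original post):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MapOnlyFilter {
	public static void main(String[] args) throws Exception {
		Job job = Job.getInstance(new Configuration(), "MapOnlyFilter");
		job.setJarByClass(MapOnlyFilter.class);
		job.setMapperClass(Filter.TokenizerMapper.class);
		// Explicitly run zero reduce tasks: each mapper writes its output
		// straight through the job's OutputFormat, and shuffle/sort is skipped.
		job.setNumReduceTasks(0);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);
		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
```

With zero reduce tasks the job also writes one output file per map task (`part-m-00000`, ...) instead of per reduce task, and the output is not sorted.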

When the number of reduce tasks is 0, the map task uses the `NewDirectOutputCollector` class directly: it calls `outputFormat.getRecordWriter` and writes the map output straight out.
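
For reference, the branch that chooses this path sits in Hadoop's `MapTask.runNewMapper()`; the sketch below is simplified and paraphrased from the Hadoop 2.x source, not verbatim:

```java
// Inside MapTask.runNewMapper(), simplified:
if (job.getNumReduceTasks() == 0) {
	// Map-only job: NewDirectOutputCollector wraps the RecordWriter obtained
	// from outputFormat.getRecordWriter() and writes map output directly.
	output = new NewDirectOutputCollector(taskContext, job, umbilical, reporter);
} else {
	// Otherwise map output goes to the sort/shuffle buffer for the reducers.
	output = new NewOutputCollector(taskContext, job, umbilical, reporter);
}
```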


Reposted from: https://my.oschina.net/yulongblog/blog/740638
