首先数据格式为:
相同字母组成的单词汇总:根据 MapReduce 的特点,map 端的输出会在 shuffle 阶段按 key 分区后发送到 reduce 端。利用这个特性,我们只需设计 key,使由相同字母组成的单词拥有同一个 key 即可。由于单词可能存在大小写差异,可以先统一转为小写,再对字母排序,排序后的字符串即可作为相同的 key。
map端输出数据格式:[ act , act ] [ act , cat ] [ act , CAT ]
reduce端收到数据格式:[ act , (act,cat,CAT) ]
代码实现:
/**
 * MapReduce job that groups words composed of the same letters (anagrams).
 *
 * Key design: each word is lowercased and its characters sorted, so all
 * anagrams ("act", "cat", "CAT") map to the same key ("act") and arrive at
 * the same reducer, which emits the grouped list.
 */
public class Word {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);
        job.setJarByClass(Word.class);
        job.setMapperClass(WordMapper.class);
        job.setReducerClass(WordReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        FileInputFormat.setInputPaths(job, new Path("input/word.txt"));
        FileOutputFormat.setOutputPath(job, new Path("output"));
        // Propagate job success/failure to the caller via the process exit code
        // (printing the flag, as before, hides failures from schedulers/shells).
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    /**
     * Emits (sorted-lowercase-letters, original-word) for every word in a line,
     * so that anagrams share a key in the shuffle phase.
     */
    static class WordMapper extends Mapper<LongWritable, Text, Text, Text> {
        // Reused writables — avoids allocating two objects per word, the
        // standard Hadoop mapper idiom.
        private final Text outKey = new Text();
        private final Text outValue = new Text();

        @Override
        protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
            // \s+ tolerates multiple/leading whitespace; a bare " " split would
            // produce empty tokens and bogus empty-key records.
            String[] words = value.toString().split("\\s+");
            for (String word : words) {
                if (word.isEmpty()) {
                    continue; // skip blanks from leading whitespace
                }
                // Canonical key: lowercase the word, then sort its characters.
                char[] cs = word.toLowerCase().toCharArray();
                Arrays.sort(cs);
                outKey.set(String.valueOf(cs));
                outValue.set(word);
                context.write(outKey, outValue);
            }
        }
    }

    /**
     * Collects all words sharing a key (i.e. all anagrams of one letter set)
     * and writes them as a single bracketed list, e.g. "[act, cat, CAT]".
     */
    static class WordReducer extends Reducer<Text, Text, Text, NullWritable> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Reducer<Text, Text, Text, NullWritable>.Context context) throws IOException, InterruptedException {
            List<String> str = new ArrayList<>();
            for (Text value : values) {
                str.add(value.toString());
            }
            // List.toString yields the "[a, b, c]" format used in the write-up.
            context.write(new Text(String.valueOf(str)), NullWritable.get());
        }
    }
}
我们还可以使用 Spark RDD 算子来实现这个功能:
/**
 * Groups anagrams with Spark RDD operators: every word is keyed by its
 * lowercased, character-sorted form, so words made of the same letters
 * end up in the same group.
 */
object WordCollect {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[*]").setAppName("WordCollect"))

    val lines: RDD[String] = sc.textFile("datas/word2.txt")

    val anagramGroups = lines
      .flatMap(_.split(" "))                        // flatten each line into its words
      .map(word => (word.toLowerCase.sorted, word)) // canonical key: lowercase, letters sorted
      .groupByKey()                                 // anagrams now share one key

    anagramGroups.collect().foreach(println)

    sc.stop()
  }
}
结果: