# MapReduce in Practice: Word Count, File Merging and Sorting

## Word count

### Source

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    // Mapper: split each input line into tokens and emit (word, 1) for every token.
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    // Reducer: sum the counts collected for each word and emit (word, total).
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        if (args.length != 2) {
            System.err.println("usage: WordCount <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "WordCount");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(WordCount.TokenizerMapper.class);
        // the reducer also serves as a combiner, pre-aggregating counts on the map side
        job.setCombinerClass(WordCount.IntSumReducer.class);
        job.setReducerClass(WordCount.IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
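
The mapper above counts tokens exactly as they appear, so "Hadoop" and "hadoop," would be tallied as different words. If case- and punctuation-insensitive counts are wanted, the map method could be adapted as in the sketch below (an optional variant, not part of the original exercise; the normalization rule is an assumption):

// Hypothetical variant of TokenizerMapper.map: lower-case each token and strip
// punctuation before emitting it.
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
    StringTokenizer itr = new StringTokenizer(value.toString());
    while (itr.hasMoreTokens()) {
        // keep only letters and digits, in lower case (assumed normalization rule)
        String token = itr.nextToken().toLowerCase().replaceAll("[^\\p{L}\\p{N}]", "");
        if (!token.isEmpty()) { // skip tokens that were pure punctuation
            word.set(token);
            context.write(word, one);
        }
    }
}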


### Compile and package



javac WordCount.java -cp $(hadoop classpath)
jar -cvf WordCount.jar *.class


![screenshot](https://img-blog.csdnimg.cn/2021050513195896.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzQ1MDM0NzA4,size_16,color_FFFFFF,t_70)


### Output



# hadoop jar <path to jar> <main class> <arg1: input path> <arg2: output path>
hadoop jar /usr/file/WordCount/WordCount.jar WordCount WordCount/input WordCount/output
# view the output
hadoop fs -cat WordCount/output/*


![screenshot](https://img-blog.csdnimg.cn/20210505140726822.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzQ1MDM0NzA4,size_16,color_FFFFFF,t_70)  
![screenshot](https://img-blog.csdnimg.cn/20210505140826565.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzQ1MDM0NzA4,size_16,color_FFFFFF,t_70)  
Appendix: restart steps



exit # leave the docker container
shutdown -r now # reboot the host
systemctl start docker # start the docker service
docker start hadoop1 # start each container
docker start hadoop2
docker start hadoop3
docker exec -it hadoop1 bash # enter the master node
$HADOOP_HOME/sbin/start-all.sh # start the Hadoop cluster


## Merge




---


Merge the input files, removing any duplicate lines among them.  
For example, given the input:  
file1.txt



20150101 x
20150102 y
20150103 x
20150104 y
20150105 z
20150106 x


file2.txt



20150101 y
20150102 y
20150103 x
20150104 z
20150105 y


Output:



20150101 x
20150101 y
20150102 y
20150103 x
20150104 y
20150104 z
20150105 y
20150105 z
20150106 x


### Input



cd /usr/file # create this directory first if it does not exist
mkdir Merge
cd Merge
vi file1.txt # paste in the file1.txt content shown above
vi file2.txt
hadoop fs -mkdir Merge
hadoop fs -mkdir Merge/input
hadoop fs -put /usr/file/Merge/file*.txt Merge/input
hadoop fs -ls Merge/input


![screenshot](https://img-blog.csdnimg.cn/20210505143157529.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzQ1MDM0NzA4,size_16,color_FFFFFF,t_70)





### Source



vi Merge.java
javac Merge.java -cp $(hadoop classpath)
jar -cvf Merge.jar *.class



import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import java.io.IOException;

public class Merge {

    // Mapper: emit the whole line as the key with an empty value; identical
    // lines therefore become identical keys.
    public static class Map extends Mapper<Object, Text, Text, Text> {

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            context.write(value, new Text(""));
        }
    }

    // Reducer: the shuffle groups identical keys together, so writing each key
    // once removes the duplicates.
    public static class Reduce extends Reducer<Text, Text, Text, Text> {

        @Override
        public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            context.write(key, new Text(""));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        if (args.length != 2) {
            System.err.println("usage: Merge <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "Merge");
        job.setJarByClass(Merge.class);
        job.setMapperClass(Merge.Map.class);
        job.setReducerClass(Merge.Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
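
Since the Reduce class has identical input and output types (a Text key and an empty Text value), it can also be registered as a combiner, so duplicate lines are already collapsed on the map side and less data crosses the network in the shuffle. This is an optional tweak that is not in the original job setup; a minimal sketch of the extra line in main would be:

// optional: reuse the reducer as a combiner to drop duplicate lines before the shuffle
job.setCombinerClass(Merge.Reduce.class);

The final output is unchanged, because the reducer still writes each distinct line exactly once.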


![screenshot](https://img-blog.csdnimg.cn/20210505143543763.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzQ1MDM0NzA4,size_16,color_FFFFFF,t_70)


### Output



hadoop jar /usr/file/Merge/Merge.jar Merge Merge/input Merge/output
hadoop fs -cat Merge/output/*


![screenshot](https://img-blog.csdnimg.cn/20210505153111821.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzQ1MDM0NzA4,size_16,color_FFFFFF,t_70)  
![screenshot](https://img-blog.csdnimg.cn/2021050515314880.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzQ1MDM0NzA4,size_16,color_FFFFFF,t_70)


## Sort




---


Read the integers from all input files, sort them in ascending order, and write the result to a new file.  
Input:  
file1.txt



33
37
12
40


file2.txt



4
16
39
5


file3.txt



1
45
25


Output:  
Each output line contains two integers: the first is the rank of the second integer in ascending order, and the second is the original integer being sorted.



1 1
2 4
3 5
4 12
5 16
6 25
7 33
8 37
9 39
10 40
11 45


### Input



cd /usr/file # create this directory first if it does not exist
mkdir Sort
cd Sort
vi file1.txt # paste in the file1.txt content shown above
vi file2.txt
vi file3.txt
hadoop fs -mkdir Sort
hadoop fs -mkdir Sort/input
hadoop fs -put /usr/file/Sort/file*.txt Sort/input
hadoop fs -ls Sort/input


![screenshot](https://img-blog.csdnimg.cn/20210505172347940.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzQ1MDM0NzA4,size_16,color_FFFFFF,t_70)


### Source



vi Sort.java
javac Sort.java -cp $(hadoop classpath)
jar -cvf Sort.jar *.class



import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Sort {

    // Mapper: parse each line as an integer and emit it as the key, with 1 as a
    // placeholder value; the shuffle then delivers the keys in ascending order.
    public static class Map extends Mapper<Object, Text, IntWritable, IntWritable> {

        private static IntWritable data = new IntWritable();

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            data.set(Integer.parseInt(line));
            context.write(data, new IntWritable(1));
        }
    }

    // Reducer: keys arrive already sorted, so a running counter assigns each
    // integer its rank; duplicate integers are written once per occurrence.
    public static class Reduce extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {

        private static IntWritable linenum = new IntWritable(1);

        @Override
        public void reduce(IntWritable key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            for (IntWritable num : values) {
                context.write(linenum, key);
                linenum = new IntWritable(linenum.get() + 1);
            }
        }
    }

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        if (args.length != 2) {
            System.err.println("Usage: Sort <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "Sort");
        job.setJarByClass(Sort.class);
        job.setMapperClass(Sort.Map.class);
        job.setReducerClass(Sort.Reduce.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
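
The ascending ranking relies on two MapReduce properties: keys are sorted before they reach the reducer, and all keys are seen by the same reduce task. With the default single reducer this holds as written; if the cluster configuration defaults to several reduce tasks, the ranking would no longer be globally consistent, and the job setup would presumably need the line below (a hedged addition, not in the original source):

// pin the job to a single reducer so the global ranking stays consistent
job.setNumReduceTasks(1);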

