Hadoop: Developing Your First MapReduce Program (WordCount)

1. Prepare the data file

aa.log (words are separated by single spaces):
wenxin xaiowen wangwu
xiaowen xiaoxin wenxin
xiaowen zhangshan lisi
2. Start the Hadoop cluster
3. Upload the data file to HDFS

[root@Cluster00 ~]# hdfs dfs -mkdir /wordcount
[root@Cluster00 ~]# hdfs dfs -put aa.log /wordcount
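
You can verify the upload before moving on (the listing output will vary with your cluster):

[root@Cluster00 ~]# hdfs dfs -ls /wordcount
[root@Cluster00 ~]# hdfs dfs -cat /wordcount/aa.log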

4. Develop the MapReduce program

Add the required Maven dependencies:

    <properties>
        <hadoop.version>2.7.3</hadoop.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-examples</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
    </dependencies>
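
With the dependencies declared, package the project into a jar. The jar name used in the run step below (hadoop-wordcount-1.0-SNAPSHOT.jar) assumes an artifactId of hadoop-wordcount and version 1.0-SNAPSHOT in your pom.xml:

    mvn clean package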

Develop the job class

Develop the map module

    // Map phase. Hadoop wraps Java types for serialization: long -> LongWritable, String -> Text
    public static class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable> {

        // Called once for every record the InputFormat produces.
        // key: byte offset of the start of the current line; value: the content of that line
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Split the line on spaces and emit (word, 1) for each word
            String[] words = value.toString().split(" ");
            for (String word : words) {
                context.write(new Text(word), new IntWritable(1));
            }
        }
    }
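
For the first line of aa.log, wenxin xaiowen wangwu, this map method emits the pairs (wenxin, 1), (xaiowen, 1), and (wangwu, 1); the framework's shuffle phase then groups all pairs by key before handing them to the reducer.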

Develop the reduce module

    // Reduce phase: receives each distinct word together with all the 1s emitted for it
    public static class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            // Sum the counts for this word
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            // Write the final result
            context.write(key, new IntWritable(sum));
        }
    }
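
After the shuffle, the reducer is invoked once per distinct word. For the key xiaowen it receives the values [1, 1], sums them, and writes xiaowen 2 to the output.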

Develop the job module

    public static void main(String[] args) throws Exception {
        // Let ToolRunner parse generic Hadoop options and invoke run()
        System.exit(ToolRunner.run(new wordcount(), args));
    }

    public int run(String[] strings) throws Exception {
        // Create the job object
        Job job = Job.getInstance(getConf());
        job.setJarByClass(wordcount.class);
        // Configure the InputFormat
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("/wordcount/aa.log"));
        // Configure the map phase
        job.setMapperClass(WordCountMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Shuffle is handled by the framework
        // Configure the reduce phase
        job.setReducerClass(WordCountReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Configure the OutputFormat; the output directory must not already exist
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("/wordcount/result"));
        // Submit the job and wait for it to finish
        boolean status = job.waitForCompletion(true);
        System.out.println("wordcount: " + status);
        return status ? 0 : 1;
    }
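
Because the job fails if the output directory already exists, delete a previous result before re-running:

[root@Cluster00 ~]# hdfs dfs -rm -r /wordcount/result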

Complete code

package com.wenxin.wordcount;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;



public class wordcount extends Configured implements Tool {

    public static void main(String[] args) throws Exception {
        // Let ToolRunner parse generic Hadoop options and invoke run()
        System.exit(ToolRunner.run(new wordcount(), args));
    }

    public int run(String[] strings) throws Exception {
        // Create the job object
        Job job = Job.getInstance(getConf());
        job.setJarByClass(wordcount.class);
        // Configure the InputFormat
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("/wordcount/aa.log"));
        // Configure the map phase
        job.setMapperClass(WordCountMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Shuffle is handled by the framework
        // Configure the reduce phase
        job.setReducerClass(WordCountReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Configure the OutputFormat; the output directory must not already exist
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("/wordcount/result"));
        // Submit the job and wait for it to finish
        boolean status = job.waitForCompletion(true);
        System.out.println("wordcount: " + status);
        return status ? 0 : 1;
    }

    // Map phase. Hadoop wraps Java types for serialization: long -> LongWritable, String -> Text
    public static class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable> {

        // Called once for every record the InputFormat produces.
        // key: byte offset of the start of the current line; value: the content of that line
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Split the line on spaces and emit (word, 1) for each word
            String[] words = value.toString().split(" ");
            for (String word : words) {
                context.write(new Text(word), new IntWritable(1));
            }
        }
    }

    // Reduce phase: receives each distinct word together with all the 1s emitted for it
    public static class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            // Sum the counts for this word
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            // Write the final result
            context.write(key, new IntWritable(sum));
        }
    }

}

5. Run the job

Upload the jar to the YARN cluster, then run:

[root@Cluster00 ~]# yarn jar hadoop-wordcount-1.0-SNAPSHOT.jar com.wenxin.wordcount.wordcount
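
While the job runs, you can check its progress from the command line as well as the web UI (the application ID will differ on your cluster):

[root@Cluster00 ~]# yarn application -list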

Run the following command to check the result:

[root@Cluster00 ~]# hdfs dfs -cat /wordcount/result/part-r-00000
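
With the sample aa.log above, the output should contain one word and its count per line, sorted by key:

lisi	1
wangwu	1
wenxin	2
xaiowen	1
xiaowen	2
xiaoxin	1
zhangshan	1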
