Java and Scala Implementations of the WordCount Example

Java Implementation of WordCount

Add the dependencies to the Maven project

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.7.2</version>
        </dependency>

Mapper class

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class WCMapper extends Mapper<LongWritable,Text,Text,IntWritable> {

    String name;

    //setup() runs once per task; use it to read a small local side file or to fetch parameters passed in from the driver
    public void setup(Context context){
        name = context.getConfiguration().get("name");
    }

    public void map(LongWritable key,Text value,Context context) throws IOException, InterruptedException {
        //convert the line to a String and split it into words
        String[] strs = value.toString().split("\t");
        //assemble the <k,v> pair and emit it
        for (String str : strs) {
            Text text = new Text();
            text.set(name + "-" + str);
            context.write(text,new IntWritable(1));
        }
    }

    public void cleanup(Context context) throws IOException, InterruptedException {
        super.cleanup(context);
    }
}
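
For example, with the driver parameter name set to "cxb" (as configured in the driver below) and a hypothetical input line "hello\tworld\thello", the map() above would emit:

    (cxb-hello, 1)
    (cxb-world, 1)
    (cxb-hello, 1)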

Reducer class

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import java.io.IOException;

public class WCReducer extends Reducer<Text,IntWritable,Text,IntWritable> {

    MultipleOutputs<Text, IntWritable> mo;
    //create objects ahead of time, once per task
    public void setup(Context context) throws IOException, InterruptedException {
        //create a MultipleOutputs object so that results can be written to different output files
        mo = new MultipleOutputs<>(context);
        super.setup(context);
    }

    public void reduce(Text key,Iterable<IntWritable> value,Context context) throws IOException, InterruptedException {
        //accumulator for the count
        int sum = 0;
        //iterate over the values
        for(IntWritable i : value){
            sum += i.get();
        }
        //assemble the <k,v> pair
        //context.write(key,new IntWritable(sum));
        mo.write(key,new IntWritable(sum),"CDS");
        mo.write(key,new IntWritable(sum),"CES");
    }

    public void cleanup(Context context) throws IOException, InterruptedException {
        //close resources
        mo.close();
        super.cleanup(context);
    }
}
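
With the two mo.write() calls above, each reducer writes the same counts to two groups of files named after the baseOutputPath argument, rather than only to the usual part files. A sketch of the expected output directory for a single reducer, assuming the standard MultipleOutputs naming scheme of baseOutputPath-r-nnnnn:

    CDS-r-00000
    CES-r-00000
    part-r-00000    (empty, because context.write() is never called)
    _SUCCESS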

Driver class

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WCDriver {
    public static void main(String[] args) throws Exception {

        //create the configuration object
        Configuration conf = new Configuration();
        //set a custom parameter (read back in the Mapper's setup())
        conf.set("name","cxb");
        //create the job object from the configuration
        Job job = Job.getInstance(conf);

        //set the location of the wordcount program's jar
        job.setJarByClass(WCDriver.class);
        //set the mapper output key/value classes
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        //set the job's final output key/value classes
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        //set the job's mapper and reducer classes
        job.setMapperClass(WCMapper.class);
        job.setReducerClass(WCReducer.class);

        //delete the output path if it already exists
        Path path = new Path("C:/Users/陈大帅/Desktop/a");
        FileSystem fs = FileSystem.get(conf);
        if(fs.exists(path)){
            fs.delete(path,true);
        }

        //set the job's input and output paths
        FileInputFormat.setInputPaths(job,new Path("C:/Users/陈大帅/Desktop/a.txt"));
        FileOutputFormat.setOutputPath(job,new Path("C:/Users/陈大帅/Desktop/a"));

        //submit the job and wait for completion
        boolean b = job.waitForCompletion(true);
        System.out.println(b);

    }
}
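
The input and output paths above are hard-coded Windows paths, so the job is meant to run straight from the IDE against the local file system. A minimal sketch of how the same driver could take the paths from the command line instead (hypothetical use of args, not part of the original code), which would also make the packaged jar usable on a cluster:

        //read the input/output paths from the program arguments instead of hard-coding them
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

After packaging, the job could then be submitted with something like: hadoop jar wordcount.jar WCDriver /input/a.txt /output/a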

Scala Implementation of WordCount

Add the dependencies to the Maven project

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>2.1.1</version>
        </dependency>

Scala object

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WCScala {
  def main(args: Array[String]): Unit = {
    //create a SparkConf object, set the run mode (master) and the application name
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getName)
    //create the SparkContext, the entry point for submitting a Spark application
    val sc = new SparkContext(conf)
    //use sc to create the RDD and run the transformations and actions; request 1 partition (textFile's default minPartitions is 2)
    //read the data
    val source: RDD[String] = sc.textFile("C:/Users/陈大帅/Desktop/a.txt",1)
    //split each line into words
    val words: RDD[String] = source.flatMap(_.split("\t",-1))
    //tag each word with a 1, producing (word, 1) tuples
    val wordAndOne: RDD[(String, Int)] = words.map((_,1)).cache()
    //aggregate and accumulate by key
    val wordAndOneCount: RDD[(String, Int)] = wordAndOne.reduceByKey(_+_)
    //delete the output path if it already exists
    val path = new Path("C:/Users/陈大帅/Desktop/a")
    val fs: FileSystem = FileSystem.get(new Configuration())
    if (fs.exists(path)){
      fs.delete(path, true)
    }
    //write out the result
    wordAndOneCount.saveAsTextFile("C:/Users/陈大帅/Desktop/a")
    //release resources
    sc.stop()
  }
}
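
saveAsTextFile writes each (word, count) tuple using its default toString, one line per tuple, into part files under the output directory. For the hypothetical input line "hello\tworld\thello", the single part file (only one partition was requested) would contain, in some order:

    C:/Users/陈大帅/Desktop/a/part-00000
    (hello,2)
    (world,1)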