hadoop_HA MapReduce 类定义及计数器

上传文本到hdfs文件系统中

对hdfs文件系统内的数据进行操作:

hdfs dfs -mkdir /wordcount
hdfs dfs -put /export/servers/wordcount.txt  /wordcount/
使用 IDEA 创建 Maven 项目

在这里插入图片描述

导包
将代码复制到pom文件中:

<repositories>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
        </repository>
</repositories>
<dependencies>
        <!-- Maven coordinates are case-sensitive: groupId/artifactId must be lowercase
             ("org.apache.hadoop" / "hadoop-client"), otherwise resolution fails. -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0-mr1-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.11</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.testng</groupId>
            <artifactId>testng</artifactId>
            <!-- "RELEASE" is a deprecated Maven metaversion; pin an explicit version. -->
            <version>6.14.3</version>
            <scope>test</scope>
        </dependency>
</dependencies>
<build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.4.3</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <minimizeJar>true</minimizeJar>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
</build>
创建javabean

需要时创建

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Sample Writable bean for Hadoop serialization.
 *
 * <p>Fields are written and read in the same fixed order
 * (string, integer, aDouble, aBoolean), as the Writable contract requires.
 */
public class JavaBean implements Writable {
    private String string;
    private Integer integer;
    private double aDouble;
    private boolean aBoolean;

    /** No-arg constructor required by Hadoop's reflection-based instantiation. */
    public JavaBean() {
    }

    public JavaBean(String string, Integer integer, double aDouble, boolean aBoolean) {
        this.string = string;
        this.integer = integer;
        this.aDouble = aDouble;
        this.aBoolean = aBoolean;
    }

    /**
     * Serializes the fields in declaration order.
     * NOTE(review): {@code writeInt(integer)} auto-unboxes; a null {@code integer}
     * would throw NullPointerException here — confirm callers always set it.
     */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(string);
        out.writeInt(integer);
        out.writeDouble(aDouble);
        out.writeBoolean(aBoolean);
    }

    /** Deserializes the fields in the exact order {@link #write} emitted them. */
    @Override
    public void readFields(DataInput in) throws IOException {
        string = in.readUTF();
        integer = in.readInt();
        aDouble = in.readDouble();
        aBoolean = in.readBoolean();
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("JavaBean{");
        sb.append("string='").append(string).append('\'');
        sb.append(", integer=").append(integer);
        sb.append(", aDouble=").append(aDouble);
        sb.append(", aBoolean=").append(aBoolean);
        sb.append('}');
        return sb.toString();
    }

    public String getString() {
        return string;
    }

    public void setString(String string) {
        this.string = string;
    }

    public Integer getInteger() {
        return integer;
    }

    public void setInteger(Integer integer) {
        this.integer = integer;
    }

    public double getaDouble() {
        return aDouble;
    }

    public void setaDouble(double aDouble) {
        this.aDouble = aDouble;
    }

    public boolean isaBoolean() {
        return aBoolean;
    }

    public void setaBoolean(boolean aBoolean) {
        this.aBoolean = aBoolean;
    }
}

定义一个Mapper类
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

/**
 * Word-count mapper: splits each input line into tokens and emits
 * (word, 1) pairs. Output types (Text, LongWritable) match what the
 * driver declares via setMapOutputKeyClass/setMapOutputValueClass.
 */
public class WordCountMap extends Mapper<LongWritable, Text, Text, LongWritable> {

    // Reused across map() calls to avoid allocating per record.
    private static final LongWritable ONE = new LongWritable(1);
    private final Text word = new Text();

    /**
     * @param key     byte offset of the line in the input file (unused)
     * @param value   one line of input text
     * @param context sink for (word, 1) pairs
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Split on any run of whitespace; skip empty tokens from leading spaces.
        for (String token : value.toString().split("\\s+")) {
            if (!token.isEmpty()) {
                word.set(token);
                context.write(word, ONE);
            }
        }
    }
}
定义一个Reducer类

传入参数应与Mapper类的输出类型相同

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;

/**
 * Word-count reducer: sums the counts emitted by the mapper for each word
 * and writes (word, total). Input types match the mapper's output types;
 * output types match the driver's setOutputKeyClass/setOutputValueClass.
 */
public class WordCountReduce extends Reducer<Text, LongWritable, Text, LongWritable> {

    // Reused across reduce() calls to avoid allocating per key.
    private final LongWritable total = new LongWritable();

    /**
     * @param key     a word
     * @param values  all counts emitted for that word (each typically 1)
     * @param context sink for the (word, total) result
     */
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
        long sum = 0;
        for (LongWritable count : values) {
            sum += count.get();
        }
        total.set(sum);
        context.write(key, total);
    }
}
定义一个Partition,用来分区

需要分区时定义,传入参数应与Mapper类的输出类型相同,定义后需要在主类中设置

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

/**
 * Placeholder partitioner matching the mapper's output types
 * (Text key, LongWritable value). As written it sends every record
 * to partition 0, i.e. a single reducer; add real routing logic here
 * when partitioning is needed.
 */
public class Partition extends Partitioner<Text, LongWritable> {

    /**
     * @param key           the map-output key
     * @param value         the map-output value
     * @param numPartitions number of reduce tasks configured on the job
     * @return the partition index for this record (always 0 here)
     */
    @Override
    public int getPartition(Text key, LongWritable value, int numPartitions) {
        return 0;
    }
}

定义一个主类,用来描述job并提交job
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Job driver: configures and submits the word-count MapReduce job.
 *
 * Run with: hadoop jar <jar> WordCountDriver <input path> <output path>
 * (the original hard-coded placeholder strings were not valid HDFS paths).
 */
public class WordCountDriver {
    public static void main(String[] args) throws Exception {
        if (args.length < 2) {
            System.err.println("Usage: WordCountDriver <input path> <output path>");
            System.exit(2);
        }

        Job job = Job.getInstance(new Configuration(), "WordCount_002");
/*
        Enable when a custom partitioner is needed; keep the reducer
        count consistent with the partitioner's routing.
        job.setPartitionerClass(Partition.class);
        job.setNumReduceTasks(1);
*/

        // Main class, so the cluster can locate the job jar.
        job.setJarByClass(WordCountDriver.class);

        // Map and reduce implementations.
        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        // Map output key/value types (must match WordCountMap's generics).
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // Final (reduce) output key/value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // Where to read input from and write results to (HDFS paths).
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Submit and block until completion; exit code reflects success.
        // (job.submit() would return immediately — usually not wanted here.)
        boolean success = job.waitForCompletion(true);
        System.exit(success ? 0 : 1);
    }
}

将代码打包成jar包上传到服务器上运行

在这里插入图片描述

在这里插入图片描述

将jar包上传到服务器:
rz      (若系统没有 rz 命令,先安装:yum install -y lrzsz)
运行jar包:
hadoop jar hadoop_HA_20201023_1-1.0-SNAPSHOT.jar WordCountDriver
命令格式:hadoop jar <jar包名> <主类全限定名>

在这里插入图片描述

计数器
// Map 和 Reduce 里面添加计数器:
 	Counter counter1 = context.getCounter("组名", "成员名");
        	counter1.increment(累加次数);

在这里插入图片描述

  • 计数器是收集作业统计信息的有效手段之一,用于质量控制或应用级统计。计数器还可辅助诊断系统故障。如果需要将日志信息传输到map 或reduce 任务, 更好的方法通常是看能否用一个计数器值来记录某一特定事件的发生。对于大型分布式作业而言,使用计数器更为方便。除了因为获取计数器值比输出日志更方便,还有根据计数器值统计特定事件的发生次数要比分析一堆日志文件容易得多。
  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值