Hive Tips: A Summary of Custom UDAF Functions

This post covers two examples:

1. Using the AbstractGenericUDAFResolver class to return the total number of characters across all strings in one Hive column;

2. Using the GenericUDAFResolver2 interface to return the number of rows in a Hive table.
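The two entry points are closely related: AbstractGenericUDAFResolver is Hive's convenience base class that implements GenericUDAFResolver2 and forwards to the older getEvaluator(TypeInfo[]) hook, while implementing GenericUDAFResolver2 directly gives you the getEvaluator(GenericUDAFParameterInfo) overload, which also reveals whether DISTINCT or * appeared in the call.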

Configuration: pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.6.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>com.hello</groupId>
    <artifactId>hive</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>hive</name>
    <description>Demo project for Spring Boot</description>

    <properties>
        <java.version>1.8</java.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
            <exclusions>
                <exclusion>
                    <groupId>com.vaadin.external.google</groupId>
                    <artifactId>android-json</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <!-- Hive dependency -->
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-exec</artifactId>
            <version>3.1.1</version>
        </dependency>

        <!-- Spark dependency -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>2.4.3</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>

</project>
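With this pom, `mvn clean package` builds the jar that gets registered in Hive with ADD JAR. One caveat worth hedging: spring-boot-maven-plugin repackages the artifact into an executable Spring Boot jar (classes under BOOT-INF/classes), so the plain, non-repackaged jar is the one Hive can load the UDAF classes from.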

 

Code details

1. Using the AbstractGenericUDAFResolver class to return the total character count of one Hive column

package com.hello.hive;

import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.udf.generic.AbstractGenericUDAFResolver;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

/**
 * @ProjectName: hive
 * @Package: com.hello.hive
 * @ClassName: TotalNumOfLettersGenericUDAF
 * @Author: dongsong
 * @Description: Aggregate UDAF that counts the total number of characters in a table column
 * @Date: 2019/8/7 10:28
 * @Version: 1.0
 */

@Description(name = "letters", value = "_FUNC_(expr) - Returns the total number of characters of all strings in the column")
public class TotalNumOfLettersGenericUDAF extends AbstractGenericUDAFResolver {

    @Override
    public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters)
            throws SemanticException {
        if (parameters.length != 1) {
            throw new UDFArgumentTypeException(parameters.length - 1,
                    "Exactly one argument is expected.");
        }
        // A SerDe handles serialization and deserialization, and provides an
        // ObjectInspector helper so callers can inspect the objects being
        // (de)serialized.
        //
        // The SerDe layer sits between data storage and the execution engine,
        // decoupling the engine from both the stored data and the intermediate
        // data. Hive compiles SQL into a MapReduce job: SerDes are used when the
        // RecordReader of an InputFormat parses input records and when final
        // results are written out, and again around the map/reduce shuffle, where
        // intermediate results are deserialized back into internal objects
        // (typically an Array or List) that Hive exposes to the user through a
        // StandardStructObjectInspector.
        //
        // In short, ObjectInspectors decouple how data is used from how it is
        // stored, so different input/output formats and different operators can
        // work with the same data.
        ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(parameters[0]);
        // getCategory() returns the top-level category of the ObjectInspector
        System.out.println("ObjectInspector category: " + oi.getCategory().name());
        if (oi.getCategory() != ObjectInspector.Category.PRIMITIVE){
            throw new UDFArgumentTypeException(0,
                    "Argument must be PRIMITIVE, but "
                            + oi.getCategory().name()
                            + " was passed.");
        }

        // Cast the ObjectInspector to a PrimitiveObjectInspector
        PrimitiveObjectInspector inputOI = (PrimitiveObjectInspector) oi;
        System.out.println("PrimitiveObjectInspector category: " + inputOI.getCategory().name());
        if (inputOI.getPrimitiveCategory() != PrimitiveObjectInspector.PrimitiveCategory.STRING){
            throw new UDFArgumentTypeException(0,
                    "Argument must be String, but "
                            + inputOI.getPrimitiveCategory().name()
                            + " was passed.");
        }

        return new TotalNumOfLettersEvaluator();
    }
    // The evaluator extends GenericUDAFEvaluator and implements the actual aggregation logic
    public static class TotalNumOfLettersEvaluator extends GenericUDAFEvaluator {

        PrimitiveObjectInspector inputOI;
        ObjectInspector outputOI;
        PrimitiveObjectInspector integerOI;

        // Per-group state lives in the AggregationBuffer below, not in evaluator fields.

        // Every subclass overrides init(); call super.init(m, parameters) first so
        // the mode is recorded. init() determines the input and output
        // ObjectInspectors for each phase. Note that init() is not called just
        // once: it runs once per phase, and the Mode argument decides which input
        // format to expect.
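        // The four modes, as defined by Hive's GenericUDAFEvaluator:
        //   PARTIAL1: raw rows -> partial aggregate (iterate + terminatePartial), map side
        //   PARTIAL2: partial  -> partial           (merge + terminatePartial), combiner
        //   FINAL:    partial  -> final result      (merge + terminate), reduce side
        //   COMPLETE: raw rows -> final result      (iterate + terminate), map-only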
        @Override
        public ObjectInspector init(Mode m, ObjectInspector[] parameters)
                throws HiveException {
            // Exactly one argument is expected
            assert (parameters.length == 1);
            // Let the parent class record the current mode
            super.init(m, parameters);

            // PARTIAL1 and COMPLETE read the raw SQL column, so the input is a String
            if (m == Mode.PARTIAL1 || m == Mode.COMPLETE) {
                inputOI = (PrimitiveObjectInspector) parameters[0];
            } else {
                // In the remaining phases (PARTIAL2, FINAL) the input is a partial Integer sum
                integerOI = (PrimitiveObjectInspector) parameters[0];
            }

            // Every phase outputs an Integer
            outputOI = ObjectInspectorFactory.getReflectionObjectInspector(Integer.class,
                    ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
            return outputOI;

        }

        /**
         * Buffer that stores the running character count.
         * An AggregationBuffer holds intermediate results; by defining our own
         * buffer class we can carry state of any shape. Here it is just the sum.
         */
        static class LetterSumAgg implements AggregationBuffer {
            int sum = 0;
            void add(int num){
                sum += num;
            }
        }
        // Create a new buffer that will hold the aggregation state
        @Override
        public AggregationBuffer getNewAggregationBuffer() throws HiveException {
            return new LetterSumAgg();
        }
        // Reset the aggregation state of the passed-in buffer
        @Override
        public void reset(AggregationBuffer agg) throws HiveException {
            ((LetterSumAgg) agg).sum = 0;
        }

        // Map phase: iterate() is called once for every row the map task reads,
        // so with multiple rows it runs multiple times
        @Override
        public void iterate(AggregationBuffer agg, Object[] parameters)
                throws HiveException {
            assert (parameters.length == 1);
            if (parameters[0] != null) {
                LetterSumAgg myagg = (LetterSumAgg) agg;
                Object p1 = inputOI.getPrimitiveJavaObject(parameters[0]);
                myagg.add(String.valueOf(p1).length());
            }
        }

        // End of the map or combine phase: return the partial aggregation result
        @Override
        public Object terminatePartial(AggregationBuffer agg) throws HiveException {
            LetterSumAgg myagg = (LetterSumAgg) agg;
            return myagg.sum;
        }
        // The combiner merges map output; the reducer merges map or combiner output
        @Override
        public void merge(AggregationBuffer agg, Object partial)
                throws HiveException {
            if (partial != null) {
                LetterSumAgg myagg = (LetterSumAgg) agg;
                Integer partialSum = (Integer) integerOI.getPrimitiveJavaObject(partial);
                myagg.add(partialSum);
            }
        }
        // Reduce phase: return the final result
        @Override
        public Object terminate(AggregationBuffer agg) throws HiveException {
            LetterSumAgg myagg = (LetterSumAgg) agg;
            return myagg.sum;
        }

    }
}
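Before deploying to a cluster, the evaluator lifecycle can be exercised locally with a plain main() method. The sketch below is illustrative only (the class name LocalLettersDriver and the sample inputs are assumptions, not part of the original post); it drives the evaluator in COMPLETE mode, where iterate() and terminate() run in one pass:

package com.hello.hive;

import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

// Hypothetical local driver, not part of the original post
public class LocalLettersDriver {
    public static void main(String[] args) throws Exception {
        TotalNumOfLettersGenericUDAF.TotalNumOfLettersEvaluator eval =
                new TotalNumOfLettersGenericUDAF.TotalNumOfLettersEvaluator();
        // COMPLETE mode: raw strings in, final count out, no map/reduce split
        eval.init(GenericUDAFEvaluator.Mode.COMPLETE,
                new ObjectInspector[]{PrimitiveObjectInspectorFactory.javaStringObjectInspector});
        GenericUDAFEvaluator.AggregationBuffer buf = eval.getNewAggregationBuffer();
        for (String s : new String[]{"hive", "udaf"}) {
            eval.iterate(buf, new Object[]{s});   // one call per row
        }
        System.out.println(eval.terminate(buf)); // prints 8 (4 + 4 characters)
    }
}

In Hive itself the function would be registered and called along the lines of ADD JAR /path/to/hive-0.0.1-SNAPSHOT.jar; CREATE TEMPORARY FUNCTION letters AS 'com.hello.hive.TotalNumOfLettersGenericUDAF'; SELECT letters(some_column) FROM some_table; (jar path, table, and column names assumed).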

Test result

 

2. Using the GenericUDAFResolver2 interface to return the number of rows in a Hive table

package com.hello.hive;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFParameterInfo;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFResolver2;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.io.LongWritable;

/**
 * @ProjectName: hive
 * @Package: com.hello.hive
 * @ClassName: New_Count
 * @Author: dongsong
 * @Description: A Hive UDAF is many-in-one-out, like the built-in aggregates Count() and Sum()
 * @Date: 2019/8/7 14:19
 * @Version: 1.0
 */
public class New_Count implements GenericUDAFResolver2 {
    private static final Log LOG = LogFactory.getLog(GenericUDAFCount.class.getName());
    // Type check via TypeInfo[]; kept for backward compatibility
    public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters)
            throws SemanticException {
        // This method implementation is preserved for backward compatibility.
        return new GenericUDAFCountEvaluator();
    }

    // Check the parameter types; this overload also sees the DISTINCT and * flags
    public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo paramInfo)
            throws SemanticException {

        TypeInfo[] parameters = paramInfo.getParameters();

        if (parameters.length == 0) {
            if (!paramInfo.isAllColumns()) {
                throw new UDFArgumentException("Argument expected");
            }
            assert !paramInfo.isDistinct() : "DISTINCT not supported with *";
        } else {
            if (parameters.length > 1 && !paramInfo.isDistinct()) {
                throw new UDFArgumentException("DISTINCT keyword must be specified");
            }
            assert !paramInfo.isAllColumns() : "* not supported in expression list";
        }

        return new GenericUDAFCountEvaluator().setCountAllColumns(
                paramInfo.isAllColumns());
    }
    public static class GenericUDAFCountEvaluator extends GenericUDAFEvaluator {
        private boolean countAllColumns = false; // true when counting all columns, i.e. COUNT(*)
        private LongObjectInspector partialCountAggOI;
        private LongWritable result;

        @Override
        public ObjectInspector init(Mode m, ObjectInspector[] parameters)
                throws HiveException {
            super.init(m, parameters);
            partialCountAggOI =
                    PrimitiveObjectInspectorFactory.writableLongObjectInspector;
            result = new LongWritable(0);  // start the count at 0
            return PrimitiveObjectInspectorFactory.writableLongObjectInspector;
        }

        private GenericUDAFCountEvaluator setCountAllColumns(boolean countAllCols) {
            countAllColumns = countAllCols;
            return this;
        }

        /** class for storing count value. */
        @AggregationType(estimable = true)
        static class CountAgg extends AbstractAggregationBuffer {
            long value;
            @Override
            public int estimate() { return JavaDataModel.PRIMITIVES2; }
        }

        @Override
        public AggregationBuffer getNewAggregationBuffer() throws HiveException {
            CountAgg buffer = new CountAgg();
            reset(buffer);
            return buffer;
        }

        @Override
        public void reset(AggregationBuffer agg) throws HiveException {
            ((CountAgg) agg).value = 0;
        }

        @Override  // map phase
        public void iterate(AggregationBuffer agg, Object[] parameters)
                throws HiveException {
            // parameters == null means the input table/split is empty
            if (parameters == null) {
                return;
            }
            if (countAllColumns) {  // COUNT(*): every row counts
                assert parameters.length == 0;
                ((CountAgg) agg).value++;
            } else {
                boolean countThisRow = true; // COUNT(col, ...): skip the row if any argument is null
                for (Object nextParam : parameters) {
                    if (nextParam == null) {
                        countThisRow = false;
                        break;
                    }
                }
                if (countThisRow) {
                    ((CountAgg) agg).value++;
                }
            }
        }

        @Override // reduce phase: merge partial counts
        public void merge(AggregationBuffer agg, Object partial)
                throws HiveException {
            if (partial != null) {
                long p = partialCountAggOI.get(partial);
                ((CountAgg) agg).value += p;
            }
        }

        @Override
        public Object terminate(AggregationBuffer agg) throws HiveException {
            result.set(((CountAgg) agg).value);
            return result;
        }

        @Override
        public Object terminatePartial(AggregationBuffer agg) throws HiveException {
            return terminate(agg);
        }
    }
}
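As with the first function, the two-stage lifecycle can be walked through locally. The sketch below is an assumption-laden illustration (LocalCountDriver and the sample rows are mine, not from the original post): one evaluator plays the map side in PARTIAL1 mode, a second plays the reduce side in FINAL mode, and the partial count is handed between them:

package com.hello.hive;

import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

// Hypothetical local driver, not part of the original post
public class LocalCountDriver {
    public static void main(String[] args) throws Exception {
        // Map side: PARTIAL1 consumes raw rows and emits a partial count
        New_Count.GenericUDAFCountEvaluator mapSide = new New_Count.GenericUDAFCountEvaluator();
        mapSide.init(GenericUDAFEvaluator.Mode.PARTIAL1,
                new ObjectInspector[]{PrimitiveObjectInspectorFactory.javaStringObjectInspector});
        GenericUDAFEvaluator.AggregationBuffer mapBuf = mapSide.getNewAggregationBuffer();
        for (String row : new String[]{"a", "b", null, "c"}) { // the null row is not counted
            mapSide.iterate(mapBuf, new Object[]{row});
        }
        Object partial = mapSide.terminatePartial(mapBuf);

        // Reduce side: FINAL merges partial counts into the final result
        New_Count.GenericUDAFCountEvaluator reduceSide = new New_Count.GenericUDAFCountEvaluator();
        reduceSide.init(GenericUDAFEvaluator.Mode.FINAL,
                new ObjectInspector[]{PrimitiveObjectInspectorFactory.writableLongObjectInspector});
        GenericUDAFEvaluator.AggregationBuffer redBuf = reduceSide.getNewAggregationBuffer();
        reduceSide.merge(redBuf, partial);
        System.out.println(reduceSide.terminate(redBuf)); // prints 3
    }
}

Registered in Hive, it would be invoked much like the built-in count, e.g. CREATE TEMPORARY FUNCTION new_count AS 'com.hello.hive.New_Count'; SELECT new_count(some_column) FROM some_table; (table and column names assumed).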

Test result

No screenshot was captured for this row-count UDAF.
