ElasticSearch Aggregation Queries with the Java API


This post walks through ES aggregation queries via the Java API: paginated queries, grouping by time to compute averages (several at once), grouping and aggregating on a field, and so on.
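All snippets below assume an already-initialized restHighLevelClient (and, for section 1, a MyBatis-Plus service layer). For completeness, a minimal client-setup sketch; the host and port are placeholders, not values from this post:

        import org.apache.http.HttpHost;
        import org.elasticsearch.client.RestClient;
        import org.elasticsearch.client.RestHighLevelClient;

        // Minimal client setup; replace host/port with your cluster's address.
        RestHighLevelClient restHighLevelClient = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")));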

  1. Paginated query
        SysUser loginUser = SecurityUtils.getLoginUser().getUser();
        Long userId = loginUser.getUserId();
        LambdaQueryChainWrapper<Warning> query = warningService.lambdaQuery();
        if (req.getStatus() != null && req.getStatus().size() > 0) {
            if (req.getStatus().contains(null)) {
                throw new BaseException("Invalid 'process status' field!");
            } else {
                query.in(Warning::getProcessStatus, req.getStatus());
            }
        }
        query.eq(StringUtils.isNotEmpty(req.getSiteId()), Warning::getDeptId, req.getSiteId());
        query.like(StringUtils.isNotEmpty(req.getDevCode()), Warning::getDevCode, req.getDevCode());
        query.like(StringUtils.isNotEmpty(req.getDevName()), Warning::getDevName, req.getDevName());

        List<WarningProcess> processes = warningProcessService.getWarningProcess(userId);
        if (processes == null || processes.size() == 0) {
            // No assigned handlers: fall back to filtering by the current user's site (dept) permissions
            List<Long> userDepts = userService.queryUserDepts(userId);
            query.in(Warning::getDeptId, userDepts);
        } else {
            List<Long> warnIds = processes.stream().map(WarningProcess::getWarnId).collect(Collectors.toList());
            query.in(CollectionUtils.isNotEmpty(warnIds), Warning::getId, warnIds);
        }

        query.orderByDesc(Warning::getWarnTime);
        TableDataInfo data = warningService.page(req, query);

        // Fill in the department (site) name on each row
        List<SysDept> depts = sysDeptService.list();
        data.getRows().forEach(item -> {
            Warning warnItem = (Warning) item;
            SysDept dept = depts.stream().filter(m -> m.getDeptId().equals(warnItem.getDeptId())).findFirst().orElse(null);
            if (null != dept) {
                warnItem.setDeptName(dept.getDeptName());
            }
        });
        return data;

The same MyBatis-Plus query rewritten with the ES Java API:

        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder()
                // offset: where this page starts
                .from((req.getPageNum() - 1) * req.getPageSize())
                // page size
                .size(req.getPageSize())
                // sort by warn_time, descending
                .sort("warn_time", SortOrder.DESC)
                .trackTotalHits(true);
        BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery();
        // equivalent of SQL IN
        if (req.getStatus() != null && req.getStatus().size() > 0) {
            boolQueryBuilder.filter(QueryBuilders.termsQuery("process_status", req.getStatus()));
        }
        // term: exact match
        if (StringUtils.isNotEmpty(req.getSiteId())) {
            boolQueryBuilder.filter(QueryBuilders.termQuery("dept_id", req.getSiteId()));
        }
        // LIKE-style match; note a leading * makes wildcard queries expensive on large indices
        if (StringUtils.isNotEmpty(req.getDevCode())) {
            boolQueryBuilder.filter(QueryBuilders.wildcardQuery("dev_code.keyword", "*" + req.getDevCode() + "*"));
        }
        if (StringUtils.isNotEmpty(req.getDevName())) {
            boolQueryBuilder.filter(QueryBuilders.wildcardQuery("dev_name.keyword", "*" + req.getDevName() + "*"));
        }
        List<WarningProcess> processes = warningProcessService.getWarningProcess(userId);
        if (processes == null || processes.size() == 0) {
            // No assigned handlers: fall back to filtering by the current user's site (dept) permissions
            List<Long> userDepts = userService.queryUserDepts(userId);
            boolQueryBuilder.filter(QueryBuilders.termsQuery("dept_id.keyword", userDepts));
        } else {
            List<Long> warnIds = processes.stream().map(WarningProcess::getWarnId).collect(Collectors.toList());
            if (CollectionUtils.isNotEmpty(warnIds)) {
                // termsQuery is the ES equivalent of SQL IN
                boolQueryBuilder.filter(QueryBuilders.termsQuery("id", warnIds));
            }
        }
        // attach the query and execute
        searchSourceBuilder.query(boolQueryBuilder);
        SearchResponse searchResponse = EsClintUtil.execSearch(restHighLevelClient, searchSourceBuilder, index);
        // total hit count (on 7.x clients this is searchResponse.getHits().getTotalHits().value)
        long total = searchResponse.getHits().getTotalHits();
        List<Warning> list = new ArrayList<>();
        final SearchHit[] searchHits = searchResponse.getHits().getHits();
        for (SearchHit searchHit : searchHits) {
            list.add(JSON.parseObject(searchHit.getSourceAsString(), Warning.class));
        }
        // wrap the results in the custom table response
        TableDataInfo info = new TableDataInfo(list, (int) total);
        return info;
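Both snippets above, and the sections below, call an EsClintUtil.execSearch helper that the post doesn't show. A minimal sketch of what such a wrapper presumably does, assuming the standard RestHighLevelClient API (the exception handling is illustrative, not the post's actual implementation):

        import java.io.IOException;
        import org.elasticsearch.action.search.SearchRequest;
        import org.elasticsearch.action.search.SearchResponse;
        import org.elasticsearch.client.RequestOptions;
        import org.elasticsearch.client.RestHighLevelClient;
        import org.elasticsearch.search.builder.SearchSourceBuilder;

        // Hypothetical sketch of the execSearch helper used throughout this post:
        // builds a SearchRequest for the given index and runs it synchronously.
        public static SearchResponse execSearch(RestHighLevelClient client,
                                                SearchSourceBuilder sourceBuilder,
                                                String index) {
            SearchRequest request = new SearchRequest(index);
            request.source(sourceBuilder);
            try {
                return client.search(request, RequestOptions.DEFAULT);
            } catch (IOException e) {
                throw new RuntimeException("ES search failed for index: " + index, e);
            }
        }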
  2. Bucket aggregation
SELECT
    dept_name,
    count(d1.id) AS imgcount,
    count(DISTINCT d1.device_code) AS imgcount1,
    sum(CASE d1.`status` WHEN '1' THEN 1 ELSE 0 END) AS warningCount
FROM sys_dept s1
LEFT JOIN dev_shock_data d1 ON s1.dept_id = d1.dept_id
WHERE s1.dept_id != 100
GROUP BY s1.dept_name

The Java code corresponding to this SQL:

        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery();
        // WHERE dept_id != 100
        boolQueryBuilder.mustNot(QueryBuilders.termQuery("dept_id", 100));
        searchSourceBuilder.query(boolQueryBuilder);
        searchSourceBuilder.size(0); // return only the aggregations, not the raw documents

        // group by site (dept)
        TermsAggregationBuilder aggregationBuilder = AggregationBuilders
                .terms("deptgroup").field("dept_id")  // the bucket grouping
                // sub-bucket by status, for the CASE WHEN count
                .subAggregation(AggregationBuilders.terms("statusgroup").field("status.keyword"))
                // distinct device count, i.e. COUNT(DISTINCT device_code)
                .subAggregation(AggregationBuilders.cardinality("imgcount1").field("device_code.keyword"))
                .size(5000);
        searchSourceBuilder.aggregation(aggregationBuilder);

        SearchResponse searchResponse = EsClintUtil.execSearch(restHighLevelClient, searchSourceBuilder, "dev_shock_data");
        // walk the buckets and map them onto the response objects
        List<ImageStatisticRes> resList = new ArrayList<>();
        Terms parsedStringTerms = searchResponse.getAggregations().get("deptgroup");
        List<? extends Terms.Bucket> buckets = parsedStringTerms.getBuckets();
        Map<String, String> deptMap = EsClintUtil.getDepts(restHighLevelClient);
        List<String> list = new ArrayList<>();
        for (Terms.Bucket bucket : buckets) {
            // the bucket key (dept_id) and its document count
            String key = bucket.getKey().toString();
            long docCount = bucket.getDocCount();
            ImageStatisticRes res = new ImageStatisticRes();
            res.setSiteName(deptMap.get(key));
            res.setImageCount((int) docCount);
            // distinct device count from the cardinality sub-aggregation
            Cardinality imagecount1 = bucket.getAggregations().get("imgcount1");
            res.setImageCount1((int) imagecount1.getValue());
            Terms statusgroupTerms = bucket.getAggregations().get("statusgroup");
            for (Terms.Bucket statusBucket : statusgroupTerms.getBuckets()) {
                if (statusBucket.getKey().equals("1")) {
                    res.setImageWarningCount((int) statusBucket.getDocCount());
                }
            }
            resList.add(res);
            list.add(key);
        }
        // all dept ids
        final List<String> deptIds = deptMap.keySet().stream().collect(Collectors.toList());

        // the difference set: depts that returned no bucket
        final List<String> collect = deptIds.stream()
                .filter(item -> !list.contains(item))
                .collect(Collectors.toList());

        // depts in the difference set have no data; emit zero rows for them (LEFT JOIN semantics)
        for (String s : collect) {
            ImageStatisticRes res = new ImageStatisticRes();
            res.setSiteName(deptMap.get(s));
            res.setImageCount(0);
            res.setImageWarningCount(0);
            resList.add(res);
        }

        return resList;
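EsClintUtil.getDepts is also not shown. Judging by its use above, it returns a dept_id -> dept_name lookup. A hypothetical sketch, assuming departments live in an ES index named sys_dept with dept_id and dept_name source fields (both names are guesses, not confirmed by the post):

        import java.io.IOException;
        import java.util.HashMap;
        import java.util.Map;
        import org.elasticsearch.action.search.SearchRequest;
        import org.elasticsearch.action.search.SearchResponse;
        import org.elasticsearch.client.RequestOptions;
        import org.elasticsearch.client.RestHighLevelClient;
        import org.elasticsearch.index.query.QueryBuilders;
        import org.elasticsearch.search.SearchHit;
        import org.elasticsearch.search.builder.SearchSourceBuilder;

        // Hypothetical sketch: load every department from an assumed "sys_dept" index
        // and build a dept_id -> dept_name map for labelling aggregation buckets.
        public static Map<String, String> getDepts(RestHighLevelClient client) throws IOException {
            SearchSourceBuilder source = new SearchSourceBuilder()
                    .query(QueryBuilders.matchAllQuery())
                    .size(10000); // assumes fewer than 10k departments
            SearchRequest request = new SearchRequest("sys_dept").source(source);
            SearchResponse response = client.search(request, RequestOptions.DEFAULT);
            Map<String, String> deptMap = new HashMap<>();
            for (SearchHit hit : response.getHits().getHits()) {
                Map<String, Object> src = hit.getSourceAsMap();
                deptMap.put(String.valueOf(src.get("dept_id")), String.valueOf(src.get("dept_name")));
            }
            return deptMap;
        }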
  3. Date histogram grouping (by hour)
SELECT HOUR(create_time) khour, avg(angle_x) averageX, avg(angle_y) averageY, avg(angle_z) averageZ
FROM dev_shock_data
WHERE device_id = #{deviceId} AND create_time > #{startTime} AND create_time < #{endTime}
GROUP BY HOUR(create_time)
ORDER BY khour

The SQL translated into the corresponding ES Java API:

        final ArrayList<GroupByHour> groupByHours = new ArrayList<>();
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder()
                .sort("create_time", SortOrder.ASC)
                .trackTotalHits(true);
        BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery();
        boolQueryBuilder.must(QueryBuilders.termQuery("device_id", deviceId));
        // range query: create_time > startTime AND create_time < endTime
        boolQueryBuilder.filter(QueryBuilders.rangeQuery("create_time").gt(startTime).lt(endTime));
        searchSourceBuilder.size(0);
        searchSourceBuilder.query(boolQueryBuilder);
        AvgAggregationBuilder averageX = AggregationBuilders.avg("averageX").field(x);
        AvgAggregationBuilder averageY = AggregationBuilders.avg("averageY").field(y);
        AvgAggregationBuilder averageZ = AggregationBuilders.avg("averageZ").field(z);
        AggregationBuilder groupTime = AggregationBuilders.dateHistogram("khour")
                // bucket documents by the create_time field
                .field("create_time")
                // bucket interval: DateHistogramInterval.* constants support hour, day, month, year, etc.
                // (deprecated on newer 7.x clients in favor of calendarInterval/fixedInterval)
                .dateHistogramInterval(DateHistogramInterval.HOUR)
                // format of the bucket keys in the result
                .format("HH")
                // optionally pin the time zone, e.g.:
                // .timeZone(DateTimeZone.forTimeZone(TimeZone.getTimeZone("GMT+8")))
                .minDocCount(0L)
                .subAggregation(averageX)
                .subAggregation(averageY)
                .subAggregation(averageZ);
        searchSourceBuilder.aggregation(groupTime);
        final SearchResponse dev_shock_data = EsClintUtil.execSearch(restHighLevelClient, searchSourceBuilder, "dev_shock_data");
        ParsedDateHistogram khour = dev_shock_data.getAggregations().get("khour");
        final List<? extends Histogram.Bucket> buckets1 = khour.getBuckets();
        for (Histogram.Bucket bucket : buckets1) {
            if (bucket.getDocCount() > 0) {
                final GroupByHour groupByHour = new GroupByHour();
                // the bucket key is the hour ("HH"), per the format set above
                groupByHour.setHour(Integer.parseInt(bucket.getKeyAsString()));
                Avg xAvg = bucket.getAggregations().get("averageX");
                Avg yAvg = bucket.getAggregations().get("averageY");
                Avg zAvg = bucket.getAggregations().get("averageZ");
                groupByHour.setAverageX(xAvg.getValue());
                groupByHour.setAverageY(yAvg.getValue());
                groupByHour.setAverageZ(zAvg.getValue());
                groupByHours.add(groupByHour);
            }
        }
        return groupByHours;
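The GroupByHour result holder isn't defined in the post; from the setters used above it's presumably a plain POJO along these lines (the field types are my assumption):

        // Hypothetical result holder matching the setters used above.
        public class GroupByHour {
            private int hour;
            private double averageX;
            private double averageY;
            private double averageZ;

            public int getHour() { return hour; }
            public void setHour(int hour) { this.hour = hour; }
            public double getAverageX() { return averageX; }
            public void setAverageX(double averageX) { this.averageX = averageX; }
            public double getAverageY() { return averageY; }
            public void setAverageY(double averageY) { this.averageY = averageY; }
            public double getAverageZ() { return averageZ; }
            public void setAverageZ(double averageZ) { this.averageZ = averageZ; }
        }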

Feel free to send feature requests my way!
