Bulk-importing tens of millions of records into Elasticsearch with BulkProcessor

1. Below is my original code. It imported 4 million test records into ES successfully, but when importing 13 million production records it failed with connection-timeout and I/O errors:

public static void bulkDeleteByUserNoRequest(String index, List<String> userNos) throws IOException {
    // Create the ES client; try-with-resources closes it automatically
    try (RestHighLevelClient client = getClient()) {
        BulkProcessor.Listener listener = new BulkProcessor.Listener() {
            @Override
            public void beforeBulk(long executionId, BulkRequest request) {
                // Called before each bulk request is executed
                System.out.println("Executing " + request.numberOfActions() + " actions");
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                // Called after each bulk request completes
                System.out.println("Completed " + request.numberOfActions() + " actions in " + response.getTook());
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                // Called when a bulk request fails entirely
                System.out.println("Failed " + request.numberOfActions() + " actions");
                System.out.println("Failure: " + failure);
            }
        };
        BulkProcessor bulkProcessor = BulkProcessor.builder(
                (request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
                listener)
                .setBulkActions(5000)
                .setBulkSize(new ByteSizeValue(5L, ByteSizeUnit.MB))
                .setFlushInterval(TimeValue.timeValueSeconds(10L))
                .setConcurrentRequests(10)
                .setBackoffPolicy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3))
                .build();

        SearchRequest searchRequest = new SearchRequest(index);
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.termsQuery("userNo", userNos));
        searchSourceBuilder.size(10000);
        searchRequest.source(searchSourceBuilder);

        SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);
        if (searchResponse.getHits().getTotalHits().value == 0) {
            return;
        }
        for (SearchHit hit : searchResponse.getHits().getHits()) {
            bulkProcessor.add(new DeleteRequest(index).id(hit.getId()));
        }
        bulkProcessor.flush();
        bulkProcessor.close();
    }
}
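The getClient() helper is referenced but never shown in the post; here is a minimal sketch, assuming a plain single-node cluster (host, port, and scheme are placeholder values, not from the original):

private static RestHighLevelClient getClient() {
    // Placeholder connection settings; adjust to the real cluster
    return new RestHighLevelClient(
            RestClient.builder(new HttpHost("localhost", 9200, "http")));
}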

2. The connection-timeout and I/O errors were caused by too much concurrency. With the settings above, .setBulkActions(5000) and .setConcurrentRequests(10), up to ten bulk requests of 5000 actions each were in flight at once; if any one of those requests failed, its records were simply missing from the index. After changing the settings to .setBulkActions(2000) and .setConcurrentRequests(1), the import of 10 million records completed successfully.
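Related to the lost-data symptom: a bulk request can also return successfully at the HTTP level while individual actions inside it fail, and the success-path afterBulk in the listener above never checks for that. A minimal sketch of a stricter success callback, using only the standard BulkResponse API:

@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    // A 200 response can still carry per-action failures; check explicitly
    if (response.hasFailures()) {
        System.out.println("Partial failure: " + response.buildFailureMessage());
    } else {
        System.out.println("Completed " + request.numberOfActions() + " actions in " + response.getTook());
    }
}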

3. Pitfalls

If the final batch never reaches the 5000-action threshold, it is not sent automatically, so trigger it manually with .flush(). The client also must not be closed immediately afterwards, or an I/O exception is thrown; instead call bulkProcessor.awaitClose(executeTime / 1000, TimeUnit.SECONDS) on the line right after the flush, as sketched below.
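In code, the safe shutdown order looks like this (the 30-second timeout is an arbitrary example value):

// Send any remaining buffered actions, then block until in-flight bulks finish
bulkProcessor.flush();
boolean terminated = bulkProcessor.awaitClose(30L, TimeUnit.SECONDS);
if (!terminated) {
    System.out.println("Some bulk requests were still pending when the timeout expired");
}
// Only now is it safe to close the client
client.close();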


4. Final code: the complete version below imported ten million records into ES successfully in about 30 minutes.

public static void bulksaveByUserNoRequest(String index, List<Map<String, Object>> list) throws IOException {
    // Create the ES client; try-with-resources closes it automatically
    try (RestHighLevelClient client = getClient()) {
        BulkProcessor.Listener listener = new BulkProcessor.Listener() {
            @Override
            public void beforeBulk(long executionId, BulkRequest request) {
                // Called before each bulk request is executed
                System.out.println("Executing " + request.numberOfActions() + " actions");
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                // Called after each bulk request completes
                System.out.println("Completed " + request.numberOfActions() + " actions in " + response.getTook());
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                // Called when a bulk request fails entirely
                System.out.println("Failed " + request.numberOfActions() + " actions");
                System.out.println("Failure: " + failure);
            }
        };
        BulkProcessor bulkProcessor = BulkProcessor.builder(
                        (request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
                        listener)
                .setBulkActions(2000)
                .setBulkSize(new ByteSizeValue(5L, ByteSizeUnit.MB))
                .setFlushInterval(TimeValue.timeValueSeconds(10L))
                .setConcurrentRequests(2)
                .setBackoffPolicy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3))
                .build();

        try {
            // Each map is one document; userNo doubles as the document id
            for (Map<String, Object> tempMap : list) {
                IndexRequest indexRequest = new IndexRequest(index)
                        .id((String) tempMap.get("userNo"))
                        .source(JSON.toJSONString(tempMap), XContentType.JSON);
                bulkProcessor.add(indexRequest);
            }
            // Send the final partial batch, then wait for in-flight requests to finish
            bulkProcessor.flush();
            bulkProcessor.awaitClose(5, TimeUnit.SECONDS);
        } catch (Exception e) {
            log.error("Error: {}", e.getMessage(), e);
        } finally {
            // Safe even after awaitClose(): close() is a no-op once the processor is closed
            bulkProcessor.close();
        }
    } catch (Exception e) {
        log.error("Error: {}", e.getMessage(), e);
    }
}
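For reference, a hypothetical call site (index name, field names, and values are made up for illustration):

List<Map<String, Object>> docs = new ArrayList<>();
Map<String, Object> doc = new HashMap<>();
doc.put("userNo", "U0000001"); // also used as the document id
doc.put("userName", "test-user");
docs.add(doc);
bulksaveByUserNoRequest("user_index", docs);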