Elasticsearch Write Path Source Code Analysis (Part 1)

Based on Elasticsearch 7.2

To write, update, or delete documents in an index, Elasticsearch uses the Index API, Delete API, and Update API; the Bulk API can also apply multiple operations in one request. The core source class is org.elasticsearch.action.bulk.TransportBulkAction. Let's first trace how an HTTP request reaches this class.

1. The package org.elasticsearch.rest.action.document registers the REST route handlers and defines which requests count as Index or Bulk requests. Taking Index as an example (a client-side usage sketch follows the code):
public RestIndexAction(Settings settings, RestController controller) {
        super(settings);
        // POST without an id auto-generates one; how ids are auto-generated is covered later
        controller.registerHandler(POST, "/{index}/_doc", this); // auto id creation
        // index a document with an explicit id via POST or PUT (_doc is the default type)
        controller.registerHandler(PUT, "/{index}/_doc/{id}", this);
        controller.registerHandler(POST, "/{index}/_doc/{id}", this);

        // with _create, the index operation fails if a document with that id already exists
        CreateHandler createHandler = new CreateHandler(settings);
        controller.registerHandler(PUT, "/{index}/_create/{id}", createHandler);
        controller.registerHandler(POST, "/{index}/_create/{id}/", createHandler);

        // Deprecated typed endpoints.
        // index a document with a custom type; types are deprecated and will be removed in 8.0
        controller.registerHandler(POST, "/{index}/{type}", this); // auto id creation
        controller.registerHandler(PUT, "/{index}/{type}/{id}", this);
        controller.registerHandler(POST, "/{index}/{type}/{id}", this);
        controller.registerHandler(PUT, "/{index}/{type}/{id}/_create", createHandler);
        controller.registerHandler(POST, "/{index}/{type}/{id}/_create", createHandler);
}
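For orientation, here is a hedged sketch of client calls that would hit these routes, using the 7.x high-level REST client. The index name, id, and document body are made up for illustration; this is not part of the server-side code being analyzed.

import org.apache.http.HttpHost;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentType;

public class IndexRouteExamples {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {

            // POST /my-index/_doc      -> id is auto-generated by the server
            IndexRequest autoId = new IndexRequest("my-index")
                    .source("{\"user\":\"kimchy\"}", XContentType.JSON);

            // PUT /my-index/_doc/1     -> index (or overwrite) the document with id 1
            IndexRequest explicitId = new IndexRequest("my-index").id("1")
                    .source("{\"user\":\"kimchy\"}", XContentType.JSON);

            // PUT /my-index/_create/1  -> fails if a document with id 1 already exists
            IndexRequest createOnly = new IndexRequest("my-index").id("1")
                    .create(true)
                    .source("{\"user\":\"kimchy\"}", XContentType.JSON);

            IndexResponse response = client.index(explicitId, RequestOptions.DEFAULT);
            System.out.println(response.getResult());
        }
    }
}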
2. This class extends BaseRestHandler, and the action it performs is implemented in the overridden prepareRequest method (a short sketch of the listener pattern follows the code):
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    IndexRequest indexRequest;
    // get the type of the target index
    final String type = request.param("type");
    // if the type is not the default _doc
    if (type != null && type.equals(MapperService.SINGLE_MAPPING_NAME) == false) {
        // log a deprecation warning: types will be removed in 8.0
        deprecationLogger.deprecatedAndMaybeLog("index_with_types", TYPES_DEPRECATION_MESSAGE);
        // build an IndexRequest object for this index request
        indexRequest = new IndexRequest(request.param("index"), type, request.param("id"));
    } else {
        // use the constructor that is not marked deprecated and does not take a type
        indexRequest = new IndexRequest(request.param("index"));
        indexRequest.id(request.param("id"));
    }
    // copy the URL query parameters from the request onto the IndexRequest
    indexRequest.routing(request.param("routing"));
    indexRequest.setPipeline(request.param("pipeline"));
    indexRequest.source(request.requiredContent(), request.getXContentType());
    indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT));
    indexRequest.setRefreshPolicy(request.param("refresh"));
    indexRequest.version(RestActions.parseVersion(request));
    indexRequest.versionType(VersionType.fromString(request.param("version_type"), indexRequest.versionType()));
    indexRequest.setIfSeqNo(request.paramAsLong("if_seq_no", indexRequest.ifSeqNo()));
    indexRequest.setIfPrimaryTerm(request.paramAsLong("if_primary_term", indexRequest.ifPrimaryTerm()));
    String sOpType = request.param("op_type");
    String waitForActiveShards = request.param("wait_for_active_shards");
    if (waitForActiveShards != null) {
        // accepts a number or _all
        indexRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
    }
    // set the operation type (index or create)
    if (sOpType != null) {
        indexRequest.opType(sOpType);
    }

    // call client.index(); channel here is the RestChannel.
    // The REST request is handled by returning a RestChannelConsumer, which executes the request
    // against the channel and wires up a listener for the REST response.
    return channel ->
            client.index(
                indexRequest,
                new RestStatusToXContentListener<>(channel, r -> r.getLocation(indexRequest.routing()))
            );
}
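The RestStatusToXContentListener passed to client.index() is simply an ActionListener that writes the response (or failure) back to the RestChannel. As a rough illustration of the listener pattern used throughout this call chain, here is a minimal sketch built with ActionListener.wrap; the handler bodies are placeholders, not the actual RestStatusToXContentListener logic.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.index.IndexResponse;

public class ListenerSketch {
    // Sketch only: a listener that reacts to the asynchronous index response.
    // The real REST layer uses RestStatusToXContentListener, which serializes
    // the response as XContent and sends it over the RestChannel.
    static ActionListener<IndexResponse> sketchListener() {
        return ActionListener.wrap(
                response -> System.out.println("indexed, result=" + response.getResult()),
                e -> System.err.println("index failed: " + e.getMessage())
        );
    }
}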
3. Next, look at the index method itself (org.elasticsearch.client.support.AbstractClient => org.elasticsearch.client.node.NodeClient):
@Override
// perform the index operation
public void index(final IndexRequest request, final ActionListener<IndexResponse> listener) {
    execute(IndexAction.INSTANCE, request, listener);
}

/**
 * This is the single execution point of *all* clients.
 * Every client operation goes through this method: the listener defined above is wrapped
 * by the client's threaded wrapper, and the actual work happens in doExecute.
 */
@Override
public final <Request extends ActionRequest, Response extends ActionResponse> void execute(
    Action<Response> action, Request request, ActionListener<Response> listener) {
    listener = threadedWrapper.wrap(listener);
    doExecute(action, request, listener);
}

// implemented by the concrete Client subclass
protected abstract <Request extends ActionRequest, Response extends ActionResponse>
void doExecute(Action<Response> action, Request request, ActionListener<Response> listener);

// NodeClient's implementation
@Override
public <Request extends ActionRequest, Response extends ActionResponse>
void doExecute(Action<Response> action, Request request, ActionListener<Response> listener) {
    // Discard the task because the Client interface doesn't use it.
    executeLocally(action, request, listener);
}

/**
 * Execute an {@link Action} locally, returning that {@link Task} used to track it, and linking an
 * {@link ActionListener}. Prefer this method if you don't need access to the task when listening for
 * the response. This is the method used to implement the {@link Client} interface.
 */
public <    Request extends ActionRequest,
            Response extends ActionResponse
        > Task executeLocally(Action<Response> action, Request request, ActionListener<Response> listener) {
    return transportAction(action).execute(request, listener);
}

// look up the TransportAction registered for this action and call execute on it
private <    Request extends ActionRequest,
            Response extends ActionResponse
        > TransportAction<Request, Response> transportAction(Action<Response> action) {
    if (actions == null) {
        throw new IllegalStateException("NodeClient has not been initialized");
    }
    TransportAction<Request, Response> transportAction = actions.get(action);
    if (transportAction == null) {
        throw new IllegalStateException("failed to find action [" + action + "] to execute");
    }
    return transportAction;
}
4. The next step is to obtain the TransportAction that corresponds to the action. These pairs are registered in org.elasticsearch.action.ActionModule, which combines each action and its TransportAction into a map. For index, the registered class is TransportIndexAction (a simplified sketch of this registry pattern follows the registration line):
 actions.register(IndexAction.INSTANCE, TransportIndexAction.class);
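The map behind actions.get(action) is built by ActionModule from registrations like the one above. As a purely illustrative sketch (not the real ActionModule or NodeClient code, and using simplified hypothetical types), the lookup pattern boils down to a map keyed by the action singleton:

import java.util.HashMap;
import java.util.Map;

// Hypothetical, heavily simplified version of the Action -> TransportAction registry.
// The real classes are org.elasticsearch.action.ActionModule and NodeClient; the
// types and wiring here are illustration only.
public class ActionRegistrySketch {
    interface SketchAction<Response> {}                       // stand-in for Action<Response>
    interface SketchTransportAction<Request, Response> {      // stand-in for TransportAction
        void execute(Request request);
    }

    private final Map<SketchAction<?>, SketchTransportAction<?, ?>> actions = new HashMap<>();

    <Req, Resp> void register(SketchAction<Resp> action, SketchTransportAction<Req, Resp> transportAction) {
        actions.put(action, transportAction);                 // mirrors actions.register(...)
    }

    @SuppressWarnings("unchecked")
    <Req, Resp> SketchTransportAction<Req, Resp> transportAction(SketchAction<Resp> action) {
        SketchTransportAction<Req, Resp> transportAction =
                (SketchTransportAction<Req, Resp>) actions.get(action);
        if (transportAction == null) {
            throw new IllegalStateException("failed to find action [" + action + "] to execute");
        }
        return transportAction;                               // mirrors NodeClient#transportAction
    }
}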
5. Now look at this class in detail:
/**
 * Performs the index operation.
 *
 * Allows for the following settings:
 * <ul>
 * <li><b>autoCreateIndex</b>: When set to {@code true}, will automatically create an index if one does not exist.
 * Defaults to {@code true}. (controls whether an index may be created automatically)
 * <li><b>allowIdGeneration</b>: If the id is not set, should it be generated. Defaults to {@code true}.
 * (controls whether an id may be generated automatically)
 * </ul>
 *
 * Deprecated, use TransportBulkAction with a single item instead: a single index operation also goes through TransportBulkAction
 */
@Deprecated
public class TransportIndexAction extends TransportSingleItemBulkWriteAction<IndexRequest, IndexResponse> {

    @Inject
    public TransportIndexAction(ActionFilters actionFilters, TransportService transportService, TransportBulkAction bulkAction) {
        super(IndexAction.NAME, transportService, actionFilters, IndexRequest::new, bulkAction);
    }
}

// how the parent class, TransportSingleItemBulkWriteAction, is constructed
protected TransportSingleItemBulkWriteAction(String actionName, TransportService transportService,
                                             ActionFilters actionFilters,
                                             Writeable.Reader<Request> requestReader,
                                             TransportBulkAction bulkAction) {
    super(actionName, transportService, actionFilters, requestReader);
    // in the end, TransportBulkAction is the class that executes the index request
    this.bulkAction = bulkAction;
}
6. Once this class is obtained, the method that runs is doExecute, which wraps the single request into a bulk request (a client-side equivalence sketch follows the code):
@Override
protected void doExecute(Task task, final Request request, final ActionListener<Response> listener) {
    // call TransportBulkAction's execute method with a single-item bulk request
    bulkAction.execute(task, toSingleItemBulkRequest(request), wrapBulkResponse(listener));
}

// convert the bulk response back into the single-item response type
public static <Response extends ReplicationResponse & WriteResponse>
ActionListener<BulkResponse> wrapBulkResponse(ActionListener<Response> listener) {
    return ActionListener.wrap(bulkItemResponses -> {
        assert bulkItemResponses.getItems().length == 1 : "expected only one item in bulk request";
        BulkItemResponse bulkItemResponse = bulkItemResponses.getItems()[0];
        if (bulkItemResponse.isFailed() == false) {
            final DocWriteResponse response = bulkItemResponse.getResponse();
            listener.onResponse((Response) response);
        } else {
            listener.onFailure(bulkItemResponse.getFailure().getCause());
        }
    }, listener::onFailure);
}

// convert the single request into a bulk request
public static BulkRequest toSingleItemBulkRequest(ReplicatedWriteRequest<?> request) {
    // wrap the request into a BulkRequest containing exactly one item
    BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.add(((DocWriteRequest<?>) request));
    bulkRequest.setRefreshPolicy(request.getRefreshPolicy());
    bulkRequest.timeout(request.timeout());
    bulkRequest.waitForActiveShards(request.waitForActiveShards());
    request.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE);
    return bulkRequest;
}
}
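In other words, a single index request is executed as a bulk request with exactly one item, and the first item of the bulk response becomes the index response. From a client's point of view this is roughly equivalent to the following hedged sketch (index name, id, and body are made up; the client setup mirrors the earlier hypothetical example):

import org.apache.http.HttpHost;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentType;

public class SingleItemBulkSketch {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {

            // A bulk request containing exactly one index request, mirroring toSingleItemBulkRequest
            BulkRequest bulkRequest = new BulkRequest()
                    .add(new IndexRequest("my-index").id("1")
                            .source("{\"user\":\"kimchy\"}", XContentType.JSON));

            BulkResponse bulkResponse = client.bulk(bulkRequest, RequestOptions.DEFAULT);

            // The single item of the bulk response plays the role of the index response,
            // mirroring what wrapBulkResponse extracts on the server side
            BulkItemResponse item = bulkResponse.getItems()[0];
            if (item.isFailed()) {
                System.err.println("index failed: " + item.getFailureMessage());
            } else {
                System.out.println("indexed, result=" + item.getResponse().getResult());
            }
        }
    }
}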
7. The method that actually performs the write is doExecute in org.elasticsearch.action.bulk.TransportBulkAction. This part needs to be studied in pieces.
// receives the Task associated with the request, the BulkRequest built above,
// and the listener for the request's result defined earlier
protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
}
8. The first step is to determine whether an ingest pipeline is used (a sketch of the default-pipeline setting follows the code):
// record the relative start time of the bulk request
final long startTime = relativeTime();
// an AtomicArray with one response slot per bulk item (slots stay null until a response is set)
final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());
// whether any request in this bulk uses an ingest pipeline
boolean hasIndexRequestsWithPipelines = false;
// get the cluster state metadata from clusterService
final MetaData metaData = clusterService.state().getMetaData();
// a map of the metadata of all indices, keyed by index name
ImmutableOpenMap<String, IndexMetaData> indicesMetaData = metaData.indices();
// iterate over the requests in the bulk
for (DocWriteRequest<?> actionRequest : bulkRequest.requests) {
    // extract the IndexRequest (if any) behind the DocWriteRequest
    IndexRequest indexRequest = getIndexWriteRequest(actionRequest);
    if (indexRequest != null) {
        // get pipeline from request
        String pipeline = indexRequest.getPipeline();
        // if no pipeline was specified on the request
        if (pipeline == null) {
            // start to look for default pipeline via settings found in the index meta data
            // check whether the target index defines a default pipeline
            IndexMetaData indexMetaData = indicesMetaData.get(actionRequest.index());
            if (indexMetaData == null && indexRequest.index() != null) {
                // if the write request is through an alias use the write index's meta data
                // check whether the request goes through an alias
                AliasOrIndex indexOrAlias = metaData.getAliasAndIndexLookup().get(indexRequest.index());
                if (indexOrAlias != null && indexOrAlias.isAlias()) {
                    AliasOrIndex.Alias alias = (AliasOrIndex.Alias) indexOrAlias;
                    // if an alias is used, take the metadata of its write index
                    indexMetaData = alias.getWriteIndex();
                }
            }
            if (indexMetaData != null) {
                // Find the default pipeline if one is defined on an existing index.
                // read the default pipeline from the index settings
                String defaultPipeline = IndexSettings.DEFAULT_PIPELINE.get(indexMetaData.getSettings());
                indexRequest.setPipeline(defaultPipeline);
                // check whether the default pipeline is _none (the no-op pipeline)
                if (IngestService.NOOP_PIPELINE_NAME.equals(defaultPipeline) == false) {
                    hasIndexRequestsWithPipelines = true;
                }
            } else if (indexRequest.index() != null) {
                // No index exists yet (and is valid request), so matching index templates to look for a default pipeline
                // the index does not exist yet, so look through index templates matching the index name for a default pipeline
                List<IndexTemplateMetaData> templates = MetaDataIndexTemplateService.findTemplates(metaData, indexRequest.index());
                assert (templates != null);
                String defaultPipeline = IngestService.NOOP_PIPELINE_NAME;
                // order of templates are highest order first, break if we find a default_pipeline
                for (IndexTemplateMetaData template : templates) {
                    final Settings settings = template.settings();
                    // if this template defines a default pipeline, use it and stop looking
                    if (IndexSettings.DEFAULT_PIPELINE.exists(settings)) {
                        defaultPipeline = IndexSettings.DEFAULT_PIPELINE.get(settings);
                        break;
                    }
                }
                indexRequest.setPipeline(defaultPipeline);
                if (IngestService.NOOP_PIPELINE_NAME.equals(defaultPipeline) == false) {
                    hasIndexRequestsWithPipelines = true;
                }
            }
        } else if (IngestService.NOOP_PIPELINE_NAME.equals(pipeline) == false) {
            hasIndexRequestsWithPipelines = true;
        }
    }
}
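The setting being resolved here is index.default_pipeline (IndexSettings.DEFAULT_PIPELINE). As a hedged sketch of how it comes into play: an index created with that setting causes index requests without an explicit pipeline parameter to be routed through the named pipeline. The index and pipeline names below are made up, and the pipeline itself is assumed to have been created beforehand via the ingest API.

import org.apache.http.HttpHost;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;

public class DefaultPipelineSketch {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {

            // Create an index whose settings name a default ingest pipeline
            // ("my-pipeline" is a hypothetical pipeline assumed to already exist).
            CreateIndexRequest createIndex = new CreateIndexRequest("logs")
                    .settings(Settings.builder()
                            .put("index.default_pipeline", "my-pipeline"));
            client.indices().create(createIndex, RequestOptions.DEFAULT);

            // This request sets no pipeline explicitly, so TransportBulkAction resolves
            // "my-pipeline" from the index metadata in the loop shown above.
            IndexRequest request = new IndexRequest("logs")
                    .source("{\"message\":\"hello\"}", XContentType.JSON);
            client.index(request, RequestOptions.DEFAULT);
        }
    }
}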