Kafka storage: how ReplicaManager writes data

Entry point: producer write requests are handled by KafkaApis.handleProducerRequest
-> replicaManager.appendMessages()
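For context, the hand-off from KafkaApis to ReplicaManager looks roughly like the sketch below (paraphrased from the 0.10.x source; exact field and variable names such as authorizedMessagesPerPartition and sendResponseCallback vary by version):

  // Sketch of the call site in KafkaApis.handleProducerRequest (paraphrased)
  // only the admin client is allowed to write to internal topics
  val internalTopicsAllowed = produceRequest.clientId == AdminUtils.AdminClientId

  replicaManager.appendMessages(
    produceRequest.ackTimeoutMs.toLong,   // how long a delayed produce may wait in the purgatory
    produceRequest.requiredAcks,          // 0, 1, or -1 (all)
    internalTopicsAllowed,
    authorizedMessagesPerPartition,       // MessageSet per TopicPartition, after authorization
    sendResponseCallback)                 // invoked exactly once with the per-partition results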
  def appendMessages(timeout: Long,
                     requiredAcks: Short,
                     internalTopicsAllowed: Boolean,
                     messagesPerPartition: Map[TopicPartition, MessageSet],
                     responseCallback: Map[TopicPartition, PartitionResponse] => Unit) {

    // Check that the acks value passed in is one of the legal values (-1, 0, or 1)
    if (isValidRequiredAcks(requiredAcks)) {
      val sTime = SystemTime.milliseconds

      //TODO Append the messages to the local log.
      //localProduceResults is the per-partition result of the broker-side write.
      val localProduceResults = appendToLocalLog(internalTopicsAllowed, messagesPerPartition, requiredAcks)
      debug("Produce to local log in %d ms".format(SystemTime.milliseconds - sTime))

      //Build the response to the client from the local append results.
      //At this point the data has only been written to the leader partition.
      val produceStatus = localProduceResults.map { case (topicPartition, result) =>
        topicPartition ->
                ProducePartitionStatus(
                  result.info.lastOffset + 1, // required offset
                  new PartitionResponse(result.errorCode, result.info.firstOffset, result.info.logAppendTime)) // response status
      }
      //acks = -1: we must wait for the ISR replicas before responding
      if (delayedRequestRequired(requiredAcks, messagesPerPartition, localProduceResults)) {
        // create delayed produce operation
        val produceMetadata = ProduceMetadata(requiredAcks, produceStatus)
        val delayedProduce = new DelayedProduce(timeout, produceMetadata, this, responseCallback)

        // create a list of (topic, partition) pairs to use as keys for this delayed produce operation
        val producerRequestKeys = messagesPerPartition.keys.map(new TopicPartitionOperationKey(_)).toSeq

        // try to complete the request immediately, otherwise put it into the purgatory
        // this is because while the delayed produce operation is being created, new
        // requests may arrive and hence make this operation completable.
        //Timing wheel -> delayed scheduling: the operation is woken up once the
        //follower partitions have fetched the data from the leader partition.
        delayedProducePurgatory.tryCompleteElseWatch(delayedProduce, producerRequestKeys)

      } else {
        // we can respond immediately
        val produceResponseStatus = produceStatus.mapValues(status => status.responseStatus)
        responseCallback(produceResponseStatus)
      }
    } else {
      // If required.acks is outside accepted range, something is wrong with the client
      // Just return an error and don't handle the request at all
      val responseStatus = messagesPerPartition.map {
        case (topicAndPartition, messageSet) =>
          //Build the response for this partition,
          //but attach the INVALID_REQUIRED_ACKS error code,
          //so that the client catches the error and knows how to handle it.
          topicAndPartition -> new PartitionResponse(Errors.INVALID_REQUIRED_ACKS.code,
            LogAppendInfo.UnknownLogAppendInfo.firstOffset, Message.NoTimestamp)
      }

      //Finally invoke the callback;
      //this is what sends the response back to the client.
      responseCallback(responseStatus)
    }
  }
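The two private helpers referenced above are short. In this version of ReplicaManager their bodies look roughly as follows (paraphrased; check your exact Kafka version):

  // acks may only be -1 (all ISR replicas), 1 (leader only), or 0 (fire and forget)
  private def isValidRequiredAcks(requiredAcks: Short): Boolean = {
    requiredAcks == -1 || requiredAcks == 1 || requiredAcks == 0
  }

  // a DelayedProduce is needed only when:
  // 1. acks == -1 (the caller wants to wait for the followers),
  // 2. there is data to append, and
  // 3. at least one local append succeeded
  //    (if every partition already failed, we can respond immediately with the errors)
  private def delayedRequestRequired(requiredAcks: Short,
                                     messagesPerPartition: Map[TopicPartition, MessageSet],
                                     localProduceResults: Map[TopicPartition, LogAppendResult]): Boolean = {
    requiredAcks == -1 &&
    messagesPerPartition.nonEmpty &&
    localProduceResults.values.count(_.error.isDefined) < messagesPerPartition.size
  }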

-> appendToLocalLog
Appends the messages to the local log.
  private def appendToLocalLog(internalTopicsAllowed: Boolean,
                               messagesPerPartition: Map[TopicPartition, MessageSet],
                               requiredAcks: Short): Map[TopicPartition, LogAppendResult] = {
    trace("Append [%s] to local log ".format(messagesPerPartition))
    //Iterate over every partition in the request
    messagesPerPartition.map { case (topicPartition, messages) =>
      BrokerTopicStats.getBrokerTopicStats(topicPartition.topic).totalProduceRequestRate.mark()
      BrokerTopicStats.getBrokerAllTopicsStats().totalProduceRequestRate.mark()

      // reject appending to internal topics if it is not allowed
      //This branch is taken when the topic being written to is a Kafka-internal
      //topic such as __consumer_offsets.
      //In 0.8, consumer offsets were committed to ZooKeeper
      //(consumer -> kafka -> offset -> ZK); in later versions they are stored in
      //the __consumer_offsets topic (consumer -> kafka -> offset -> __consumer_offsets).
      if (Topic.isInternal(topicPartition.topic) && !internalTopicsAllowed) {
        (topicPartition, LogAppendResult(
          LogAppendInfo.UnknownLogAppendInfo,
          Some(new InvalidTopicException("Cannot append to internal topic %s".format(topicPartition.topic)))))
      } else {
        //This is the branch where the data is actually written
        try {
          //Find the partition to write to
          val partitionOpt = getPartition(topicPartition.topic, topicPartition.partition)
          val info = partitionOpt match {
            case Some(partition) =>
              //TODO Write the messages to the leader partition
              partition.appendMessagesToLeader(messages.asInstanceOf[ByteBufferMessageSet], requiredAcks)
            case None => throw new UnknownTopicOrPartitionException("Partition %s doesn't exist on %d"
              .format(topicPartition, localBrokerId))
          }
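The excerpt stops after the match. In the actual source, the try block goes on to wrap the append info in a LogAppendResult, and a catch block converts expected per-partition failures into error results instead of failing the whole request; roughly (paraphrased sketch):

          //(truncated in the excerpt) the try block ends by updating the
          //byte/message-rate metrics and returning this partition's result
          (topicPartition, LogAppendResult(info))
        } catch {
          // an unrecoverable I/O error halts the whole broker
          case e: KafkaStorageException =>
            fatal("Halting due to unrecoverable I/O error while handling produce request: ", e)
            Runtime.getRuntime.halt(1)
            (topicPartition, null)
          // expected errors (unknown partition, not leader, record too large, ...)
          // become the error field of this partition's LogAppendResult, so only
          // this partition fails rather than the entire produce request
          case e: Throwable =>
            (topicPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(e)))
        }
      }
    }
  }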

--> appendMessagesToLeader  
Writes the messages to the leader partition.
          //Use the Log object to write the data; assignOffsets = true tells the
          //log to assign the final offsets to the incoming messages
          val info = log.append(messages, assignOffsets = true)
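That single line sits inside Partition.appendMessagesToLeader. Its body in this version looks roughly like the sketch below: under a read lock it checks that this broker is still the leader and that the ISR is large enough for acks = -1, appends via the Log, and may then advance the high watermark (paraphrased; details differ across versions):

  def appendMessagesToLeader(messages: ByteBufferMessageSet, requiredAcks: Int = 0) = {
    val (info, leaderHWIncremented) = inReadLock(leaderIsrUpdateLock) {
      leaderReplicaIfLocal() match {
        case Some(leaderReplica) =>
          val log = leaderReplica.log.get
          // refuse the write when acks = -1 but the ISR has shrunk below min.insync.replicas
          if (inSyncReplicas.size < log.config.minInSyncReplicas && requiredAcks == -1)
            throw new NotEnoughReplicasException(
              "Number of insync replicas for partition [%s,%d] is below required minimum".format(topic, partitionId))

          //Use the Log object to write the data
          val info = log.append(messages, assignOffsets = true)
          // the log end offset moved, so pending follower fetches may now complete
          replicaManager.tryCompleteDelayedFetch(new TopicPartitionOperationKey(this.topic, this.partitionId))
          // the high watermark may advance right away, e.g. if the ISR is just the leader
          (info, maybeIncrementLeaderHW(leaderReplica))

        case None =>
          throw new NotLeaderForPartitionException(
            "Leader not local for partition [%s,%d] on broker %d".format(topic, partitionId, localBrokerId))
      }
    }
    // an advanced high watermark can complete delayed produce/fetch operations
    if (leaderHWIncremented)
      tryCompleteDelayedRequests()
    info
  }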

