本文主要对kafka consumer消费客户端具体流程相关的sarama及sarama-cluster源码进行解析。
源码获取方式:
go get github.com/Shopify/sarama
go get github.com/bsm/sarama-cluster
主要包括:
- 初始化配置:客户端创建,初始化topic相关partition、partition-leader-broker信息
- 消费者创建:包括全局consumer创建,partitionConsumer创建、brokerConsumer创建
- 消息消费:按topic、topic中各个partition循环消费,消费消息汇聚初始化consumer,全局consumer获取消息并标记offset
- 消费监测:心跳监测,topic监测,topic下partition监测,以及offset提交
文章目录
一:初始化配置
1.1 topic相关信息获取
依据bootstrap-addr获取topic下broker及各partition对应主broker、从broker、下线broker信息。
// NewClient creates a new Client. It connects to one of the given broker addresses and uses that broker to automatically fetch metadata on the rest of the kafka cluster.
// tryRefreshMetadata loops over usable brokers and issues a MetadataRequest
// for the given topics, retrying with attemptsRemaining.
// NOTE(review): excerpt from sarama's client implementation — the "此处省略"
// lines mark elided source, and the "其中… struct{…}" listings below are
// illustrative field descriptions of response types, not executable code.
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
此处省略
for broker := client.any(); broker != nil; broker = client.any() {
// client.any dials one of the bootstrap addresses and returns a usable broker
req := &MetadataRequest{
Topics: topics}
// brokers at Kafka >= 0.10.0.0 understand version 1 of the metadata request
if client.conf.Version.IsAtLeast(V0_10_0_0) {
req.Version = 1
}
response, err := broker.GetMetadata(req) // sendAndReceive over the broker connection
其中respone struct{
Brokers []*Broker // brokers whose metadata could be fetched
ClusterID *string
ControllerID int32
Topics []*TopicMetadata
}
其中TopicMetaData struct{
Name string
IsInternal bool // Only valid for Version >= 1
Partitions []*PartitionMetadata
}
其中PartitionMetadata struct{
ID int32
Leader int32
Replicas []int32
OfflineReplicas []int32
}
此处省略
client.updateMetadata(response, allKnownMetaData)// stores the response as map[topic][partitionId]*PartitionMetadata
}
}
二:消费者创建
首先创建全局consumer,按topic及partition创建并发送至partitionConsumer,各partitionConsumer按leader-broker创建brokerConsumer,brokerConsumer周期性发起fetch请求,消费数据。
2.1 全局consumer
// NewConsumerFromClient (sarama-cluster) builds a group Consumer on top of an
// existing client: it creates the underlying sarama consumer, resolves the
// group's coordinator broker, and starts the main consume loop.
// NOTE(review): excerpt — the elided section presumably constructs `c` from
// `consumer` and checks err; confirm against the sarama-cluster source.
func NewConsumerFromClient(client *Client, groupID string, topics []string) (*Consumer, error) {
consumer, err := sarama.NewConsumerFromClient(client.Client)
// elided
if err := c.client.RefreshCoordinator(groupID); err != nil {
// fetches the coordinator broker for this consumer group, cached as map[groupId]broker
client.release()
return nil, err
}
go c.mainLoop() // mainLoop fires createConsumer concurrently per map[topic][partition]
return c, nil
}
2.2 partitionConsumer
// part of mainLoop
// subscribe fetches the latest committed offsets for the assigned
// topic/partition set and spawns one createConsumer goroutine per partition.
// NOTE(review): excerpt — "……" and the trailing comment mark elided source.
func (c *Consumer) subscribe(tomb *loopTomb, subs map[string][]int32) error {
// fetch offsets
offsets, err := c.fetchOffsets(subs) // fetch latest committed offsets as map[topic][partition]offset,
// create consumers in parallel
for topic, partitions := range subs {
for _, partition := range partitions {
info := offsets[topic][partition]
// topic/partition are passed as arguments so each goroutine captures its own copy
go func(topic string, partition int32) {
if e := c.createConsumer(tomb, topic, partition, info); e != nil {
// actually creates the partitionConsumer
……
}
}(topic, partition)
}
}
// elided
}
// createConsumer builds a partitionConsumer for one topic/partition starting
// at the committed offset in info, registers it in c.subs, and starts a
// goroutine (managed by tomb) that forwards its output until stopped.
func (c *Consumer) createConsumer(tomb *loopTomb, topic string, partition int32, info offsetInfo) error {
// Create partitionConsumer
pc, err := newPartitionConsumer(c.consumer, topic, partition, info, c.client.config.Consumer.Offsets.Initial) // consumes via consumer.ConsumePartition()
if err != nil {
return err
}
// Store partitionConsumer in subscriptions
c.subs.Store(topic, partition, pc)
// Start partition consumer goroutine
tomb.Go(func(stopper <-chan none) {
if c.client.config.Group.Mode == ConsumerModePartitions {
// partition mode: hand the whole partitionConsumer to the user, only relay errors
pc.waitFor(stopper, c.errors)
} else {
pc.multiplex(stopper, c.messages, c.errors) // funnels partitionConsumer.Messages into the shared consumer channels
}
})
return nil
}
// newPartitionConsumer resolves the starting offset (committed offset if one
// exists, otherwise defaultOffset) and opens a sarama PartitionConsumer.
// NOTE(review): excerpt — the remainder of the constructor is elided.
func newPartitionConsumer(manager sarama.Consumer, topic string, partition int32, info offsetInfo, defaultOffset int64) (*partitionConsumer, error) {
offset := info.NextOffset(defaultOffset)
pcm, err := manager.ConsumePartition(topic, partition, offset) // creates a PartitionConsumer on the given topic/partition with the given offset. offset can be a literal offset, or OffsetNewest or OffsetOldest.
此处省略
}
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
child := &partitionConsumer{
}
if err