Kafka 作为一种常用的消息队列,经受了大家的检验。它的技术架构(集群)如下所示:
一个集群可以有多个 broker,每台机器都部署了 kafka,作为一个 broker。它们可以接收同一个 topic 的消息,可以根据 partition 来区分消息。接收方可以订阅对应 topic 的 partition。订阅方式有两种:一种是 kafka 给消费方推送消息,这种方式下 kafka 需要自己维护每条数据是否被消费方正常消费,复杂度过高,所以不推荐;另一种是消费方主动从 kafka 拉取消息,这种方式需要在循环中反复调用拉取接口。
打算采用golang的方式来实现,用了大部分人用的包sarama,github.com/Shopify/sarama。
这个编译之后可以直接发送消息,也可以指定 partition 来消费。
./main -h
./main -command producer -host 192.168.147.61:9092 -topic my_test 作为生产者
./main -command consumer -host 192.168.147.61:9092 -topic my_test --partition 0 作为消费者
package main
import (
"flag"
"fmt"
"log"
"os"
"io/ioutil"
"bufio"
"strings"
"crypto/tls"
"crypto/x509"
"github.com/Shopify/sarama"
)
// Command-line options; populated in main via the flag package.
var (
	command    string // "consumer" or "producer"
	hosts      string // comma-separated list of broker addresses
	topic      string // Kafka topic to produce to / consume from
	partition  int    // partition number used by the consumer
	saslEnable bool   // enable SASL/PLAIN authentication
	username   string // SASL username
	password   string // SASL password
	tlsEnable  bool   // enable TLS on the broker connection
	clientcert string // path to the client certificate (PEM)
	clientkey  string // path to the client private key (PEM)
	cacert     string // path to the CA certificate (PEM)
)
// main parses the CLI flags, builds a sarama client (optionally with SASL
// and/or TLS), and then runs either a console producer or a single-partition
// consumer depending on -command.
func main() {
	flag.StringVar(&command, "command", "consumer", "consumer|producer")
	flag.StringVar(&hosts, "host", "localhost:9093", "Comma separated kafka hosts")
	flag.StringVar(&topic, "topic", "test--topic", "Kafka topic")
	flag.IntVar(&partition, "partition", 0, "Kafka topic partition")
	flag.BoolVar(&saslEnable, "sasl", false, "SASL enable")
	flag.StringVar(&username, "username", "", "SASL Username")
	flag.StringVar(&password, "password", "", "SASL Password")
	flag.BoolVar(&tlsEnable, "tls", false, "TLS enable")
	flag.StringVar(&clientcert, "cert", "cert.pem", "Client Certificate")
	flag.StringVar(&clientkey, "key", "key.pem", "Client Key")
	flag.StringVar(&cacert, "ca", "ca.pem", "CA Certificate")
	flag.Parse()

	config := sarama.NewConfig()
	if saslEnable {
		config.Net.SASL.Enable = true
		config.Net.SASL.User = username
		config.Net.SASL.Password = password
	}
	if tlsEnable {
		tlsConfig, err := genTLSConfig(clientcert, clientkey, cacert)
		if err != nil {
			log.Fatal(err)
		}
		config.Net.TLS.Enable = true
		config.Net.TLS.Config = tlsConfig
	}

	client, err := sarama.NewClient(strings.Split(hosts, ","), config)
	if err != nil {
		log.Fatalf("unable to create kafka client: %q", err)
	}
	// Release the client's broker connections once the producer/consumer
	// loop returns; the original leaked them.
	defer client.Close()

	if command == "consumer" {
		consumer, err := sarama.NewConsumerFromClient(client)
		if err != nil {
			log.Fatal(err)
		}
		defer consumer.Close()
		loopConsumer(consumer, topic, partition)
	} else {
		// Any other -command value falls through to producer mode,
		// matching the documented "consumer|producer" usage.
		producer, err := sarama.NewAsyncProducerFromClient(client)
		if err != nil {
			log.Fatal(err)
		}
		defer producer.Close()
		loopProducer(producer, topic, partition)
	}
}
// genTLSConfig builds a *tls.Config for mutual TLS from a client
// certificate/key pair and a CA certificate file.
// It returns an error if any file cannot be read or parsed.
func genTLSConfig(clientcertfile, clientkeyfile, cacertfile string) (*tls.Config, error) {
	// Load the client certificate and its private key.
	clientcert, err := tls.LoadX509KeyPair(clientcertfile, clientkeyfile)
	if err != nil {
		return nil, err
	}
	// Load the CA certificate used to verify the broker.
	cacert, err := ioutil.ReadFile(cacertfile)
	if err != nil {
		return nil, err
	}
	cacertpool := x509.NewCertPool()
	// AppendCertsFromPEM reports whether any certificate was added; a false
	// return means the CA file contained no usable PEM data, which would
	// otherwise silently leave the pool empty and fail later at handshake.
	if !cacertpool.AppendCertsFromPEM(cacert) {
		return nil, fmt.Errorf("no valid CA certificates found in %q", cacertfile)
	}
	tlsConfig := tls.Config{
		RootCAs:      cacertpool,
		Certificates: []tls.Certificate{clientcert},
	}
	// tlsConfig.InsecureSkipVerify = true // only for test servers whose domain does not match the cert
	return &tlsConfig, nil
}
// loopProducer reads lines from stdin and publishes each non-empty line to
// topic, until the user types "exit"/"quit" or stdin is exhausted.
// The partition argument is accepted for signature symmetry with
// loopConsumer but is not used: the async producer's partitioner chooses
// the partition.
func loopProducer(producer sarama.AsyncProducer, topic string, partition int) {
	scanner := bufio.NewScanner(os.Stdin)
	fmt.Print("> ")
	for scanner.Scan() {
		text := scanner.Text()
		if text == "exit" || text == "quit" {
			break
		}
		if text != "" {
			producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.StringEncoder(text)}
			log.Printf("Produced message: [%s]\n", text)
		}
		fmt.Print("> ")
	}
	// Scan returns false on both EOF and read errors; surface the latter
	// instead of silently dropping it as the original did.
	if err := scanner.Err(); err != nil {
		log.Printf("stdin read error: %v", err)
	}
}
// loopConsumer consumes a single partition of topic, starting at the newest
// offset, and logs every message until the partition consumer is closed.
func loopConsumer(consumer sarama.Consumer, topic string, partition int) {
	partitionConsumer, err := consumer.ConsumePartition(topic, int32(partition), sarama.OffsetNewest)
	if err != nil {
		log.Println(err)
		return
	}
	defer partitionConsumer.Close()
	// Range over the channel: the loop ends cleanly when the consumer is
	// closed. The original bare receive in a for{} would spin on nil
	// messages (and nil-deref) once the channel closed, and carried
	// leftover "test-print" debug logging.
	for msg := range partitionConsumer.Messages() {
		log.Printf("Consumed message: [%s], offset: [%d]\n", msg.Value, msg.Offset)
	}
}
下面是通过 consumer group 获取集群全部分区消息的方式:
集群上所有机器发到该 topic 的消息,无论在哪个 partition,它都能收到。
package main
import (
"context"
"fmt"
"github.com/Shopify/sarama"
"log"
"os"
"os/signal"
"syscall"
"time"
)
// main runs a consumer-group client against a fixed broker and topic, and
// shuts down cleanly on SIGINT/SIGTERM.
func main() {
	// Give buffered log output a moment to drain before the process exits.
	defer func() {
		time.Sleep(time.Second)
	}()

	// Route sarama's internal logging to stdout.
	sarama.Logger = log.New(os.Stdout, fmt.Sprintf("[%s]", "consumer"), log.LstdFlags)

	// Default config; consumer groups require the broker version to be set.
	config := sarama.NewConfig()
	config.Version = sarama.V2_0_0_0
	config.Consumer.Offsets.Initial = sarama.OffsetNewest

	ctx, cancel := context.WithCancel(context.Background())
	client, err := sarama.NewConsumerGroup([]string{"192.168.147.61:9092"}, "group.kezhonglsclog", config)
	if err != nil {
		panic(err)
	}

	consumer := Consumer{}
	go func() {
		for {
			// Stop once shutdown has been requested; the original kept
			// retrying forever after cancel(), leaking this goroutine.
			if ctx.Err() != nil {
				return
			}
			// Consume blocks for the lifetime of a session and must be
			// called again after every rebalance.
			if err := client.Consume(ctx, []string{"my_test"}, &consumer); err != nil {
				// Retry transient errors after 5 seconds.
				time.Sleep(time.Second * 5)
			}
		}
	}()

	// Block until an interrupt/termination signal arrives, then shut down.
	sigterm := make(chan os.Signal, 1)
	signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM)
	<-sigterm
	cancel()
	if err := client.Close(); err != nil {
		panic(err)
	}
}
// Consumer implements sarama.ConsumerGroupHandler. It is stateless: all
// work happens per-message inside ConsumeClaim.
type Consumer struct {
}
// Setup is called at the beginning of a new session, before ConsumeClaim.
// No per-session initialization is needed here.
func (consumer *Consumer) Setup(s sarama.ConsumerGroupSession) error {
	return nil
}
// Cleanup is called at the end of a session, once all ConsumeClaim
// goroutines have exited. No per-session teardown is needed here.
func (consumer *Consumer) Cleanup(s sarama.ConsumerGroupSession) error {
	return nil
}
// ConsumeClaim processes messages from one claimed partition: it logs each
// message and marks it consumed so its offset is committed.
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	// The Messages channel is closed when the session ends (rebalance or
	// shutdown), which terminates this loop.
	for message := range claim.Messages() {
		// Fixed "partation" typo in the log format; message.Topic is
		// already a string, so no conversion is needed.
		log.Printf("Consumed topic: [%s], partition: [%d], offset: [%d], key: [%s], value: [%s]\n",
			message.Topic, message.Partition, message.Offset, string(message.Key), string(message.Value))
		session.MarkMessage(message, "")
	}
	return nil
}