没有行动,懒惰就会生根发芽!!!
具体代码在:https://gitee.com/hjx_RuGuoYunZhiDao/strom-huang-go/tree/master/go_kafka
1、写在前面
本篇代码基本是ctrl+v 工程,原文代码是:https://www.shouxicto.com/article/334.html
万分感谢前辈提供的帮助,写这一篇主要是做一个自己的记录,同时看代码的时候理解代码才是自己的东西!!!
2、需要的环境
2.1、kafka
kafka 安装和使用在网上有很多教程,这里不做介绍
2.2、kafka 借助的git库
github.com/Shopify/sarama --kafka
github.com/bsm/sarama-cluster —kafka消费组
3、具体代码
3.1、 生产者
import (
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"time"
"github.com/Shopify/sarama"
"github.com/golang/glog"
)
// Produce sends a single "Hello World!" message to topic "test0" using a
// synchronous producer and prints the partition/offset it landed on.
func Produce() {
	config := sarama.NewConfig()
	// WaitForAll (-1): a send completes only after all in-sync replicas ack.
	config.Producer.RequiredAcks = sarama.WaitForAll
	// Route each message to a random partition.
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	// SyncProducer requires Return.Successes to be true.
	config.Producer.Return.Successes = true

	client, err := sarama.NewSyncProducer([]string{"kafka_master:9092"}, config)
	if err != nil {
		// BUG FIX: original message said "producer close err" for a *creation* failure.
		fmt.Println("producer create err, ", err)
		return
	}
	defer client.Close()

	msg := &sarama.ProducerMessage{
		Topic: `test0`,
		Value: sarama.StringEncoder("Hello World!"),
	}
	pid, offset, err := client.SendMessage(msg)
	if err != nil {
		fmt.Println("send message failed, ", err)
		return
	}
	fmt.Printf("分区ID:%v, offset:%v \n", pid, offset)
}
// AsyncProducer publishes a random numbered message to topic "test0" once per
// second with an asynchronous producer. A helper goroutine drains the
// Successes/Errors channels so the producer's internal channels never block.
// Note: this function loops forever and never returns on the happy path.
func AsyncProducer() {
	const topic = "test0"

	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required so acks arrive on Successes()
	config.Producer.Timeout = 5 * time.Second

	p, err := sarama.NewAsyncProducer(strings.Split("kafka_master:9092", ","), config)
	if err != nil {
		// BUG FIX: the original deferred p.Close() BEFORE this check; on a
		// connection error p is nil and the deferred Close panics. Also log the
		// error instead of silently returning.
		glog.Errorln(err)
		return
	}
	defer p.Close()

	// Must drain both result channels, otherwise the producer eventually blocks.
	go func(p sarama.AsyncProducer) {
		errs := p.Errors()
		success := p.Successes()
		for {
			select {
			case err := <-errs:
				if err != nil {
					glog.Errorln(err)
				}
			case <-success:
			}
		}
	}(p)

	// Seed once; the original re-seeded the RNG on every iteration, which is
	// wasteful and can repeat values when the clock resolution is coarse.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	for {
		v := "async: " + strconv.Itoa(rng.Intn(10000))
		fmt.Fprintln(os.Stdout, v)
		p.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Value: sarama.ByteEncoder(v),
		}
		time.Sleep(time.Second * 1)
	}
}
3.2、消费者
package consumer
import (
"fmt"
"strings"
"sync"
"time"
"github.com/Shopify/sarama"
cluster "github.com/bsm/sarama-cluster"
"github.com/golang/glog"
)
// Consumer consumes every partition of topic "test0" from the newest offset,
// printing each message. One goroutine is started per partition; the function
// blocks until all partition consumers' message channels are closed.
func Consumer() {
	var wg sync.WaitGroup

	consumer, err := sarama.NewConsumer([]string{"kafka_master:9092"}, nil)
	if err != nil {
		// BUG FIX: Println does not interpret %s verbs; use Printf.
		fmt.Printf("Failed to start consumer: %s\n", err)
		return
	}
	// BUG FIX: the original never closed the consumer (resource leak).
	defer consumer.Close()

	partitionList, err := consumer.Partitions("test0") // all partition IDs of the topic
	if err != nil {
		fmt.Println("Failed to get the list of partition:, ", err)
		return
	}

	// BUG FIX: the original wrote `for partition := range partitionList`, which
	// iterates slice INDICES, not partition IDs — correct only by accident when
	// IDs happen to be 0..n-1.
	for _, partition := range partitionList {
		pc, err := consumer.ConsumePartition("test0", partition, sarama.OffsetNewest)
		if err != nil {
			fmt.Printf("Failed to start consumer for partition %d: %s\n", partition, err)
			return
		}
		wg.Add(1)
		// BUG FIX: the original goroutine declared an unnamed parameter
		// (`func(sarama.PartitionConsumer)`) so the body captured the shared
		// loop variable `pc`. Pass it explicitly.
		go func(pc sarama.PartitionConsumer) {
			defer wg.Done()
			defer pc.AsyncClose()
			// Blocks until a message arrives; loop ends when the channel closes.
			for msg := range pc.Messages() {
				fmt.Printf("Partition:%d, Offset:%d, key:%s, value:%s\n", msg.Partition, msg.Offset, string(msg.Key), string(msg.Value))
			}
		}(pc)
	}
	wg.Wait()
}
// ConsumerGroup joins the consumer group "test-consumer-group" on topic
// "test0", prints every message it receives, and marks each offset as
// consumed. Errors and rebalance notifications are drained in a side
// goroutine so their channels never block the consumer.
func ConsumerGroup() {
	const groupID = "test-consumer-group"

	cfg := cluster.NewConfig()
	cfg.Group.Return.Notifications = true
	cfg.Consumer.Offsets.CommitInterval = 1 * time.Second
	cfg.Consumer.Offsets.Initial = sarama.OffsetNewest // start from the newest offset

	brokers := strings.Split("kafka_master:9092", ",")
	topics := strings.Split("test0", ",")
	c, err := cluster.NewConsumer(brokers, groupID, topics, cfg)
	if err != nil {
		glog.Errorf("Failed open consumer: %v", err)
		return
	}
	defer c.Close()

	// Drain the error and rebalance-notification channels.
	go func(c *cluster.Consumer) {
		for {
			select {
			case err := <-c.Errors():
				glog.Errorln(err)
			case <-c.Notifications():
			}
		}
	}(c)

	for msg := range c.Messages() {
		fmt.Printf("Partition:%d, Offset:%d, key:%s, value:%s\n", msg.Partition, msg.Offset, string(msg.Key), string(msg.Value))
		// MarkOffset is not written to Kafka immediately; offsets not yet
		// committed may be lost if the process crashes.
		c.MarkOffset(msg, "")
	}
}
3.3、 主函数
package main
import (
"strom-huang-go/go_kafka/consumer"
)
// main runs the single-partition consumer demo. Swap the commented line to
// run the async producer instead.
func main() {
// produce.AsyncProducer()
consumer.Consumer()
}