Initial commit

This commit is contained in:
Cruise Zhao
2020-04-06 19:57:19 +08:00
commit 2fc86fd0a1
42 changed files with 4056 additions and 0 deletions

55
kafka/README.md Normal file
View File

@@ -0,0 +1,55 @@
# kafka cluster封装
> 目前只封装了 consumer,单个 consumer 支持同时消费一个 topic。
> 如果指定了 group,且 group 名称开头不为 'test_consumer_',则认为是正式订阅,会在处理完成后标记消息已经被消费;
> 否则会生成随机的 group 名称,用于测试。
## 示例代码如下
```go
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"golib/kafka"
)
// 回调的消息处理函数,用户自己实现的消息处理逻辑
func processor(msg []byte) {
fmt.Println("recv msg:", string(msg))
}
func main() {
// 配置,可以从文件加载
cfg := kafka.Config{
Brokers: []string{"test.kafka.o-kash.com:9092"},
Topic: "charge_success",
Workers: 5, // 并发的回调函数数量不是partition的数量
}
if consumer := kafka.NewConsumer(&cfg, processor); consumer != nil {
consumer.Run()
// 监控退出信号通知consumer退出
ch := make(chan os.Signal)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
<-ch
consumer.Stop()
}
}
```
## 产生测试用的消息
使用官方工具
```shell
./kafka-console-producer.sh --broker-list test.kafka.o-kash.com:9092 --topic charge_success
```

123
kafka/kafka.go Normal file
View File

@@ -0,0 +1,123 @@
package kafka
import (
	"fmt"
	"math/rand"
	"strings"
	"sync"
	"time"

	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"

	"globalfintech/golib/tlog"
)
// Config holds the settings needed to build a Kafka cluster consumer.
// Fields carry TOML tags so the struct can be loaded from a config file.
type Config struct {
	Brokers []string `toml:"brokers"` // Kafka broker addresses (host:port)
	Topic   string   `toml:"topic"`   // single topic to consume
	Group   string   `toml:"group"`   // consumer group; empty means a random test group is generated
	Workers int      `toml:"workers"` // number of concurrent handler goroutines (not partitions)
	Oldest  bool     `toml:"oldest"`  // start from the oldest offset instead of the newest
}
// Consumer wraps a sarama-cluster consumer and fans messages out to a
// pool of worker goroutines that invoke a user-supplied handler.
type Consumer struct {
	test     bool              // group name starts with TEST_PREFIX; offsets are never committed
	consumer *cluster.Consumer // underlying cluster consumer
	workers  int               // number of worker goroutines started by Run
	handler  func([]byte)      // user callback invoked with each message value
	stop     chan bool         // closed by Stop to signal workers to exit
	wg       *sync.WaitGroup   // tracks running workers so Stop can wait for them
}
const (
	// TEST_PREFIX marks consumer-group names that are for testing only;
	// such groups never commit offsets (see Worker). Auto-generated group
	// names (used when Config.Group is empty) carry this prefix.
	// NOTE(review): Go convention would name this TestPrefix, but renaming
	// an exported constant would break external callers.
	TEST_PREFIX = "test_consumer_"
)
// NewConsumer creates a cluster consumer for cfg.Topic and returns a
// Consumer that dispatches each message payload to hdl. It returns nil
// when the underlying cluster consumer cannot be created.
//
// If cfg.Group is empty, a random group name prefixed with TEST_PREFIX is
// generated; such test groups never commit offsets (see Worker).
func NewConsumer(cfg *Config, hdl func([]byte)) *Consumer {
	config := cluster.NewConfig()
	config.Consumer.Return.Errors = true
	config.Group.Return.Notifications = true
	if cfg.Oldest {
		config.Consumer.Offsets.Initial = sarama.OffsetOldest
	} else {
		config.Consumer.Offsets.Initial = sarama.OffsetNewest
	}

	group := cfg.Group
	if group == "" {
		// Use a clock-seeded source: the global math/rand source is
		// deterministic when unseeded, so every process restart would
		// otherwise generate the same "random" test group name and
		// unexpectedly share consumer-group state between test runs.
		rng := rand.New(rand.NewSource(time.Now().UnixNano()))
		group = fmt.Sprintf("%s%.8x", TEST_PREFIX, rng.Int63())
	}

	tlog.Infof("NewConsumer() brokers=%#v, topic=%s, workers=%d, group=%s", cfg.Brokers, cfg.Topic, cfg.Workers, group)
	csm, err := cluster.NewConsumer(cfg.Brokers, group, []string{cfg.Topic}, config)
	if err != nil {
		tlog.Errorf("NewConsumer() create fail: %s", err)
		return nil
	}
	tlog.Infof("NewConsumer() Created OK! %#v", csm)

	// Drain the error and notification channels so the consumer does not
	// block. Both channels are closed by consumer.Close(), which ends
	// these goroutines.
	go func() {
		for err := range csm.Errors() {
			tlog.Errorf("KafkaConsumer<brokers=%#v, group=%s> Error: %s", cfg.Brokers, group, err.Error())
		}
	}()
	go func() {
		for ntf := range csm.Notifications() {
			tlog.Infof("KafkaConsumer<brokers=%#v, group=%s> Notice: %#v", cfg.Brokers, group, ntf)
		}
	}()

	return &Consumer{
		test:     strings.HasPrefix(group, TEST_PREFIX),
		consumer: csm,
		workers:  cfg.Workers,
		handler:  hdl,
		stop:     make(chan bool),
		wg:       &sync.WaitGroup{},
	}
}
// Run launches the configured number of worker goroutines. It returns
// immediately; use Stop to shut the workers down and release resources.
func (c *Consumer) Run() {
	for i := 0; i < c.workers; i++ {
		// Register with the WaitGroup BEFORE spawning the goroutine.
		// Registering inside the goroutine (as the original code did)
		// races with Stop: wg.Wait() could observe a zero counter and
		// close the consumer while workers were still processing.
		c.wg.Add(1)
		go c.Worker()
	}
}

// Stop signals all workers to exit, waits for them to finish, then closes
// the underlying cluster consumer. It must be called at most once: a
// second call would close the stop channel again and panic.
func (c *Consumer) Stop() {
	close(c.stop)
	c.wg.Wait()
	if err := c.consumer.Close(); err != nil {
		tlog.Errorf("KafkaConsumer Stop() close fail: %s", err)
	}
}

// Worker is a single message-processing loop: it invokes the user handler
// for each message value and, for non-test groups, marks the offset as
// consumed. It exits when Stop closes the stop channel.
//
// Run registers each Worker with the WaitGroup before spawning it, so
// Worker itself only signals completion.
func (c *Consumer) Worker() {
	defer c.wg.Done()
	for {
		select {
		case msg, ok := <-c.consumer.Messages():
			if !ok {
				// Channel closed (consumer shut down); keep looping
				// until the stop signal arrives.
				continue
			}
			c.handler(msg.Value)
			// Test groups never commit offsets, so ad-hoc test runs
			// cannot disturb the real consumer-group state.
			if !c.test {
				c.consumer.MarkOffset(msg, "done")
			}
		case <-c.stop:
			tlog.Info("KafkaConsumer stop signal received!")
			return
		}
	}
}