Go串連Kafka的例子好少,而支援offset追蹤者更少,但也是基礎的要求。「github.com/bsm/sarama-cluster」結合「github.com/Shopify/sarama」滿足了此要求。
並發量小時,可以用同步生產者,但是並發量大時,必須使用非同步生產者。
環境:
golang 1.8
kafka 0.10
centos 7.2
// Package main demonstrates Kafka consumer-group consumption with offset
// tracking, plus synchronous and asynchronous production, using
// github.com/Shopify/sarama and github.com/bsm/sarama-cluster.
package main

import (
	"fmt"
	"math/rand"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/Shopify/sarama"
	"github.com/bsm/sarama-cluster" // supports automatic consumer-group rebalancing and offset tracking
	"github.com/sdbaiguanghe/glog"
)

var (
	// topics is a comma-separated list of topic names to consume/produce.
	topics = "topics_1"
)

// consumer joins a consumer group, prints every received message to stdout,
// and marks its offset for the group's periodic commit.
func consumer() {
	groupID := "group-1"
	config := cluster.NewConfig()
	config.Group.Return.Notifications = true
	config.Consumer.Offsets.CommitInterval = 1 * time.Second
	config.Consumer.Offsets.Initial = sarama.OffsetNewest // start from the newest offset

	c, err := cluster.NewConsumer(strings.Split("localhost:9092", ","), groupID, strings.Split(topics, ","), config)
	if err != nil {
		glog.Errorf("Failed open consumer: %v", err)
		return
	}
	defer c.Close()

	// Drain the error and rebalance-notification channels so they never
	// block the consumer internals.
	go func(c *cluster.Consumer) {
		errors := c.Errors()
		noti := c.Notifications()
		for {
			select {
			case err := <-errors:
				glog.Errorln(err)
			case <-noti:
			}
		}
	}(c)

	for msg := range c.Messages() {
		fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Value)
		// MarkOffset does not write to Kafka immediately; an uncommitted
		// offset may be lost if the process crashes before the next commit.
		c.MarkOffset(msg, "")
	}
}

// syncProducer publishes a single message synchronously.
// Adequate when the produce rate is low.
func syncProducer() {
	config := sarama.NewConfig()
	// config.Producer.RequiredAcks = sarama.WaitForAll
	// config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Producer.Return.Successes = true
	config.Producer.Timeout = 5 * time.Second

	p, err := sarama.NewSyncProducer(strings.Split("localhost:9092", ","), config)
	if err != nil {
		glog.Errorln(err)
		return
	}
	// Defer only after the error check: on failure p is nil, and the
	// original `defer p.Close()` placed before the check would panic.
	defer p.Close()

	v := "sync: " + strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Intn(10000))
	fmt.Fprintln(os.Stdout, v)
	msg := &sarama.ProducerMessage{
		Topic: topics,
		Value: sarama.ByteEncoder(v),
	}
	if _, _, err := p.SendMessage(msg); err != nil {
		glog.Errorln(err)
		return
	}
}

// asyncProducer publishes a single message asynchronously.
// Required when the produce rate is high.
func asyncProducer() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required so Successes() delivers acks
	config.Producer.Timeout = 5 * time.Second

	p, err := sarama.NewAsyncProducer(strings.Split("localhost:9092", ","), config)
	if err != nil {
		// Log instead of silently returning (the original swallowed this error).
		glog.Errorln(err)
		return
	}
	// Defer only after the error check: on failure p is nil, and the
	// original `defer p.Close()` placed before the check would panic.
	defer p.Close()

	// Both channels MUST be drained, otherwise the producer deadlocks
	// once its internal buffers fill.
	go func(p sarama.AsyncProducer) {
		errors := p.Errors()
		success := p.Successes()
		for {
			select {
			case err := <-errors:
				if err != nil {
					glog.Errorln(err)
				}
			case <-success:
			}
		}
	}(p)

	v := "async: " + strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Intn(10000))
	fmt.Fprintln(os.Stdout, v)
	msg := &sarama.ProducerMessage{
		Topic: topics,
		Value: sarama.ByteEncoder(v),
	}
	p.Input() <- msg
}
參考:
http://pastebin.com/9ZsnP2eU
https://github.com/Shopify/sarama
https://github.com/bsm/sarama-cluster