There are few examples of connecting to Kafka from Go, and even fewer that support offset tracking, even though it is a basic requirement. This requirement is met by "github.com/bsm/sarama-cluster" combined with "github.com/Shopify/sarama".
When concurrency is low, a synchronous producer can be used; when concurrency is high, an asynchronous producer must be used.
Environment:
Golang 1.8
Kafka 0.10
CentOS 7.2
Package main import ("FMT" "Math/rand" "OS" "StrConv" "Strings" "Time" "github.com/shopify/ Sarama "" Github.com/bsm/sarama-cluster "//support automatic Consumer-group Rebalancing and offset tracking" GitHub
. Com/sdbaiguanghe/glog ") var (topics =" topics_1 ")//Consumer consumer Func consumer () {GroupID: =" Group-1 " Config: = cluster. Newconfig () config. Group.Return.Notifications = True CONFIG. Consumer.Offsets.CommitInterval = 1 * time. Second CONFIG. Consumer.Offsets.Initial = Sarama. Offsetnewest//Initial start from the latest offset C, err: = cluster. Newconsumer (Strings. Split ("localhost:9092", ","), GroupID, strings. Split (topics, ","), config) if err! = Nil {glog. Errorf ("Failed Open Consumer:%v", err) return} defer C.close () go func (c *cluster.
Consumer) {Errors: = C.errors () Noti: = C.notifications () for {select { Case ERR: = <-errors:glog. ERRORLN (Err) Case <-noti:}}} (c) for msg: = Range c.messages () {FMT . fprintf (OS. Stdout, "%s/%d/%d\t%s\n", Msg. Topic, Msg. Partition, Msg. Offset, Msg. Value) C.markoffset (msg, "")//markoffset is not a real-time write Kafka, it is possible to discard uncommitted offset}}//Syncproducer sync producer//in the program crash The amount of concurrent hours that can be used in this way is Func syncproducer () {config: = Sarama. Newconfig ()//config. Producer.requiredacks = Sarama. Waitforall//CONFIG. Producer.partitioner = Sarama. Newrandompartitioner CONFIG. Producer.Return.Successes = True CONFIG. Producer.timeout = 5 * time. Second p, err: = Sarama. Newsyncproducer (Strings. Split ("localhost:9092", ","), config) defer p.close () if err! = Nil {glog. ERRORLN (ERR) return} V: = "Sync:" + StrConv. Itoa (Rand. New (Rand. Newsource (time. Now (). Unixnano ())). INTN (10000)) FMT. Fprintln (OS. Stdout, v) msg: = &sarama. producermessage{Topic:topics, Value:sarama. Byteencoder (v),} If _, _, Err: = P.sendmessage (msg); Err! = Nil {glog. ERRORLN (ERR) return}}//Asyncproducer async producer//concurrency is large, this method must be used for Func asyncproducer () {config: = Sarama. Newconfig () config. Producer.Return.Successes = TRUE//must have this option CONFIG. Producer.timeout = 5 * time. Second p, err: = Sarama. Newasyncproducer (Strings.
Split ("localhost:9092", ","), config) defer p.close () if err! = nil {return}//must have this anonymous function content Go func (P Sarama.
Asyncproducer) {Errors: = P.errors () Success: = P.successes () for {select { Case ERR: = <-errors:if Err! = Nil {glog. ERRORLN (ERR)} Case <-success:}}} (P) V: = "Async:" + Strco Nv. Itoa (Rand. New (Rand. Newsource (time. Now (). Unixnano ())). INTN (10000)) FMT. Fprintln (OS. Stdout, v) msg: = &sarama. producermessage{Topic:topics, ValuE:sarama.
Byteencoder (v),} p.input () <-msg}
Reference:
http://pastebin.com/9ZsnP2eU
Https://github.com/Shopify/sarama
https://github.com/bsm/sarama-cluster