C++ encapsulation of librdkafka


librdkafka is a cross-platform, open-source C/C++ client library for the Kafka messaging system. There is already plenty of information online about how to set up a Kafka server, so I will not repeat it here.

I set up a kafka_2.12-0.11.0.1 + zookeeper-3.4.10 server on Ubuntu 16.04 and successfully compiled librdkafka_0.11.0.orig.tar.gz on Ubuntu as well.
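For reference, building librdkafka from that source tarball on Ubuntu usually amounts to the standard configure/make sequence (the name of the extracted directory may differ on your machine):

tar xzf librdkafka_0.11.0.orig.tar.gz
cd librdkafka-0.11.0
./configure
make
sudo make install     // installs rdkafka.h and the librdkafka libraries
sudo ldconfig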


First, test whether the single-node (non-clustered) Kafka server starts successfully

1. cd /home/guoke/kafka/zookeeper-3.4.10/bin

2. export ZOOKEEPER_HOME=/home/guoke/kafka/zookeeper-3.4.10

3. export PATH=$ZOOKEEPER_HOME/bin:$PATH

4. $ZOOKEEPER_HOME/bin/zkServer.sh start    // start the ZooKeeper service

5. cd /home/guoke/kafka/kafka_2.12-0.11.0.1

6. nohup bin/kafka-server-start.sh config/server.properties &    // start the Kafka service

7. Open another command terminal and run bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test (if the "test" topic does not exist yet, see the note after this list)

8. Open yet another command terminal and run bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning

9. Type "Hello world!" in the producer terminal; if the consumer terminal displays the same string, the Kafka setup was successful.
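Note: if the "test" topic has not been created yet, it can be created beforehand with the topic tool that ships with Kafka 0.11 (adjust the ZooKeeper address to your setup):

bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test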


Second, use librdkafka on Win7 to develop a producer and a consumer, and relay messages through the Kafka server running on Ubuntu.

Think of it like a phone call: user A dials user B's number, A's voice first goes up to a communication satellite, and the satellite then relays the voice signal down to user B's phone. In this scenario, Kafka plays the role of the communication satellite, user A is the producer, and user B is the consumer.

Enough talk; on to the code.

------------------------ The following is the producer code ------------------------

// KafkaProducer.h -- C++ Kafka message producer class wrapping librdkafka
// 386520874@qq.com & 2017.10.10

#pragma once

#include "librdkafka/rdkafka.h"

class CKafkaProducer
{
public:
    rd_kafka_t * m_kafka_handle;                    // Kafka producer handle
    rd_kafka_topic_t * m_kafka_topic;               // Kafka topic handle
    rd_kafka_conf_t * m_kafka_conf;                 // Kafka global configuration
    rd_kafka_topic_conf_t * m_kafka_topic_conf;     // Kafka topic configuration
    rd_kafka_topic_partition_list_t * m_kafka_topic_partition_list;
    int m_partition;

public:
    CKafkaProducer();
    ~CKafkaProducer();
    int init(char *topic, char *brokers, int partition);   // e.g. topic="my_test"; brokers="192.168.1.42:9092"; partition=0
    int sendMessage(char *str, int len);                    // send a message to the Kafka server

    static void err_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque);
    static void throttle_cb(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque);
    static void offset_commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque);
    static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque);
};

// KafkaProducer.cpp

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>

#include "KafkaProducer.h"

CKafkaProducer::CKafkaProducer()
{
    m_kafka_handle = NULL;
    m_kafka_topic = NULL;
    m_kafka_conf = NULL;
    m_kafka_topic_conf = NULL;
    m_kafka_topic_partition_list = NULL;
    m_partition = RD_KAFKA_PARTITION_UA;
}

CKafkaProducer::~CKafkaProducer()
{
    if (m_kafka_handle) { rd_kafka_flush(m_kafka_handle, 10 * 1000); }   // wait up to 10 seconds for outstanding messages

    if (m_kafka_topic) { rd_kafka_topic_destroy(m_kafka_topic); }
    if (m_kafka_handle) { rd_kafka_destroy(m_kafka_handle); }
    if (m_kafka_topic_partition_list) { rd_kafka_topic_partition_list_destroy(m_kafka_topic_partition_list); }
}

int CKafkaProducer::init(char *topic, char *brokers, int partition)
{
    int ret = 0;
    rd_kafka_conf_res_t ret_conf = RD_KAFKA_CONF_OK;
    char errstr[512] = {0};

    m_kafka_conf = rd_kafka_conf_new();

    rd_kafka_conf_set_error_cb(m_kafka_conf, err_cb);
    rd_kafka_conf_set_throttle_cb(m_kafka_conf, throttle_cb);
    rd_kafka_conf_set_offset_commit_cb(m_kafka_conf, offset_commit_cb);
    rd_kafka_conf_set_stats_cb(m_kafka_conf, stats_cb);

    // --------- producer config -------------------
    ret_conf = rd_kafka_conf_set(m_kafka_conf, "queue.buffering.max.messages", "500000", errstr, sizeof(errstr));
    if (ret_conf != RD_KAFKA_CONF_OK) { printf("Error: rd_kafka_conf_set() failed 1; ret_conf=%d; errstr: %s\n", ret_conf, errstr); return -1; }

    ret_conf = rd_kafka_conf_set(m_kafka_conf, "message.send.max.retries", "3", errstr, sizeof(errstr));
    if (ret_conf != RD_KAFKA_CONF_OK) { printf("Error: rd_kafka_conf_set() failed 2; ret_conf=%d; errstr: %s\n", ret_conf, errstr); return -1; }

    ret_conf = rd_kafka_conf_set(m_kafka_conf, "retry.backoff.ms", "100", errstr, sizeof(errstr));   // value was garbled in the source; 100 ms (librdkafka's default) is assumed here
    if (ret_conf != RD_KAFKA_CONF_OK) { printf("Error: rd_kafka_conf_set() failed 3; ret_conf=%d; errstr: %s\n", ret_conf, errstr); return -1; }

    // --------- Kafka topic config -------------------
    m_kafka_topic_conf = rd_kafka_topic_conf_new();

    ret_conf = rd_kafka_topic_conf_set(m_kafka_topic_conf, "auto.offset.reset", "earliest", errstr, sizeof(errstr));
    if (ret_conf != RD_KAFKA_CONF_OK) { printf("Error: rd_kafka_topic_conf_set() failed 4; ret_conf=%d; errstr: %s\n", ret_conf, errstr); return -1; }

    m_kafka_topic_partition_list = rd_kafka_topic_partition_list_new(1);
    rd_kafka_topic_partition_list_add(m_kafka_topic_partition_list, topic, partition);   // more topics can be added here
    m_partition = partition;

    // --------- create the Kafka handle -------------------
    m_kafka_handle = rd_kafka_new(RD_KAFKA_PRODUCER, m_kafka_conf, errstr, sizeof(errstr));
    if (m_kafka_handle == NULL) { printf("Error: failed to create Kafka producer: %s\n", errstr); return -1; }

    // --------- add broker(s) -------------------
    if (brokers && rd_kafka_brokers_add(m_kafka_handle, brokers) < 1) { printf("Error: no valid brokers specified\n"); return -2; }

    m_kafka_topic = rd_kafka_topic_new(m_kafka_handle, topic, m_kafka_topic_conf);   // explicitly create the topic handle to avoid per-message lookups

    return ret;
}

int CKafkaProducer::sendMessage(char *str, int len)
{
    int ret = 0;

    if (str == NULL) { return -1; }
    if (len <= 0) { return -2; }

    int partition = m_kafka_topic_partition_list->elems[0].partition;

    char * buf = (char *)malloc(len);
    memcpy(buf, str, len);

    // ------------ send the message to the Kafka server ----------------
    // RD_KAFKA_MSG_F_FREE: librdkafka takes ownership of buf and frees it after delivery
    ret = rd_kafka_produce(m_kafka_topic, partition, RD_KAFKA_MSG_F_FREE, buf, len, NULL, 0, NULL);
    if (ret == -1)
    {
        rd_kafka_resp_err_t err = rd_kafka_last_error();

        if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) {
            printf("Error: no such partition: %d\n", partition);
        } else {
            printf("Error: produce error: %s%s\n", rd_kafka_err2str(err),
                err == RD_KAFKA_RESP_ERR__QUEUE_FULL ? " (backpressure)" : "");
        }

        rd_kafka_poll(m_kafka_handle, 10);   // poll to handle delivery reports
        free(buf);                           // librdkafka did not take ownership of buf on failure
        return -2;
    }

    rd_kafka_poll(m_kafka_handle, 0);   // poll to handle delivery reports

    return ret;
}

void CKafkaProducer::err_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque)
{
    printf("%% ERROR CALLBACK: %s: %s: %s\n", rd_kafka_name(rk), rd_kafka_err2str((rd_kafka_resp_err_t)err), reason);
}

void CKafkaProducer::throttle_cb(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque)
{
    printf("%% THROTTLED %dms by %s (%" PRId32 ")\n", throttle_time_ms, broker_name, broker_id);
}

void CKafkaProducer::offset_commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque)
{
    int verbosity = 1;

    if (err || verbosity >= 2) {
        printf("%% Offset commit of %d partition(s): %s\n", offsets->cnt, rd_kafka_err2str(err));
    }

    for (int i = 0; i < offsets->cnt; i++) {
        rd_kafka_topic_partition_t * rktpar = &offsets->elems[i];

        if (rktpar->err || verbosity >= 2) {
            printf("%%  %s [%" PRId32 "] @ %" PRId64 ": %s\n", rktpar->topic, rktpar->partition, rktpar->offset, rd_kafka_err2str(err));
        }
    }
}

int CKafkaProducer::stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque)
{
    printf("%s\n", json);
    return 0;
}

// Producer main.cpp : Defines the entry point for the console application.
//

#include <stdio.h>
#include <string.h>

#include "KafkaProducer.h"


int main(int argc, char *argv[])
{
    CKafkaProducer kp;

    char topic[] = "test";
    char brokers[] = "192.168.2.73:9092";
    int partition = 0;

    char str_msg[] = "Hello kafka!";
    int ret = 0;

    ret = kp.init(topic, brokers, partition);
    if (ret != 0) { printf("Error: kp.init(): ret=%d;\n", ret); return 0; }

    ret = kp.sendMessage(str_msg, strlen(str_msg));   // send the message to the Kafka server
    if (ret != 0) { printf("Error: kp.sendMessage(): ret=%d;\n", ret); return 0; }

	return 0;
}
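The original project targets Win7 (the code is from a console-application template), but the same two files also compile on Linux. A minimal build and run, assuming librdkafka has been installed system-wide as in the build steps above, might look like:

g++ main.cpp KafkaProducer.cpp -o producer -lrdkafka
./producer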

------------------------ The following is the consumer code ------------------------

// KafkaConsumer.h -- C++ Kafka message consumer class wrapping librdkafka
// 386520874@qq.com & 2017.10.10

#pragma once

#include "librdkafka/rdkafka.h"

typedef void (*consumer_callback)(rd_kafka_message_t *rkmessage, void *opaque);

class CKafkaConsumer
{
public:
    rd_kafka_t * m_kafka_handle;                    // Kafka consumer handle
    rd_kafka_topic_t * m_kafka_topic;               // Kafka topic handle
    rd_kafka_conf_t * m_kafka_conf;                 // Kafka global configuration
    rd_kafka_topic_conf_t * m_kafka_topic_conf;     // Kafka topic configuration
    rd_kafka_topic_partition_list_t * m_kafka_topic_partition_list;
    rd_kafka_queue_t * m_kafka_queue;
    consumer_callback m_consumer_callback;          // message callback function
    void * m_consumer_callback_param;               // parameter passed to the message callback

public:
    CKafkaConsumer();
    ~CKafkaConsumer();
    int init(char *topic, char *brokers, char *partitions, char *groupId, consumer_callback consumer_cb, void *param_cb);
    // e.g. topic="my_test"; brokers="192.168.1.42:9092"; partitions="0,1,2"; groupId="my_group"
    int getMessage();                               // receive messages from the Kafka server

    static void err_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque);
    static void throttle_cb(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque);
    static void offset_commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque);
    static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque);
    static void logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
    static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque);
};


// KafkaConsumer.cpp

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>

#include "KafkaConsumer.h"

CKafkaConsumer::CKafkaConsumer()
{
    m_kafka_handle = NULL;
    m_kafka_topic = NULL;
    m_kafka_conf = NULL;
    m_kafka_topic_conf = NULL;
    m_kafka_topic_partition_list = NULL;
    m_kafka_queue = NULL;
    m_consumer_callback = NULL;
    m_consumer_callback_param = NULL;
}

CKafkaConsumer::~CKafkaConsumer()
{
    if (m_kafka_handle) { rd_kafka_flush(m_kafka_handle, 10 * 1000); }   // wait up to 10 seconds

    if (m_kafka_queue) { rd_kafka_queue_destroy(m_kafka_queue); }
    if (m_kafka_topic) { rd_kafka_topic_destroy(m_kafka_topic); }
    if (m_kafka_handle) { rd_kafka_destroy(m_kafka_handle); }
    if (m_kafka_topic_partition_list) { rd_kafka_topic_partition_list_destroy(m_kafka_topic_partition_list); }
}

int CKafkaConsumer::init(char *topic, char *brokers, char *partitions, char *groupId, consumer_callback consumer_cb, void *param_cb)
{
    int ret = 0;
    rd_kafka_conf_res_t ret_conf = RD_KAFKA_CONF_OK;
    char errstr[512] = {0};

    if (topic == NULL) { return -1; }
    if (brokers == NULL) { return -1; }
    if (groupId == NULL) { return -1; }

    m_consumer_callback = consumer_cb;
    m_consumer_callback_param = param_cb;

    m_kafka_conf = rd_kafka_conf_new();

    rd_kafka_conf_set_error_cb(m_kafka_conf, err_cb);
    rd_kafka_conf_set_throttle_cb(m_kafka_conf, throttle_cb);
    rd_kafka_conf_set_offset_commit_cb(m_kafka_conf, offset_commit_cb);
    rd_kafka_conf_set_stats_cb(m_kafka_conf, stats_cb);
    rd_kafka_conf_set_log_cb(m_kafka_conf, logger);

    // --------- consumer config -------------------
    ret_conf = rd_kafka_conf_set(m_kafka_conf, "queued.min.messages", "1000000", errstr, sizeof(errstr));
    if (ret_conf != RD_KAFKA_CONF_OK) { printf("Error: rd_kafka_conf_set() failed 1; ret_conf=%d; errstr: %s\n", ret_conf, errstr); return -1; }

    ret_conf = rd_kafka_conf_set(m_kafka_conf, "session.timeout.ms", "6000", errstr, sizeof(errstr));
    if (ret_conf != RD_KAFKA_CONF_OK) { printf("Error: rd_kafka_conf_set() failed 2; ret_conf=%d; errstr: %s\n", ret_conf, errstr); return -1; }

    ret_conf = rd_kafka_conf_set(m_kafka_conf, "group.id", groupId, errstr, sizeof(errstr));
    if (ret_conf != RD_KAFKA_CONF_OK) { printf("Error: rd_kafka_conf_set() failed 3; ret_conf=%d; errstr: %s\n", ret_conf, errstr); return -1; }

    // --------- Kafka topic config -------------------
    m_kafka_topic_conf = rd_kafka_topic_conf_new();

    ret_conf = rd_kafka_topic_conf_set(m_kafka_topic_conf, "auto.offset.reset", "earliest", errstr, sizeof(errstr));
    if (ret_conf != RD_KAFKA_CONF_OK) { printf("Error: rd_kafka_topic_conf_set() failed 4; ret_conf=%d; errstr: %s\n", ret_conf, errstr); return -1; }

    m_kafka_topic_partition_list = rd_kafka_topic_partition_list_new(1);

    // ------------ parse the partition list, e.g. partitions="0,1,2" ------------------------
    int len = strlen(partitions);
    char * pTemp = new char[len + 1];
    char * pTemp2 = pTemp;
    sprintf(pTemp, "%s", partitions);

    while (*pTemp != '\0')
    {
        char * s = strstr(pTemp, ",");
        if (s != NULL) { *s = '\0'; }

        int partition = atoi(pTemp);
        rd_kafka_topic_partition_list_add(m_kafka_topic_partition_list, topic, partition);   // more topics can be added here

        if (s != NULL) { pTemp = s + 1; }
        else { break; }
    }

    if (pTemp2) { delete [] pTemp2; pTemp2 = NULL; }

    // --------- create the Kafka handle -------------------
    m_kafka_handle = rd_kafka_new(RD_KAFKA_CONSUMER, m_kafka_conf, errstr, sizeof(errstr));
    if (m_kafka_handle == NULL) { printf("Error: failed to create Kafka consumer: %s\n", errstr); return -1; }

    rd_kafka_poll_set_consumer(m_kafka_handle);   // redirect rd_kafka_poll() to consumer_poll()

    // --------- add broker(s) -------------------
    if (brokers && rd_kafka_brokers_add(m_kafka_handle, brokers) < 1) { printf("Error: no valid brokers specified\n"); return -2; }

    m_kafka_topic = rd_kafka_topic_new(m_kafka_handle, topic, m_kafka_topic_conf);   // explicitly create the topic handle to avoid per-message lookups

    //int64_t seek_offset = RD_KAFKA_OFFSET_END;   // RD_KAFKA_OFFSET_BEGINNING | RD_KAFKA_OFFSET_END | RD_KAFKA_OFFSET_STORED
    //rd_kafka_resp_err_t err = rd_kafka_seek(m_kafka_topic, partition, seek_offset, 2000);

    m_kafka_queue = rd_kafka_queue_new(m_kafka_handle);

    return ret;
}

int CKafkaConsumer::getMessage()
{
    int ret = 0;

    int partition_cnt = m_kafka_topic_partition_list->cnt;
    int64_t start_offset = RD_KAFKA_OFFSET_END;   // RD_KAFKA_OFFSET_BEGINNING | RD_KAFKA_OFFSET_END | RD_KAFKA_OFFSET_STORED

    // ------------ receive messages from the Kafka server ----------------
    for (int i = 0; i < partition_cnt; i++)
    {
        int partition = m_kafka_topic_partition_list->elems[i].partition;

        int r = rd_kafka_consume_start_queue(m_kafka_topic, partition, start_offset, m_kafka_queue);
        if (r == -1) { printf("Error: creating queue: %s\n", rd_kafka_err2str(rd_kafka_last_error())); return -1; }
    }

    while (1)
    {
        int r = rd_kafka_consume_callback_queue(m_kafka_queue, 1000, msg_consume, this);   // queue mode
        if (r <= 0) { rd_kafka_poll(m_kafka_handle, 1000); continue; }

        rd_kafka_poll(m_kafka_handle, 0);   // poll to handle stats callbacks
        //Sleep(1000);
        break;   // exit after the first batch of messages in this demo
    }

    // ---------- stop consuming ------------------------------
    for (int i = 0; i < partition_cnt; i++)
    {
        int partition = m_kafka_topic_partition_list->elems[i].partition;

        int r = rd_kafka_consume_stop(m_kafka_topic, partition);
        if (r == -1) { printf("Error: in consume_stop: %s\n", rd_kafka_err2str(rd_kafka_last_error())); }
    }

    return ret;
}

void CKafkaConsumer::err_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque)
{
    printf("%% ERROR CALLBACK: %s: %s: %s\n", rd_kafka_name(rk), rd_kafka_err2str((rd_kafka_resp_err_t)err), reason);
}

void CKafkaConsumer::throttle_cb(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque)
{
    printf("%% THROTTLED %dms by %s (%" PRId32 ")\n", throttle_time_ms, broker_name, broker_id);
}

void CKafkaConsumer::offset_commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque)
{
    int verbosity = 1;

    if (err || verbosity >= 2) {
        printf("%% Offset commit of %d partition(s): %s\n", offsets->cnt, rd_kafka_err2str(err));
    }

    for (int i = 0; i < offsets->cnt; i++) {
        rd_kafka_topic_partition_t * rktpar = &offsets->elems[i];

        if (rktpar->err || verbosity >= 2) {
            printf("%%  %s [%" PRId32 "] @ %" PRId64 ": %s\n", rktpar->topic, rktpar->partition, rktpar->offset, rd_kafka_err2str(err));
        }
    }
}

int CKafkaConsumer::stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque)
{
    printf("%s\n", json);
    return 0;
}

void CKafkaConsumer::logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf)
{
    fprintf(stdout, "RDKAFKA-%i-%s: %s: %s\n", level, fac, rd_kafka_name(rk), buf);
}

void CKafkaConsumer::msg_consume(rd_kafka_message_t *rkmessage, void *opaque)
{
    CKafkaConsumer * p = (CKafkaConsumer *)opaque;

    if (p && p->m_consumer_callback)
    {
        p->m_consumer_callback(rkmessage, p->m_consumer_callback_param);
        return;
    }

    if (rkmessage->err)
    {
        if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
        {
            printf("[INFO] Consumer reached end of %s [%" PRId32 "] message queue at offset %" PRId64 "\n",
                rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition, rkmessage->offset);
            return;
        }

        printf("Error: consume error for topic \"%s\" [%" PRId32 "] offset %" PRId64 ": %s\n",
            rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt) : "",
            rkmessage->partition, rkmessage->offset, rd_kafka_message_errstr(rkmessage));
        return;
    }

    if (rkmessage->key_len) {
        printf("Key: %d: %s\n", (int)rkmessage->key_len, (char *)rkmessage->key);
    }

    printf("%d: %s\n", (int)rkmessage->len, (char *)rkmessage->payload);
}

// Consumer main.cpp : Defines the entry point for the console application.
//

#include <stdio.h>

#include "KafkaConsumer.h"


static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque)
{
    printf("[MSG] %d: %s\n", (int)rkmessage->len, (char *)rkmessage->payload);
}

int main(int argc, char *argv[])
{
    CKafkaConsumer kc;

    char topic[] = "test";
    char brokers[] = "192.168.2.73:9092";
    char partitions[] = "0";
    char groupId[] = "my_group1";

    consumer_callback consumer_cb = msg_consume;   // register the message callback; users can supply their own function here
    void * param_cb = NULL;                        // e.g. param_cb = this;

    int ret = 0;

    ret = kc.init(topic, brokers, partitions, groupId, consumer_cb, param_cb);
    if (ret != 0) { printf("Error: kc.init(): ret=%d;\n", ret); return 0; }

    ret = kc.getMessage();   // receive messages from the Kafka server
    if (ret != 0) { printf("Error: kc.getMessage(): ret=%d;\n", ret); return 0; }

	return 0;
}
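The consumer can be built the same way. Since this example starts reading from RD_KAFKA_OFFSET_END, start the consumer first and then run the producer in another terminal so the "Hello kafka!" message shows up on the consumer side:

g++ main.cpp KafkaConsumer.cpp -o consumer -lrdkafka
./consumer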

-----------------------Test Results-------------------------------



Project Source Link: http://download.csdn.net/download/jfu22/10014222
