package demo;

import java.util.Properties;

import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

/**
 * Kafka 0.8.x "old" producer API demo: sends numbered string messages to
 * {@link #TOPIC} and prints each payload to stdout.
 *
 * <p>NOTE: the Kafka producer type is referenced fully qualified below because
 * this demo class is itself named {@code Producer}; importing
 * {@code kafka.javaapi.producer.Producer} would clash with the class name.
 */
public class Producer {

    /** Topic name shared with the matching {@code Consumer} demo class. */
    public static final String TOPIC = "test";

    private final kafka.javaapi.producer.Producer<String, String> producer;

    private Producer() {
        Properties props = new Properties();
        // Broker list — the Kafka host and port are configured here.
        props.put("metadata.broker.list", "192.168.152.20:9092");
        // Serialization class for message values.
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // Serialization class for message keys.
        props.put("key.serializer.class", "kafka.serializer.StringEncoder");
        // request.required.acks:
        //  0  - the producer never waits for an acknowledgement from the broker
        //       (same behavior as 0.7). Lowest latency but weakest durability
        //       guarantees (some data will be lost when a server fails).
        //  1  - the producer gets an acknowledgement after the leader replica
        //       has received the data. Better durability, since the client
        //       waits until the server acknowledges the request as successful
        //       (only messages written to a now-dead leader but not yet
        //       replicated would be lost).
        //  -1 - the producer gets an acknowledgement after all in-sync
        //       replicas have received the data. Best durability: no messages
        //       are lost as long as at least one in-sync replica remains.
        props.put("request.required.acks", "1");
        producer = new kafka.javaapi.producer.Producer<String, String>(new ProducerConfig(props));
    }

    /**
     * Sends messages keyed/numbered from 1000 (inclusive) up to
     * {@code COUNT} (exclusive), printing each payload as it is sent.
     */
    void produce() {
        int messageNo = 1000;
        final int COUNT = 10000;
        while (messageNo < COUNT) {
            String key = String.valueOf(messageNo);
            String data = "hello kafka message " + key;
            producer.send(new KeyedMessage<String, String>(TOPIC, key, data));
            System.out.println(data);
            messageNo++;
        }
    }

    public static void main(String[] args) {
        new Producer().produce();
    }
}
package demo;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer; // unused in this demo; kept from original
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

/**
 * Kafka 0.8.x high-level consumer demo: joins the {@code jd-group} consumer
 * group via ZooKeeper, subscribes to {@link Producer#TOPIC}, and prints every
 * received message to stdout. Blocks forever in {@link #consume()}.
 */
public class Consumer {

    private final ConsumerConnector consumer;

    private Consumer() {
        Properties props = new Properties();
        // ZooKeeper connection string (host:port).
        props.put("zookeeper.connect", "192.168.152.20:2181");
        // "group.id" identifies the consumer group this client joins.
        props.put("group.id", "jd-group");
        // ZooKeeper session/connection timeout.
        props.put("zookeeper.session.timeout.ms", "4000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        // Start from the earliest available offset when no offset is committed.
        props.put("auto.offset.reset", "smallest");
        // Serialization class (carried over from the producer-side demo config).
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        ConsumerConfig config = new ConsumerConfig(props);
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
    }

    /**
     * Opens one stream on {@link Producer#TOPIC} and prints each message as it
     * arrives. Never returns under normal operation.
     */
    void consume() {
        // Request a single stream (thread) for the topic.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(Producer.TOPIC, new Integer(1));

        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

        Map<String, List<KafkaStream<String, String>>> consumerMap =
                consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
        KafkaStream<String, String> stream = consumerMap.get(Producer.TOPIC).get(0);
        ConsumerIterator<String, String> it = stream.iterator();
        // hasNext() blocks until a message is available, so this loops forever.
        while (it.hasNext()) {
            System.out.println(it.next().message());
        }
    }

    public static void main(String[] args) {
        new Consumer().consume();
    }
}
The ZooKeeper address is configured in the config/consumer.properties file under the Kafka installation directory.
Kafka producer and consumer demo.