Using Producer and ConsumerConnector
HelloWorldProducer.java
PackageCn.ljh.kafka.kafka_helloworld;Importjava.util.Date;Importjava.util.Properties;ImportJava.util.Random;ImportKafka.javaapi.producer.Producer;ImportKafka.producer.KeyedMessage;ImportKafka.producer.ProducerConfig; Public classHelloworldproducer { Public Static voidMain (string[] args) {LongEvents = Long.parselong (args[0]); Random Rnd=NewRandom (); Properties Props=NewProperties (); //Configure the broker address of the Kafka cluster, it is recommended to configure more than two, in order to avoid one failure, but do not need to complete, the cluster will automatically find the leader node. Props.put ("Metadata.broker.list", "192.168.137.176:9092,192.168.137.176:9093"); //to configure the serialization class for value//the serialization class of key Key.serializer.class can be configured separately, using the serialization class of value by defaultProps.put ("Serializer.class", "Kafka.serializer.StringEncoder"); //Configure Partitionner selection policy, optional configurationProps.put ("Partitioner.class", "Cn.ljh.kafka.kafka_helloworld". Simplepartitioner "); Props.put ("Request.required.acks", "1"); Producerconfig Config=Newproducerconfig (props); Producer<string, string> producer =NewProducer<string, string>(config); for(Longnevents = 0; Nevents < events; nevents++) { Longruntime =NewDate (). GetTime (); String IP= "192.168.2." + rnd.nextint (255); String msg= Runtime + ", www.example.com," +IP; Keyedmessage<string, string> data =NewKeyedmessage<string, string> ("Page_visits", IP, msg); Producer.send (data); } producer.close (); }}
SimplePartitioner.java
PackageCn.ljh.kafka.kafka_helloworld;ImportKafka.producer.Partitioner;Importkafka.utils.VerifiableProperties; Public classSimplepartitionerImplementsPartitioner { PublicSimplepartitioner (verifiableproperties props) {} Public intPartition (Object key,inta_numpartitions) { intPartition = 0; String Stringkey=(String) key; intoffset = Stringkey.lastindexof ('. ')); if(Offset > 0) {partition= Integer.parseint (stringkey.substring (offset+1))%a_numpartitions; } returnpartition; } }
ConsumerGroupExample.java
PackageCn.ljh.kafka.kafka_helloworld;ImportKafka.consumer.ConsumerConfig;ImportKafka.consumer.KafkaStream;ImportKafka.javaapi.consumer.ConsumerConnector;ImportJava.util.HashMap;Importjava.util.List;ImportJava.util.Map;Importjava.util.Properties;ImportJava.util.concurrent.ExecutorService;Importjava.util.concurrent.Executors;ImportJava.util.concurrent.TimeUnit; Public classConsumergroupexample {Private FinalConsumerconnector Consumer; Private FinalString topic; PrivateExecutorservice executor; Publicconsumergroupexample (String a_zookeeper, String a_groupid, String a_topic) {consumer=Kafka.consumer.Consumer.createJavaConsumerConnector (Createconsumerconfig (A_zookeeper, a_groupid)); This. Topic =A_topic; } Public voidshutdown () {if(Consumer! =NULL) Consumer.shutdown (); if(Executor! =NULL) Executor.shutdown (); Try { if(!executor.awaittermination (5000, Timeunit.milliseconds)) {System.out.println ("Timed out waiting-consumer threads to shut, exiting Uncleanly"); } } Catch(interruptedexception e) {System.out.println ("Interrupted during shutdown, exiting Uncleanly"); } } Public voidRuninta_numthreads) {Map<string, integer> topiccountmap =NewHashmap<string, integer>(); Topiccountmap.put (Topic,NewInteger (a_numthreads)); Map<string, list<kafkastream<byte[],byte[]>>> Consumermap =Consumer.createmessagestreams (TOPICCOUNTMAP); List<KafkaStream<byte[],byte[]>> streams =consumermap.get (topic); //Now launch all the threads//Executor =Executors.newfixedthreadpool (a_numthreads); //Now create a object to consume the messages// intThreadnumber = 0; for(FinalKafkastream Stream:streams) {Executor.submit (NewConsumertest (Stream, threadnumber)); Threadnumber++; } } Private Staticconsumerconfig createconsumerconfig (String a_zookeeper, String a_groupid) {Properties props=NewProperties (); Props.put ("Zookeeper.connect", A_zookeeper); Props.put ("Group.id", A_groupid); Props.put ("Zookeeper.session.timeout.ms", "400"); Props.put ("Zookeeper.sync.time.ms", "200"); Props.put 
("Auto.commit.interval.ms", "1000"); return Newconsumerconfig (props); } Public Static voidMain (string[] args) {//String zooKeeper = args[0];//String groupId = args[1];//String topic = args[2];//int threads = Integer.parseint (args[3]);String ZooKeeper= "192.168.137.176:2181,192.168.137.176:2182,192.168.137.176:2183"; String groupId= "Group1"; String Topic= "Page_visits"; intThreads = 5; consumergroupexample Example=Newconsumergroupexample (ZooKeeper, GroupId, topic); Example.run (threads); Try{Thread.Sleep (10000); } Catch(Interruptedexception IE) {} example.shutdown (); }}
ConsumerTest.java
PackageCn.ljh.kafka.kafka_helloworld;ImportKafka.consumer.ConsumerIterator;ImportKafka.consumer.KafkaStream; Public classConsumertestImplementsRunnable {PrivateKafkastream M_stream; Private intM_threadnumber; PublicConsumertest (Kafkastream A_stream,intA_threadnumber) {M_threadnumber=A_threadnumber; M_stream=A_stream; } Public voidrun () {Consumeriterator<byte[],byte[]> it =M_stream.iterator (); //threads will wait for a message to enter while(It.hasnext ()) System.out.println ("Thread" + M_threadnumber + ":" +NewString (It.next (). Message ())); System.out.println ("Shutting down Thread:" +m_threadnumber); }}
Java client sample code for Kafka (kafka_2.11-0.8.2.2)