From the Kafka download page, download version 0.8 and unzip it.
1. Edit server.properties in the config directory and set host.name to this machine's IP address. If the Kafka broker and the example code below run on the same machine, the default localhost also works without modification (a sketch of the edited property lines appears after step 4).
2. Edit zookeeper.properties in the config directory and set the dataDir property to the directory where you want ZooKeeper to keep its data.
3. If you want to configure a cluster, create a zoo_data directory under the Kafka installation directory (only needed the first time), create a myid file inside it with the content 1, and adjust zookeeper.properties accordingly. For details, see: SolrCloud installation under Tomcat (3).
4. Start Kafka.
Start the ZooKeeper server first (the trailing & lets you return to the command line while it keeps running):
bin/zookeeper-server-start.sh config/zookeeper.properties &
Then start the Kafka server:
bin/kafka-server-start.sh config/server.properties &
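For reference, the edited lines from steps 1 through 3 might look like the following. This is only a sketch: the IP address matches the one used in the Java examples below, and the dataDir path is illustrative, so substitute your own values.

# config/server.properties
host.name=10.103.22.47

# config/zookeeper.properties
dataDir=/opt/kafka/zoo_data

# zoo_data/myid (cluster setups only; holds this node's id)
1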
5. Create a new producer example
import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class KafkaTest {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zk.connect", "10.103.22.47:2181");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        props.put("metadata.broker.list", "10.103.22.47:9092");
        props.put("request.required.acks", "1");
        props.put("partitioner.class", "com.xq.SimplePartitioner");

        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);

        // Key the message by an IP string; the partitioner maps the key to a partition.
        String ip = "192.168.2.3";
        String msg = "This is a message!";
        KeyedMessage<String, String> data = new KeyedMessage<String, String>("test", ip, msg);
        producer.send(data);
        producer.close();
    }
}
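The producer config points partitioner.class at com.xq.SimplePartitioner, which the article never shows; without such a class on the classpath the producer will not start. Below is a minimal sketch of what it could look like, assuming the non-generic kafka.producer.Partitioner interface of 0.8.1+ (on 0.8.0 the interface is generic, Partitioner<String>); the hash-and-modulo logic is only an illustration. Alternatively, drop the partitioner.class property and Kafka falls back to its default partitioner.

package com.xq;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

// Minimal sketch of the SimplePartitioner referenced by partitioner.class.
// Kafka 0.8 instantiates the partitioner reflectively and passes the
// producer properties to this constructor.
public class SimplePartitioner implements Partitioner {

    public SimplePartitioner(VerifiableProperties props) {
    }

    // Map the message key (the IP string in the producer example) to a partition.
    public int partition(Object key, int numPartitions) {
        return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
    }
}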
6. Create a consumer example
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class ConsumerSample {
    public static void main(String[] args) {
        // Specify some consumer properties
        Properties props = new Properties();
        props.put("zookeeper.connect", "10.103.22.47:2181");
        props.put("zookeeper.connectiontimeout.ms", "1000000");
        props.put("group.id", "test_group");

        // Create the connection to the cluster
        ConsumerConfig consumerConfig = new ConsumerConfig(props);
        ConsumerConnector connector = Consumer.createJavaConsumerConnector(consumerConfig);

        // Ask for two streams of the "test" topic and process them in a small thread pool
        Map<String, Integer> topics = new HashMap<String, Integer>();
        topics.put("test", 2);
        Map<String, List<KafkaStream<byte[], byte[]>>> topicMessageStreams =
                connector.createMessageStreams(topics);
        List<KafkaStream<byte[], byte[]>> streams = topicMessageStreams.get("test");

        ExecutorService threadPool = Executors.newFixedThreadPool(2);
        for (final KafkaStream<byte[], byte[]> stream : streams) {
            threadPool.submit(new Runnable() {
                public void run() {
                    for (MessageAndMetadata<byte[], byte[]> msgAndMetadata : stream) {
                        // Process each message
                        System.out.println("topic: " + msgAndMetadata.topic());
                        // In 0.8 the stream already delivers the decoded payload as a byte[]
                        String content = new String(msgAndMetadata.message());
                        System.out.println("message content: " + content);
                    }
                }
            });
        }
    }
}
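As written, the consumer runs until the process is killed. If you want it to exit cleanly (release its ZooKeeper registration and stop the worker threads), one possibility, sketched here as an assumption rather than something from the original article, is a shutdown hook added at the end of main(); it requires connector and threadPool to be declared final (or Java 8+, where effectively-final locals suffice):

// Hypothetical addition at the end of main(): close the Kafka connection and
// the worker threads when the JVM receives Ctrl+C / SIGTERM.
Runtime.getRuntime().addShutdownHook(new Thread() {
    public void run() {
        connector.shutdown();
        threadPool.shutdown();
    }
});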
Start the consumer example first, then start the producer example; you should see the message arrive immediately.
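Independently of the Java consumer, the console consumer that ships with Kafka 0.8 can be used to check that the producer's messages actually reach the broker; a sketch, assuming the same ZooKeeper address as above:

bin/kafka-console-consumer.sh --zookeeper 10.103.22.47:2181 --topic test --from-beginning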
Permanent link to this article: http://www.chepoo.com/kafka-single-development-environment-example.html