Java實現Kafka讀寫筆記
1.POM.XML
<dependencies>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.8.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<version>0.8.2.1</version>
</dependency>
</dependencies>
2.生產者
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import java.util.Properties;
public class RunKafkaProduce {
    /** String key/value producer using the legacy (0.8.x) Scala client API. */
    private final Producer<String, String> producer;
    /** Topic every message is published to; must match the consumer side. */
    public final static String TOPIC = "logstest";
    private RunKafkaProduce(){
        Properties props = new Properties();
        // Broker bootstrap list as host:port (comma-separated for multiple brokers).
        props.put("metadata.broker.list", "172.19.4.230:9092");
        // Serializer class for message values.
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // Serializer class for message keys.
        props.put("key.serializer.class", "kafka.serializer.StringEncoder");
        // request.required.acks:
        //  0 -> producer never waits for a broker ack: lowest latency, weakest
        //       durability (data lost if the server fails).
        //  1 -> ack after the leader replica has the data: messages written to a
        //       now-dead leader but not yet replicated can still be lost.
        // -1 -> ack after all in-sync replicas have the data: strongest durability,
        //       no loss as long as at least one in-sync replica survives.
        props.put("request.required.acks","-1");
        producer = new Producer<String, String>(new ProducerConfig(props));
    }
    /**
     * Sends 100 numbered "Hello kafka" messages (keys "1".."100") to {@link #TOPIC},
     * printing each one, then prints a summary and closes the producer.
     */
    void produce() {
        final int COUNT = 101;
        int messageCount = 0;
        try {
            // Fix: the original kept two counters (messageNo and messageCount) that
            // always held the same value; a single loop variable suffices.
            for (int messageNo = 1; messageNo < COUNT; messageNo++) {
                String key = String.valueOf(messageNo);
                String data = "Hello kafka message :" + key;
                producer.send(new KeyedMessage<String, String>(TOPIC, key ,data));
                System.out.println(data);
                messageCount++;
            }
            System.out.println("Producer端一共產生了" + messageCount + "條訊息!");
        } finally {
            // Fix: the original never closed the producer, leaking its I/O threads
            // and sockets and potentially dropping buffered messages on exit.
            producer.close();
        }
    }
    public static void main( String[] args )
    {
        new RunKafkaProduce().produce();
    }
}
3.消費者
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;
import org.apache.kafka.clients.producer.KafkaProducer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* ////////////////////////////////////////////////////////////////////
* // _ooOoo_ //
* // o8888888o //
* // 88" . "88 //
* // (| ^_^ |) //
* // O\ = /O //
* // ____/`---'\____ //
* // .' \\| |// `. //
* // / \\||| : |||// \ //
* // / _||||| -:- |||||- \ //
* // | | \\\ - /// | | //
* // | \_| ''\---/'' | | //
* // \ .-\__ `-` ___/-. / //
* // ___`. .' /--.--\ `. . ___ //
* // ."" '< `.___\_<|>_/___.' >'"". //
* // | | : `- \`.;`\ _ /`;.`/ - ` : | | //
* // \ \ `-. \_ __\ /__ _/ .-` / / //
* // ========`-.____`-.___\_____/___.-`____.-'======== //
* // `=---=' //
* // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ //
* // 佛祖保佑 再無Bug //
* ////////////////////////////////////////////////////////////////////
* User:Klin
* Date:2017/4/18 0018
*/
public class RunKafkaConsumer {
    /** High-level consumer connector from the legacy (0.8.x) Scala client API. */
    private final ConsumerConnector consumer;
    /** Topic to consume from; must match the producer side. */
    private final static String TOPIC="logstest";
    private RunKafkaConsumer(){
        Properties props=new Properties();
        // ZooKeeper quorum: the 0.8 high-level consumer coordinates via ZooKeeper.
        props.put("zookeeper.connect","zero230:2181");
        // Consumer group id. (Fix: the original comment mislabelled this as "topic".)
        props.put("group.id","logstest");
        // ZooKeeper session timeout and follower sync lag tolerance.
        props.put("zookeeper.session.timeout.ms", "4000");
        props.put("zookeeper.sync.time.ms", "200");
        // Auto-commit consumed offsets once per second.
        props.put("auto.commit.interval.ms", "1000");
        // With no committed offset, start from the earliest available message.
        props.put("auto.offset.reset", "smallest");
        // Fix: dropped "serializer.class" — it is a producer-only property with no
        // effect on a consumer; decoding is done by the StringDecoders in consume().
        ConsumerConfig config=new ConsumerConfig(props);
        consumer= kafka.consumer.Consumer.createJavaConsumerConnector(config);
    }
    /**
     * Prints every message received on {@link #TOPIC}, emitting a summary line when
     * the 100th message arrives. Blocks indefinitely: the stream iterator's
     * hasNext() waits for the next message, so this method never returns.
     */
    void consume(){
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        // Request a single stream (consuming thread) for the topic.
        // Fix: Integer.valueOf over the deprecated boxing constructor new Integer(1).
        topicCountMap.put(TOPIC, Integer.valueOf(1));
        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
        Map<String, List<KafkaStream<String, String>>> consumerMap =
                consumer.createMessageStreams(topicCountMap,keyDecoder,valueDecoder);
        KafkaStream<String, String> stream = consumerMap.get(TOPIC).get(0);
        ConsumerIterator<String, String> it = stream.iterator();
        int messageCount = 0;
        while (it.hasNext()){
            System.out.println(it.next().message());
            messageCount++;
            // Summary fires exactly once, on the 100th message; consumption continues.
            if(messageCount == 100){
                System.out.println("Consumer端一共消費了" + messageCount + "條訊息!");
            }
        }
    }
    public static void main(String[] args) {
        new RunKafkaConsumer().consume();
    }
}
相關文章
- 《Kafka入門與實踐》讀書筆記Kafka筆記
- Kafka文件閱讀筆記(一)Kafka筆記
- 讀寫者問題-java實現Java
- 《自我實現的人》讀書筆記筆記
- Java Thread實現讀寫同步 (轉)Javathread
- 7.7 實現程式記憶體讀寫記憶體
- kafka 筆記Kafka筆記
- 【讀書筆記】Java併發機制的底層實現原理筆記Java
- Java學習筆記-----從套接字中讀寫資料Java筆記
- LevelDB學習筆記 (2): 整體概覽與讀寫實現細節筆記
- Effective Java 讀書筆記Java筆記
- Lua設計與實現--讀書筆記筆記
- 《Redis設計與實現》讀書筆記Redis筆記
- 《Kafka筆記》1、Kafka初識Kafka筆記
- Kafka 學習筆記(二) :初探 KafkaKafka筆記
- 《Kafka筆記》3、Kafka高階APIKafka筆記API
- Kafka學習筆記(二) :初探KafkaKafka筆記
- 《資料庫系統實現》讀書筆記資料庫筆記
- java讀書筆記---垃圾回收Java筆記
- Effective Java 讀書筆記(2)Java筆記
- Java程式設計師乾貨學習筆記—Spring結合MyBatis實現資料庫讀寫分離Java程式設計師筆記SpringMyBatis資料庫
- 筆記:初識Kafka筆記Kafka
- Kafka 學習筆記Kafka筆記
- 現代作業系統-原理與實現【讀書筆記】作業系統筆記
- 《Effective C++》第5章 實現-讀書筆記C++筆記
- Java 併發包中的讀寫鎖及其實現分析Java
- 讀書筆記-----Java中的引用筆記Java
- Effective Java讀書筆記(目錄)Java筆記
- head first java讀書筆記Java筆記
- 《Java8實戰》-讀書筆記第二章Java筆記
- Java 併發程式設計實踐 讀書筆記四Java程式設計筆記
- Amoeba實現讀寫分離
- kafka學習筆記(一)Kafka筆記
- 讀書寫筆記-王爽《組合語言》筆記組合語言
- C++學習筆記----讀寫檔案C++筆記
- 【kafka學習筆記】kafka的基本概念Kafka筆記
- Flink 1.9 實戰:使用 SQL 讀取 Kafka 並寫入 MySQLKafkaMySql
- Java併發程式設計實戰--讀書筆記(目錄)Java程式設計筆記