Kafka詳解五、Kafka Consumer的底層API- SimpleConsumer
五柳-先生發表於2015-11-17
問題導讀
1.Kafka如何實現和Consumer之間的互動?
2.使用SimpleConsumer有哪些弊端呢?
1.Kafka提供了兩套API給Consumer
第一種高度抽象的Consumer API,它使用起來簡單、方便,但是對於某些特殊的需求我們可能要用到第二種更底層的API,那麼先介紹下第二種API能夠幫助我們做哪些事情
複製程式碼
轉載: http://www.aboutyun.com/thread-11117-1-1.html
1.Kafka如何實現和Consumer之間的互動?
2.使用SimpleConsumer有哪些弊端呢?
1.Kafka提供了兩套API給Consumer
- The high-level Consumer API
- The SimpleConsumer API
第一種高度抽象的Consumer API,它使用起來簡單、方便,但是對於某些特殊的需求我們可能要用到第二種更底層的API,那麼先介紹下第二種API能夠幫助我們做哪些事情
- 一個訊息讀取多次
- 在一個處理過程中只消費Partition其中的一部分訊息
- 新增事務管理機制以保證訊息被處理且僅被處理一次
- 必須在程式中跟蹤offset值
- 必須找出指定Topic Partition中的lead broker
- 必須處理broker的變動
- 從所有活躍的broker中找出哪個是指定Topic Partition中的leader broker
- 找出指定Topic Partition中的所有備份broker
- 構造請求
- 傳送請求查詢資料
- 處理leader broker變更
-
package bonree.consumer;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;
-
public class SimpleExample {
-
private List<String> m_replicaBrokers = new ArrayList<String>();
-
-
public SimpleExample() {
-
m_replicaBrokers = new ArrayList<String>();
-
}
-
-
public static void main(String args[]) {
-
SimpleExample example = new SimpleExample();
-
// 最大讀取訊息數量
-
long maxReads = Long.parseLong("3");
-
// 要訂閱的topic
-
String topic = "mytopic";
-
// 要查詢的分割槽
-
int partition = Integer.parseInt("0");
-
// broker節點的ip
-
List<String> seeds = new ArrayList<String>();
-
seeds.add("192.168.4.30");
-
seeds.add("192.168.4.31");
-
seeds.add("192.168.4.32");
-
// 埠
-
int port = Integer.parseInt("9092");
-
try {
-
example.run(maxReads, topic, partition, seeds, port);
-
} catch (Exception e) {
-
System.out.println("Oops:" + e);
-
e.printStackTrace();
-
}
-
}
-
-
public void run(long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers, int a_port) throws Exception {
-
// 獲取指定Topic partition的後設資料
-
PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
-
if (metadata == null) {
-
System.out.println("Can't find metadata for Topic and Partition. Exiting");
-
return;
-
}
-
if (metadata.leader() == null) {
-
System.out.println("Can't find Leader for Topic and Partition. Exiting");
-
return;
-
}
-
String leadBroker = metadata.leader().host();
-
String clientName = "Client_" + a_topic + "_" + a_partition;
-
-
SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
-
long readOffset = getLastOffset(consumer, a_topic, a_partition, kafka.api.OffsetRequest.EarliestTime(), clientName);
-
int numErrors = 0;
-
while (a_maxReads > 0) {
-
if (consumer == null) {
-
consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
-
}
-
FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(a_topic, a_partition, readOffset, 100000).build();
-
FetchResponse fetchResponse = consumer.fetch(req);
-
-
if (fetchResponse.hasError()) {
-
numErrors++;
-
// Something went wrong!
-
short code = fetchResponse.errorCode(a_topic, a_partition);
-
System.out.println("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
-
if (numErrors > 5)
-
break;
-
if (code == ErrorMapping.OffsetOutOfRangeCode()) {
-
// We asked for an invalid offset. For simple case ask for
-
// the last element to reset
-
readOffset = getLastOffset(consumer, a_topic, a_partition, kafka.api.OffsetRequest.LatestTime(), clientName);
-
continue;
-
}
-
consumer.close();
-
consumer = null;
-
leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
-
continue;
-
}
-
numErrors = 0;
-
-
long numRead = 0;
-
for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
-
long currentOffset = messageAndOffset.offset();
-
if (currentOffset < readOffset) {
-
System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
-
continue;
-
}
-
-
readOffset = messageAndOffset.nextOffset();
-
ByteBuffer payload = messageAndOffset.message().payload();
-
-
byte[] bytes = new byte[payload.limit()];
-
payload.get(bytes);
-
System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
-
numRead++;
-
a_maxReads--;
-
}
-
-
if (numRead == 0) {
-
try {
-
Thread.sleep(1000);
-
} catch (InterruptedException ie) {
-
}
-
}
-
}
-
if (consumer != null)
-
consumer.close();
-
}
-
-
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
-
TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
-
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
-
requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
-
kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
-
OffsetResponse response = consumer.getOffsetsBefore(request);
-
-
if (response.hasError()) {
-
System.out.println("Error fetching data Offset Data the Broker. Reason: " + response.errorCode(topic, partition));
-
return 0;
-
}
-
long[] offsets = response.offsets(topic, partition);
-
return offsets[0];
-
}
-
-
/**
-
* @param a_oldLeader
-
* @param a_topic
-
* @param a_partition
-
* @param a_port
-
* @return String
-
* @throws Exception
-
* 找一個leader broker
-
*/
-
private String findNewLeader(String a_oldLeader, String a_topic, int a_partition, int a_port) throws Exception {
-
for (int i = 0; i < 3; i++) {
-
boolean goToSleep = false;
-
PartitionMetadata metadata = findLeader(m_replicaBrokers, a_port, a_topic, a_partition);
-
if (metadata == null) {
-
goToSleep = true;
-
} else if (metadata.leader() == null) {
-
goToSleep = true;
-
} else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
-
// first time through if the leader hasn't changed give
-
// ZooKeeper a second to recover
-
// second time, assume the broker did recover before failover,
-
// or it was a non-Broker issue
-
//
-
goToSleep = true;
-
} else {
-
return metadata.leader().host();
-
}
-
if (goToSleep) {
-
try {
-
Thread.sleep(1000);
-
} catch (InterruptedException ie) {
-
}
-
}
-
}
-
System.out.println("Unable to find new leader after Broker failure. Exiting");
-
throw new Exception("Unable to find new leader after Broker failure. Exiting");
-
}
-
-
private PartitionMetadata findLeader(List<String> a_seedBrokers, int a_port, String a_topic, int a_partition) {
-
PartitionMetadata returnMetaData = null;
-
loop: for (String seed : a_seedBrokers) {
-
SimpleConsumer consumer = null;
-
try {
-
consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
-
List<String> topics = Collections.singletonList(a_topic);
-
TopicMetadataRequest req = new TopicMetadataRequest(topics);
-
kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
-
-
List<TopicMetadata> metaData = resp.topicsMetadata();
-
for (TopicMetadata item : metaData) {
-
for (PartitionMetadata part : item.partitionsMetadata()) {
-
if (part.partitionId() == a_partition) {
-
returnMetaData = part;
-
break loop;
-
}
-
}
-
}
-
} catch (Exception e) {
-
System.out.println("Error communicating with Broker [" + seed + "] to find Leader for [" + a_topic + ", " + a_partition + "] Reason: " + e);
-
} finally {
-
if (consumer != null)
-
consumer.close();
-
}
-
}
-
if (returnMetaData != null) {
-
m_replicaBrokers.clear();
-
for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
-
m_replicaBrokers.add(replica.host());
-
}
-
}
-
return returnMetaData;
-
}
- }
相關文章
- Kafka Consumer2018-02-11Kafka
- Kafka Producer Consumer2018-02-10Kafka
- kafka詳解一、Kafka簡介2015-11-17Kafka
- Kafka Consumer 的 Rebalance 機制2019-11-19Kafka
- Kafka java api-生產者程式碼2018-10-05KafkaJavaAPI
- kafka詳解四:Kafka的設計思想、理念2015-11-17Kafka
- alpakka-kafka(2)-consumer2021-02-22Kafka
- 詳解Kafka Producer2019-11-15Kafka
- Kafka詳解二、如何配置Kafka叢集2015-11-17Kafka
- kafka詳解三:開發Kafka應用2015-11-17Kafka
- 聊聊 Kafka Consumer 那點事2021-10-17Kafka
- Kafka核心元件詳解2022-04-30Kafka元件
- kafka核心架構詳解2020-11-14Kafka架構
- Kafka的Consumer負載均衡演算法2018-09-27Kafka負載演算法
- Kafka實戰寶典:Kafka的控制器controller詳解2020-09-21KafkaController
- 一文詳解Kafka API2022-02-11KafkaAPI
- kafka的原理及叢集部署詳解2023-03-18Kafka
- Kafka流處理內幕詳解2021-07-31Kafka
- 詳細解析kafka之kafka分割槽和副本2021-09-09Kafka
- iOS底層系統:BSD層詳解2019-05-07iOS
- Java Api Consumer 連線啟用Kerberos認證的Kafka2018-08-23JavaAPIROSKafka
- volatile底層原理詳解2019-05-29
- 切片底層陣列詳解2021-09-19陣列
- Kafka詳細介紹2018-09-05Kafka
- Kafka與ActiveMQ的區別與聯絡詳解2020-08-13KafkaMQ
- 詳解Kafka與ActiveMQ的區別與聯絡!2021-04-25KafkaMQ
- 兩萬字長文,徹底搞懂Kafka!2021-08-13Kafka
- Kafka學習之(五)搭建kafka叢集之Zookeeper叢集搭建2018-02-14Kafka
- 最佳實踐|從Producer 到 Consumer,如何有效監控 Kafka2022-05-27Kafka
- kafka系列之(3)——Coordinator與offset管理和Consumer Rebalance2017-05-11Kafka
- kafka-ngx_kafka_module2020-11-16Kafka
- 【Kafka】Kafka叢集搭建2017-07-17Kafka
- Kafka實戰-Kafka Cluster2015-05-29Kafka
- 【轉】kafka-檔案儲存機制詳解2019-01-03Kafka
- 如何確定Kafka的分割槽數、key和consumer執行緒數2018-07-24Kafka執行緒
- kafka報錯解決 kafka.errors.NoBrokersAvailable: NoBrokersAvailable2020-11-23KafkaErrorAI
- kafka消費者Consumer引數設定及引數調優建議-kafka 商業環境實戰2018-11-14Kafka
- kafka rebalance 機制與Consumer多種消費模式案例應用實戰-kafka 商業環境實戰2018-11-11Kafka模式