Integrating Storm (1.1.3) with Kafka (1.0.0)
package hgs.core.sk;

import java.util.Map;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;

@SuppressWarnings("deprecation")
public class StormKafkaMainTest {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        // ZooKeeper connection string
        BrokerHosts hosts = new ZkHosts("bigdata01:2181,bigdata02:2181,bigdata03:2181");
        // KafkaSpout needs a SpoutConfig. The arguments mean: 1) the ZooKeeper connection,
        // 2) the Kafka topic to consume, 3) and 4) the ZooKeeper root path and consumer id
        // under which the consumed offsets are recorded -- here they are stored under
        // /test7/consume in the ZooKeeper ensemble.
        SpoutConfig sconfig = new SpoutConfig(hosts, "test7", "/test7", "consume");
        // Ignore the offsets stored in ZooKeeper and consume from the beginning. This line
        // can be commented out, because the consumed offsets can be found in ZooKeeper.
        sconfig.ignoreZkOffsets = true;
        //sconfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        builder.setSpout("kafkaspout", new KafkaSpout(sconfig), 1);
        builder.setBolt("mybolt1", new MyboltO(), 1).shuffleGrouping("kafkaspout");

        Config config = new Config();
        config.setNumWorkers(1);
        try {
            StormSubmitter.submitTopology("storm----kafka--test", config, builder.createTopology());
        } catch (Exception e) {
            e.printStackTrace();
        }
        /*LocalCluster cu = new LocalCluster();
        cu.submitTopology("test", config, builder.createTopology());*/
    }
}

class MyboltO extends BaseRichBolt {
    private static final long serialVersionUID = 1L;
    OutputCollector collector = null;

    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    public void execute(Tuple input) {
        // Print the message here; the output can be found in the log of the corresponding worker.
        // The received value is a byte array, so it has to be converted to a String first.
        String out = new String((byte[]) input.getValue(0));
        System.out.println(out);
        collector.ack(input);
    }

    public void declareOutputFields(OutputFieldsDeclarer declarer) {
    }
}
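For a quick end-to-end test, the "test7" topic needs some messages in it. Below is a minimal producer sketch using the kafka-clients 1.0.0 API that the pom already declares; the class name TestProducer and the broker list (bigdata01:9092 and so on) are assumptions, since the topology above only reveals the ZooKeeper hosts, not the broker ports.

package hgs.core.sk;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Minimal test producer: writes a few lines into the "test7" topic so the
// topology above has something to print in the worker log.
public class TestProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Assumed broker addresses; adjust to your cluster.
        props.put("bootstrap.servers", "bigdata01:9092,bigdata02:9092,bigdata03:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 10; i++) {
                producer.send(new ProducerRecord<>("test7", "message-" + i));
            }
        }
    }
}

Each message sent this way should then show up, via the System.out.println in MyboltO, in the log of the worker that runs the bolt.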
Dependencies in the pom.xml file
<project xmlns=" xsi:schemaLocation=" <modelVersion>4.0.0</modelVersion> <groupId>hgs</groupId> <artifactId>core.sk</artifactId> <version>1.0.0-SNAPSHOT</version> <packaging>jar</packaging> <name>core.sk</name> <url> <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> </properties> <dependencies> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <version>3.8.1</version> <scope>test</scope> </dependency> <dependency> <groupId>org.apache.storm</groupId> <artifactId>storm-kafka</artifactId> <version>1.1.3</version> </dependency> <dependency> <groupId>org.apache.storm</groupId> <artifactId>storm-core</artifactId> <version>1.1.3</version> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.kafka</groupId> <artifactId>kafka_2.11</artifactId> <version>1.0.0</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> <exclusion> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> </exclusion> </exclusions> </dependency> <!-- <dependency> <groupId>org.apache.storm</groupId> <artifactId>storm-kafka-monitor</artifactId> <version>1.2.2</version> </dependency> --> <!-- <dependency> <groupId>org.apache.kafka</groupId> <artifactId>kafka-clients</artifactId> <version>0.8.2.1</version> </dependency> --> <dependency> <groupId>org.clojure</groupId> <artifactId>clojure</artifactId> <version>1.7.0</version> </dependency> <!-- 嘗試了很多次 都會有這個錯誤: java.lang.NullPointerException at org.apache.storm.kafka.monitor.KafkaOffsetLagUtil.getOffsetLags(KafkaOffsetLagUtil.java:272) 最後修改為kafka相應的kafka-clients版本後問題得到解決,應該是該出的問題 --> <dependency> <groupId>org.apache.kafka</groupId> <artifactId>kafka-clients</artifactId> <version>1.0.0</version> </dependency> </dependencies> <build> <plugins> <plugin> <artifactId>maven-assembly-plugin</artifactId> <version>2.2</version> <configuration> <archive> <manifest> <!-- 我執行這個jar所執行的主類 --> <mainClass>hgs.core.sk.StormKafkaMainTest</mainClass> </manifest> </archive> <descriptorRefs> <descriptorRef> <!-- 必須是這樣寫 --> jar-with-dependencies </descriptorRef> </descriptorRefs> </configuration> <executions> <execution> <id>make-assembly</id> <phase>package</phase> <goals> <goal>single</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <configuration> <source>1.8</source> <target>1.8</target> </configuration> </plugin> </plugins> </build> </project>
From the "ITPUB blog"; link: http://blog.itpub.net/31506529/viewspace-2215087/. Please credit the source when reposting; otherwise legal liability may be pursued.