storm(1.1.3)與kafka(1.0.0)整合

hgs19921112發表於2018-09-27
package hgs.core.sk;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;
@SuppressWarnings("deprecation")
public class StormKafkaMainTest {
	
	public static void main(String[] args) {
		TopologyBuilder builder = new TopologyBuilder();
		//zookeeper連結地址
		BrokerHosts hosts = new ZkHosts("bigdata01:2181,bigdata02:2181,bigdata03:2181");
		//KafkaSpout需要一個config,引數代表的意義1:zookeeper連結,2:消費kafka的topic,3,4:記錄消費offset的zookeeper地址 ,這裡會儲存在 zookeeper
		//叢集的/test7/consume下面
		SpoutConfig sconfig = new SpoutConfig(hosts, "test7", "/test7", "consume");
		//消費的時候忽略offset從頭開始消費,這裡可以註釋掉,因為消費的offset在zookeeper中可以找到
		sconfig.ignoreZkOffsets=true;
		//sconfig.scheme = new SchemeAsMultiScheme( new StringScheme() );
		builder.setSpout("kafkaspout", new KafkaSpout(sconfig), 1);
		builder.setBolt("mybolt1", new MyboltO(), 1).shuffleGrouping("kafkaspout");
		
     	Config config = new Config();
     	config.setNumWorkers(1);
     	try {
			StormSubmitter.submitTopology("storm----kafka--test", config, builder.createTopology());
		} catch (Exception e) {
			e.printStackTrace();
		}
     	
 /*    	LocalCluster cu  = new LocalCluster();
     	cu.submitTopology("test", config, builder.createTopology());*/
	}
}
class  MyboltO extends  BaseRichBolt{
	private static final long serialVersionUID = 1L;
	OutputCollector collector = null;
	public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
		this.collector = collector;
	}
	public void execute(Tuple input) {
		//這裡把訊息大一出來,在對應的woker下面的日誌可以找到列印的內容
		//因為得到的內容是byte陣列,所以需要轉換
		String out = new String((byte[])input.getValue(0));
		System.out.println(out);
		collector.ack(input);
		
	}
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		
	}
	
	
}
pom.xml檔案的依賴
<project xmlns="http://maven.apache.org/POM/4.0.0"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>hgs</groupId>
  <artifactId>core.sk</artifactId>
  <version>1.0.0-SNAPSHOT</version>
  <packaging>jar</packaging>
  <name>core.sk</name>
  <url>http://maven.apache.org</url>
  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
  </properties>
  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>3.8.1</version>
      <scope>test</scope>
    </dependency>
    
    <dependency>
    	<groupId>org.apache.storm</groupId>
    	<artifactId>storm-kafka</artifactId>
    	<version>1.1.3</version>
	</dependency>
	<dependency>
  		<groupId>org.apache.storm</groupId>
 		 <artifactId>storm-core</artifactId>
  		<version>1.1.3</version>
  		<scope>provided</scope>
	</dependency>
	<dependency>
    	<groupId>org.apache.kafka</groupId>
    	<artifactId>kafka_2.11</artifactId>
    	<version>1.0.0</version>
    <exclusions>
    		<exclusion>
          		<groupId>org.slf4j</groupId>
          		<artifactId>slf4j-log4j12</artifactId>
        	</exclusion>
        	<exclusion>
            	<groupId>org.apache.zookeeper</groupId>
            	<artifactId>zookeeper</artifactId>
       		</exclusion>
    	</exclusions>
	</dependency>
	
<!-- 	<dependency>
    	<groupId>org.apache.storm</groupId>
    	<artifactId>storm-kafka-monitor</artifactId>
    	<version>1.2.2</version>
	</dependency> -->
<!-- 	<dependency>
    	<groupId>org.apache.kafka</groupId>
    	<artifactId>kafka-clients</artifactId>
    	<version>0.8.2.1</version>
	</dependency> -->
	
	<dependency>
	    <groupId>org.clojure</groupId>
	    <artifactId>clojure</artifactId>
	    <version>1.7.0</version>
	</dependency>
	<!-- 嘗試了很多次 都會有這個錯誤:
	java.lang.NullPointerException at org.apache.storm.kafka.monitor.KafkaOffsetLagUtil.getOffsetLags(KafkaOffsetLagUtil.java:272)
	最後修改為kafka相應的kafka-clients版本後問題得到解決,應該是該出的問題
	-->
	<dependency>
	    <groupId>org.apache.kafka</groupId>
	    <artifactId>kafka-clients</artifactId>
	    <version>1.0.0</version>
	</dependency>
	
 </dependencies>
  
  
  
  <build>
        <plugins>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>2.2</version>
                <configuration>
                    <archive>
                        <manifest>
                            <!-- 我執行這個jar所執行的主類 -->
                            <mainClass>hgs.core.sk.StormKafkaMainTest</mainClass>
                        </manifest>
                    </archive>
                    <descriptorRefs>
                        <descriptorRef>
                            <!-- 必須是這樣寫 -->
                            jar-with-dependencies
                        </descriptorRef>
                    </descriptorRefs>
                </configuration>
                
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            
             <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>


來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/31506529/viewspace-2215087/,如需轉載,請註明出處,否則將追究法律責任。

相關文章