Linux Log4j+Kafka+KafkaLog4jAppender 日誌收集

百聯達發表於2016-01-05
背景:
kafka版本:kafka_2.10-0.8.2.1
伺服器IP:10.243.3.17

一:Kafka  server.properties 檔案配置






二:zookeeper.properties 檔案配置



三: zookeeper,kafka啟動

../bin/zookeeper-server-start.sh -daemon /usr/local/kafka_2.10-0.8.2.1/config/zookeeper.properties

../bin/kafka-server-start.sh -daemon /usr/local/kafka_2.10-0.8.2.1/config/server.properties &

四:建立Topic

../bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test

五: log4j.xml檔案配置



六:常見問題

如遇到問題,首先確定引數配置是否正確,尤其是host,port,advertised.host.name;  然後刪除kafka-logs-1,zookeeper-logs; 重新啟動zookeeper,kafka;
重新建立topic

七:附錄,不使用 log4j 時 Java 直接連線 Kafka 的配置參考

import org.apache.log4j.Logger;


import scala.App;


/**
 * TODO:
 * 
 * @author gengchong
 * @date 2016年1月5日 上午9:21:16
 */
public class KafkaApp {
    private static final Logger LOGGER = Logger.getLogger(App.class);
    public static void main(String[] args) throws InterruptedException {
        for (int i = 0; i < 20; i++) {
            LOGGER.info("Info [" + i + "]");
            Thread.sleep(1000);
        }
    }
}





import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;


/**
 * TODO:
 * 
 * @author gengchong
 * @date 2016年1月5日 下午1:55:56
 */
public class KafakProducer {
    private static final String TOPIC = "test";
    private static final String CONTENT = "This is a single message";
    private static final String BROKER_LIST = "10.243.3.17:8457";
    private static final String SERIALIZER_CLASS = "kafka.serializer.StringEncoder";
     
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("serializer.class", SERIALIZER_CLASS);
        props.put("metadata.broker.list", BROKER_LIST);
         
        ProducerConfig config = new ProducerConfig(props);
        Producer producer = new Producer(config);
 
        //Send one message.
        KeyedMessage message = 
            new KeyedMessage(TOPIC, CONTENT);
        producer.send(message);
         
        //Send multiple messages.
        List<keyedmessage> messages = </keyedmessage
            new ArrayList<keyedmessage>();</keyedmessage
        for (int i = 0; i < 5; i++) {
            messages.add(new KeyedMessage
                (TOPIC, "============== send Message. " + i));
        }
        producer.send(messages);
    }
}

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;


import com.google.common.collect.ImmutableMap;


import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;


/**
 * TODO:
 * 
 * @author gengchong
 * @date 2016年1月5日 上午9:22:04
 */
public class KafkaConsumer {


    private static final String ZOOKEEPER = "10.243.3.17:2181";
    //groupName可以隨意給,因為對於kafka裡的每條訊息,每個group都會完整的處理一遍
    private static final String GROUP_NAME = "test_group";
    private static final String TOPIC_NAME = "test";
    private static final int CONSUMER_NUM = 4;
    private static final int PARTITION_NUM = 4;
 
    public static void main(String[] args) {
        // specify some consumer properties
        Properties props = new Properties();
        props.put("zookeeper.connect", ZOOKEEPER);
        props.put("zookeeper.connectiontimeout.ms", "1000000");
        props.put("group.id", GROUP_NAME);
 
        // Create the connection to the cluster
        ConsumerConfig consumerConfig = new ConsumerConfig(props);
        ConsumerConnector consumerConnector = 
            Consumer.createJavaConsumerConnector(consumerConfig);
 
        // create 4 partitions of the stream for topic “test”, to allow 4
        // threads to consume
        Map<string, list<kafkastream>> topicMessageStreams = 
            consumerConnector.createMessageStreams(
                ImmutableMap.of(TOPIC_NAME, PARTITION_NUM));
        List<kafkastream> streams = topicMessageStreams.get(TOPIC_NAME);</kafkastream
 
        // create list of 4 threads to consume from each of the partitions
        ExecutorService executor = Executors.newFixedThreadPool(CONSUMER_NUM);
 
        // consume the messages in the threads
        for (final KafkaStream stream : streams) {
            executor.submit(new Runnable() {
                public void run() {
                    for (MessageAndMetadata msgAndMetadata : stream) {
                        // process message (msgAndMetadata.message())
                        System.out.println(new String(msgAndMetadata.message()));
                    }
                }
            });
        }
    }
}


來自 “ ITPUB部落格 ” ,連結:http://blog.itpub.net/28624388/viewspace-1972027/,如需轉載,請註明出處,否則將追究法律責任。

相關文章