Linux Log4j + Kafka + KafkaLog4jAppender Log Collection
Background:
Kafka version: kafka_2.10-0.8.2.1
Server IP: 10.243.3.17
1. Kafka server.properties configuration
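The original post showed this file as a screenshot. Below is a minimal sketch of the entries this setup depends on; the port is inferred from the broker list 10.243.3.17:8457 used in the appendix code, log.dirs from the kafka-logs-1 directory mentioned in section 6, and the rest follows the kafka_2.10-0.8.2.1 defaults, so treat every value as an assumption to adapt:

broker.id=0
port=8457
host.name=10.243.3.17
# advertised.host.name is the address clients (including the log4j appender)
# connect to; section 6 singles it out as the usual culprit when connections fail.
advertised.host.name=10.243.3.17
log.dirs=/tmp/kafka-logs-1
zookeeper.connect=localhost:2181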
2. zookeeper.properties configuration
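Also a screenshot in the original. A minimal sketch, assuming the file shipped with Kafka, with dataDir inferred from the zookeeper-logs directory mentioned in section 6:

# Directory where ZooKeeper stores its snapshots.
dataDir=/tmp/zookeeper-logs
# Port that clients (the Kafka broker, kafka-topics.sh) connect on.
clientPort=2181
# 0 disables the per-IP connection limit.
maxClientCnxns=0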
3. Starting ZooKeeper and Kafka
../bin/zookeeper-server-start.sh -daemon /usr/local/kafka_2.10-0.8.2.1/config/zookeeper.properties
../bin/kafka-server-start.sh -daemon /usr/local/kafka_2.10-0.8.2.1/config/server.properties
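Since -daemon detaches both processes, a quick way to confirm they came up is jps, which should list the ZooKeeper and Kafka main classes (PIDs will differ):

jps
# 12345 QuorumPeerMain
# 12346 Kafka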
4. Creating a topic
../bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
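Before wiring log4j in, it is worth confirming that messages flow end to end using the console tools shipped with Kafka. The broker port below follows the appendix code and should match your server.properties:

../bin/kafka-topics.sh --list --zookeeper localhost:2181
../bin/kafka-console-producer.sh --broker-list 10.243.3.17:8457 --topic test
../bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning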
5. log4j.xml configuration
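The original file was shown as an image. The sketch below shows the shape such a file typically takes with the 0.8.x appender class kafka.producer.KafkaLog4jAppender, which in this version ships inside the kafka_2.10 core jar; the BrokerList and Topic parameter names match that class's setters, the broker address follows the appendix code, and the rest is an assumption:

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">

    <!-- Sends every log event it receives to the "test" topic. -->
    <appender name="KAFKA" class="kafka.producer.KafkaLog4jAppender">
        <param name="BrokerList" value="10.243.3.17:8457"/>
        <param name="Topic" value="test"/>
        <layout class="org.apache.log4j.PatternLayout">
            <param name="ConversionPattern" value="%d [%t] %-5p %c - %m%n"/>
        </layout>
    </appender>

    <appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
        <layout class="org.apache.log4j.PatternLayout">
            <param name="ConversionPattern" value="%d [%t] %-5p %c - %m%n"/>
        </layout>
    </appender>

    <!-- Only the application logger feeds Kafka; keeping the Kafka client's
         own logging off this appender stops it feeding back into itself. -->
    <logger name="KafkaApp" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="KAFKA"/>
        <appender-ref ref="CONSOLE"/>
    </logger>

    <root>
        <level value="INFO"/>
        <appender-ref ref="CONSOLE"/>
    </root>
</log4j:configuration>

The logger name "KafkaApp" matches the appendix class. Scoping the appender this way rather than attaching it to <root> is a deliberate choice, because the appender itself emits log events through the Kafka producer and can loop if it receives them back.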
6. Common problems
If you run into problems, first check that the parameters are configured correctly, especially host, port, and advertised.host.name. Then delete the kafka-logs-1 and zookeeper-logs directories, restart ZooKeeper and Kafka, and recreate the topic.
7. Appendix: connecting to Kafka from plain Java (without log4j)
import org.apache.log4j.Logger;

/**
 * Writes a few log lines; with the KAFKA appender from section 5 configured,
 * each line is delivered to the Kafka topic.
 *
 * @author gengchong
 * @date 2016-01-05 09:21:16
 */
public class KafkaApp {
    private static final Logger LOGGER = Logger.getLogger(KafkaApp.class);

    public static void main(String[] args) throws InterruptedException {
        for (int i = 0; i < 20; i++) {
            LOGGER.info("Info [" + i + "]");
            Thread.sleep(1000);
        }
    }
}
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

/**
 * Sends messages to Kafka directly with the 0.8.x producer API.
 *
 * @author gengchong
 * @date 2016-01-05 13:55:56
 */
public class KafkaProducer {
    private static final String TOPIC = "test";
    private static final String CONTENT = "This is a single message";
    private static final String BROKER_LIST = "10.243.3.17:8457";
    private static final String SERIALIZER_CLASS = "kafka.serializer.StringEncoder";

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("serializer.class", SERIALIZER_CLASS);
        props.put("metadata.broker.list", BROKER_LIST);

        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);

        // Send one message.
        KeyedMessage<String, String> message =
                new KeyedMessage<String, String>(TOPIC, CONTENT);
        producer.send(message);

        // Send multiple messages.
        List<KeyedMessage<String, String>> messages =
                new ArrayList<KeyedMessage<String, String>>();
        for (int i = 0; i < 5; i++) {
            messages.add(new KeyedMessage<String, String>(
                    TOPIC, "============== send Message. " + i));
        }
        producer.send(messages);
        producer.close();
    }
}
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.google.common.collect.ImmutableMap;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

/**
 * Consumes messages from Kafka with the 0.8.x high-level consumer API.
 *
 * @author gengchong
 * @date 2016-01-05 09:22:04
 */
public class KafkaConsumer {
    private static final String ZOOKEEPER = "10.243.3.17:2181";
    // The group name can be anything: every group gets its own full copy of
    // each message in Kafka.
    private static final String GROUP_NAME = "test_group";
    private static final String TOPIC_NAME = "test";
    private static final int CONSUMER_NUM = 4;
    private static final int PARTITION_NUM = 4;

    public static void main(String[] args) {
        // Specify some consumer properties.
        Properties props = new Properties();
        props.put("zookeeper.connect", ZOOKEEPER);
        props.put("zookeeper.connectiontimeout.ms", "1000000");
        props.put("group.id", GROUP_NAME);

        // Create the connection to the cluster.
        ConsumerConfig consumerConfig = new ConsumerConfig(props);
        ConsumerConnector consumerConnector =
                Consumer.createJavaConsumerConnector(consumerConfig);

        // Create 4 streams for topic "test", to allow 4 threads to consume.
        // Note: section 4 creates the topic with a single partition, so only
        // one of these streams will actually receive messages.
        Map<String, List<KafkaStream<byte[], byte[]>>> topicMessageStreams =
                consumerConnector.createMessageStreams(
                        ImmutableMap.of(TOPIC_NAME, PARTITION_NUM));
        List<KafkaStream<byte[], byte[]>> streams =
                topicMessageStreams.get(TOPIC_NAME);

        // Create a pool of 4 threads to consume from the streams.
        ExecutorService executor = Executors.newFixedThreadPool(CONSUMER_NUM);

        // Consume the messages in the threads.
        for (final KafkaStream<byte[], byte[]> stream : streams) {
            executor.submit(new Runnable() {
                public void run() {
                    for (MessageAndMetadata<byte[], byte[]> msgAndMetadata : stream) {
                        // Process message (msgAndMetadata.message()).
                        System.out.println(new String(msgAndMetadata.message()));
                    }
                }
            });
        }
    }
}
Source: ITPUB blog, http://blog.itpub.net/28624388/viewspace-1972027/