# kafka_2.11-2.0.0 安装配置 (installation & configuration)
# Edit the broker configuration file.
vi ./server.properties
# Unique id of this broker within the cluster (single-node setup, so 0).
broker.id=0
# Address advertised to clients; must be reachable from producers/consumers.
advertised.listeners=PLAINTEXT://192.168.153.141:9092
# Directory where Kafka stores its log segments (the actual message data).
log.dirs=/opt/bigdata/kafka211/kafka-logs
# ZooKeeper connection string used by the broker.
zookeeper.connect=192.168.153.141:2181
# Allow `kafka-topics.sh --delete` to actually remove topics.
delete.topic.enable=true
# Configure environment variables (was `//`, which is not a shell comment —
# bash would try to execute a command named `//`).
export KAFKA_HOME=/opt/bigdata/kafka211
export PATH=$PATH:$KAFKA_HOME/bin
# Start Kafka in the foreground (logs stream to the console).
kafka-server-start.sh ./config/server.properties
# Or start Kafka in the background as a daemon.
kafka-server-start.sh -daemon ./config/server.properties
# NOTE(review): the broker above is configured on 192.168.153.141, but the
# commands below target 192.168.56.161 (and 127.0.0.1 once) — confirm which
# host/IP is correct for this environment.

# Create a topic with 3 partitions and replication factor 1.
kafka-topics.sh --create --zookeeper 192.168.56.161:2181 --topic kb07demo --partitions 3 --replication-factor 1
# List all topics known to this cluster.
kafka-topics.sh --zookeeper 192.168.56.161:2181 --list
# Show partition/replica details for one topic.
kafka-topics.sh --zookeeper 192.168.56.161:2181 --describe --topic kb07demo
# Delete a topic (requires delete.topic.enable=true on the broker).
kafka-topics.sh --zookeeper 127.0.0.1:2181 --delete --topic kb05
# Console producer: type lines on stdin to publish them.
kafka-console-producer.sh --topic kb07demo --broker-list 192.168.56.161:9092
# Console consumer, reading from the earliest offset.
# (Removed the pasted shell prompt "[root@lijia1 ~]#" from the command line.)
kafka-console-consumer.sh --bootstrap-server 192.168.56.161:9092 --topic kb07demo --from-beginning
# Latest offset per partition, i.e. the message count high-water mark.
# Fixed: the tool expects `--time -1`, not `-time -1`.
kafka-run-class.sh kafka.tools.GetOffsetShell --broker-list 192.168.56.161:9092 --topic kb07demo --time -1 --offsets 1
# Flume 到 Kafka (Flume → Kafka agent configuration)
# Flume agent "a": spooldir source -> file channel -> Kafka sink.
a.sources = s
a.channels = c
a.sinks = sk

# Watch a spooling directory; files placed here are ingested line by line.
a.sources.s.type = spooldir
a.sources.s.spoolDir = /opt/kb07file/flumeFile/user_friends
a.sources.s.deserializer = LINE
# Allow very long CSV lines (default is 2048).
a.sources.s.deserializer.maxLineLength = 60000
# Only pick up dated userFriends CSV files.
# NOTE(review): the '.' before "csv" is an unescaped regex wildcard; left as-is
# because the Java properties loader consumes backslashes — it still matches
# the literal dot, just not strictly.
a.sources.s.includePattern = userFriends_[0-9]{4}-[0-9]{2}-[0-9]{2}.csv

# Interceptor that drops the CSV header row (the line starting with "user").
a.sources.s.interceptors = head_filter
a.sources.s.interceptors.head_filter.type = regex_filter
# Fixed: was '^user*', which also matches lines starting with just "use";
# '^user' is the intended prefix match.
a.sources.s.interceptors.head_filter.regex = ^user
# excludeEvents=true means matching events are DROPPED.
a.sources.s.interceptors.head_filter.excludeEvents = true

# Durable file-backed channel (fixed 'cheackpoint' typo in the path).
a.channels.c.type = file
a.channels.c.checkpointDir = /opt/kb07file/flumeFile/checkpoint/userFriends
a.channels.c.dataDirs = /opt/kb07file/flumeFile/data/userFriends

# Kafka sink: publish each event to the raw topic.
a.sinks.sk.type = org.apache.flume.sink.kafka.KafkaSink
a.sinks.sk.batchSize = 640
a.sinks.sk.brokerList = 192.168.56.161:9092
a.sinks.sk.topic = user_friends_raw

# Wire source and sink to the channel.
a.sources.s.channels = c
a.sinks.sk.channel = c