Install the JDK and configure the JAVA_HOME environment variable
-
Install a ZooKeeper cluster and make sure it is running normally
-
Start ZooKeeper
-
[root@CentOSX ~]# /usr/zookeeper-3.4.6/bin/zkServer.sh start zoo.cfg # start ZooKeeper
Use jps to check whether the process started successfully.
-
Hostname-to-IP mappings must be configured on every node (very important)
-
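For example, /etc/hosts on every node would contain entries like the following (the IP addresses are placeholders; substitute your own):

192.168.0.101 CentOSA
192.168.0.102 CentOSB
192.168.0.103 CentOSC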
Clocks must be synchronized and the firewall must be disabled
-
Install and configure Kafka
-
[root@CentOSX ~]# tar -zxf kafka_2.11-0.11.0.0.tgz -C /usr/ # extract
[root@CentOSX ~]# vi /usr/kafka_2.11-0.11.0.0/config/server.properties
############################# Server Basics #############################
broker.id=[0|1|2] # set to 0, 1, 2 on nodes A, B, C respectively
delete.topic.enable=true
############################# Socket Server Settings #############################
listeners=PLAINTEXT://[CentOSA|B|C]:9092 # e.g. CentOSA:9092 on node A, and so on
############################# Log Basics #############################
log.dirs=/usr/kafka-logs
############################# Log Retention Policy #############################
log.retention.hours=168 # retention time: 168/24 = 7 days
############################# Zookeeper #############################
zookeeper.connect=CentOSA:2181,CentOSB:2181,CentOSC:2181 # same on every node
[root@CentOSX ~]# cd /usr/kafka_2.11-0.11.0.0/
[root@CentOSX kafka_2.11-0.11.0.0]# ./bin/kafka-server-start.sh -daemon config/server.properties # start Kafka
Stopping the Kafka service
-
The stock kafka-server-stop.sh can fail to find the broker process, so replace its body with a jps-based lookup:
[root@CentOSA kafka_2.11-0.11.0.0]# vi bin/kafka-server-stop.sh
PIDS=$(jps | grep Kafka | awk '{print $1}')
if [ -z "$PIDS" ]; then
  echo "No kafka server to stop"
  exit 1
else
  kill -s TERM $PIDS
fi
[root@CentOSA kafka_2.11-0.11.0.0]# bin/kafka-server-stop.sh # stop
[root@CentOSA kafka_2.11-0.11.0.0]# kill -9 `jps | grep Kafka | awk '{print $1}'` # force kill
[root@CentOSA kafka_2.11-0.11.0.0]# ./bin/kafka-server-start.sh -daemon config/server.properties # start
Verify that the cluster started correctly
-
Create a topic named topic01 with 3 partitions and a replication factor of 3:
[root@CentOSA kafka_2.11-0.11.0.0]# ./bin/kafka-topics.sh \
--zookeeper CentOSA:2181,CentOSB:2181,CentOSC:2181 \
--create \
--topic topic01 \
--partitions 3 \
--replication-factor 3
Start a console consumer:
[root@CentOSA kafka_2.11-0.11.0.0]# ./bin/kafka-console-consumer.sh --bootstrap-server CentOSA:9092,CentOSB:9092,CentOSC:9092 --topic topic01
Start a console producer; messages typed here should appear on the consumer:
[root@CentOSB kafka_2.11-0.11.0.0]# ./bin/kafka-console-producer.sh --broker-list CentOSA:9092,CentOSB:9092,CentOSC:9092 --topic topic01
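You can also verify from code that all three brokers joined the cluster. A minimal sketch using the Java AdminClient introduced in the Java API section below (the class name ClusterCheck is just for illustration; it requires the kafka-clients dependency on the classpath):
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.KafkaAdminClient;
import org.apache.kafka.common.Node;

public class ClusterCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
        AdminClient adminClient = KafkaAdminClient.create(props);
        // describeCluster().nodes() returns a KafkaFuture; get() blocks until the brokers answer
        for (Node node : adminClient.describeCluster().nodes().get()) {
            System.out.println("broker " + node.id() + " -> " + node.host() + ":" + node.port());
        }
        adminClient.close();
    }
}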
Topic Management
-
Create a topic
[root@CentOSA kafka_2.11-0.11.0.0]# ./bin/kafka-topics.sh \
--zookeeper CentOSA:2181,CentOSB:2181,CentOSC:2181 \
--create \
--topic topic01 \
--partitions 3 \
--replication-factor 3
Describe a topic
-
[root@CentOSA kafka_2.11-0.11.0.0]# ./bin/kafka-topics.sh \
--zookeeper CentOSA:2181,CentOSB:2181,CentOSC:2181 \
--describe \
--topic topic01
Topic:topic01   PartitionCount:3        ReplicationFactor:3     Configs:
        Topic: topic01  Partition: 0    Leader: 2       Replicas: 2,1,0 Isr: 0,1,2
        Topic: topic01  Partition: 1    Leader: 0       Replicas: 0,2,1 Isr: 0,1,2
        Topic: topic01  Partition: 2    Leader: 1       Replicas: 1,0,2 Isr: 0,1,2
Leader is the broker id currently serving the partition, Replicas lists all brokers holding a copy, and Isr is the set of replicas in sync with the leader.
List topics
-
[root@CentOSA kafka_2.11-0.11.0.0]# ./bin/kafka-topics.sh \
--zookeeper CentOSA:2181,CentOSB:2181,CentOSC:2181 \
--list
__consumer_offsets # built-in topic Kafka uses to store consumer offsets (system metadata)
topic01
topic02
Delete a topic
-
[root@CentOSA kafka_2.11-0.11.0.0]# ./bin/kafka-topics.sh \
--zookeeper CentOSA:2181,CentOSB:2181,CentOSC:2181 \
--delete \
--topic topic02
This only takes effect because delete.topic.enable=true was set in server.properties above.
Alter the partition count
-
[root@CentOSA kafka_2.11-0.11.0.0]# ./bin/kafka-topics.sh \
--zookeeper CentOSA:2181,CentOSB:2181,CentOSC:2181 \
--alter \
--topic topic01 \
--partitions 4 # the partition count can only be increased, never decreased
Delete already-published records (rarely used)
-
[root@CentOSA kafka_2.11-0.11.0.0]# ./bin/kafka-delete-records.sh \
--bootstrap-server CentOSA:9092,CentOSB:9092,CentOSC:9092 \
--offset-json-file /root/deleterecord.json
The JSON file tells the broker to delete all records in the given partition whose offset is below the one listed, e.g. /root/deleterecord.json:
{"partitions":
  [
    {"topic": "topic01", "partition": 1, "offset": 100}
  ]
}
Within a consumer group, a topic's partitions are divided evenly among the group's consumers; across different groups, every message is broadcast to each group.
Operating Kafka from the Java API
-
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.11.0.0</version>
</dependency>
<dependency>
    <groupId>log4j</groupId>
    <artifactId>log4j</artifactId>
    <version>1.2.17</version>
</dependency>
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-log4j12</artifactId>
    <version>1.7.5</version>
</dependency>
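The slf4j-log4j12 binding needs a log4j configuration, otherwise the clients stay silent. A minimal sketch of src/main/resources/log4j.properties (the pattern layout is just an example):
# send all INFO-and-above logs to the console
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %p %c - %m%n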
Delete a topic
-
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
AdminClient adminClient = KafkaAdminClient.create(props);
// deleteTopics is asynchronous; the request completes in the background
Collection<String> topics = Arrays.asList("topic01");
adminClient.deleteTopics(topics);
adminClient.close();
Create a topic
-
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
AdminClient adminClient = KafkaAdminClient.create(props);
// NewTopic(name, numPartitions, replicationFactor)
Collection<NewTopic> topics = Arrays.asList(
        new NewTopic("topic01", 3, (short) 3));
adminClient.createTopics(topics);
adminClient.close();
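The AdminClient can likewise list existing topics, mirroring kafka-topics.sh --list. A minimal fragment (call it from a method that declares throws Exception, since get() blocks and can throw):
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
AdminClient adminClient = KafkaAdminClient.create(props);
// names() returns a KafkaFuture<Set<String>>; get() blocks until the brokers answer
for (String topic : adminClient.listTopics().names().get()) {
    System.out.println(topic);
}
adminClient.close();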
Publish messages
-
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, DefaultPartitioner.class);
KafkaProducer<String,String> pd=new KafkaProducer<String, String>(props);
// build the record: (topic, key, value)
for (int i = 0; i < 10; i++) {
    ProducerRecord<String, String> record
            = new ProducerRecord<String, String>("topic01", "00" + i + "0", "user0100 true 15000");
    // send the message (asynchronous)
    pd.send(record);
}
pd.flush();
pd.close();
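send() is asynchronous and returns immediately; to confirm delivery or surface errors, a Callback can be passed as the second argument. A minimal sketch against the producer above:
ProducerRecord<String, String> record =
        new ProducerRecord<String, String>("topic01", "0000", "user0100 true 15000");
pd.send(record, new Callback() {
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            // delivery failed
            exception.printStackTrace();
        } else {
            // delivery confirmed: topic-partition@offset where the record landed
            System.out.println(metadata.topic() + "-" + metadata.partition() + "@" + metadata.offset());
        }
    }
});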
Subscribe to messages
-
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g1");
// create the consumer
KafkaConsumer<String, String> kc = new KafkaConsumer<String, String>(props);
// subscribe to topics
Collection<String> topics = Arrays.asList("topic01");
kc.subscribe(topics);
while (true) {
    // poll blocks for up to 1 second waiting for new records
    ConsumerRecords<String, String> records = kc.poll(1000);
    for (ConsumerRecord<String, String> record : records) {
        String key = record.key();
        String value = record.value();
        int partition = record.partition();
        long offset = record.offset();
        long ts = record.timestamp();
        System.out.println(key + ":" + value + "\t" + partition + "|" + offset + ",ts=" + ts);
    }
}
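To see the group semantics noted earlier in action: consumers that share a group.id split topic01's partitions between them, while a consumer with a different group.id receives its own full copy of the stream. A minimal sketch (the group names g1 and g2 are arbitrary; each KafkaConsumer must be subscribed and polled from its own thread):
// Same configuration as above, only group.id differs.
props.put(ConsumerConfig.GROUP_ID_CONFIG, "g1");
KafkaConsumer<String, String> memberA = new KafkaConsumer<String, String>(props);
KafkaConsumer<String, String> memberB = new KafkaConsumer<String, String>(props);
// memberA and memberB share topic01's 3 partitions between them (load balancing).

props.put(ConsumerConfig.GROUP_ID_CONFIG, "g2");
KafkaConsumer<String, String> other = new KafkaConsumer<String, String>(props);
// "other" belongs to a different group, so it sees every message (broadcast).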
Sending objects with a custom serializer and deserializer
-
import java.util.Map;
import org.apache.commons.lang3.SerializationUtils;
import org.apache.kafka.common.serialization.Deserializer;

public class ObjectDeserializer implements Deserializer<Object> {
    public void configure(Map<String, ?> map, boolean b) {
    }
    // turn the raw bytes back into an object via Java serialization
    public Object deserialize(String s, byte[] bytes) {
        return SerializationUtils.deserialize(bytes);
    }
    public void close() {
    }
}
---
import java.io.Serializable;
import java.util.Map;
import org.apache.commons.lang3.SerializationUtils;
import org.apache.kafka.common.serialization.Serializer;

public class ObjectSerialization implements Serializer<Object> {
    public void configure(Map<String, ?> map, boolean b) {
    }
    // the value being sent must implement java.io.Serializable
    public byte[] serialize(String s, Object o) {
        return SerializationUtils.serialize((Serializable) o);
    }
    public void close() {
    }
}
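Wiring the custom (de)serializers in is just a matter of producer/consumer configuration. A minimal sketch, assuming a hypothetical User class that implements java.io.Serializable (SerializationUtils comes from commons-lang3, which must be on the classpath):
// Producer side: use ObjectSerialization for values.
Properties pprops = new Properties();
pprops.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
pprops.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
pprops.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ObjectSerialization.class);
KafkaProducer<String, Object> producer = new KafkaProducer<String, Object>(pprops);
// User is a hypothetical Serializable POJO used for illustration
producer.send(new ProducerRecord<String, Object>("topic01", "user001", new User("jack", true, 15000.0)));
producer.close();

// Consumer side: mirror it with ObjectDeserializer for values.
Properties cprops = new Properties();
cprops.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
cprops.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
cprops.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ObjectDeserializer.class);
cprops.put(ConsumerConfig.GROUP_ID_CONFIG, "g1");
KafkaConsumer<String, Object> consumer = new KafkaConsumer<String, Object>(cprops);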
Kafka Streams
-
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-streams</artifactId>
    <version>0.11.0.0</version>
</dependency>
Word count (Java 7 style)
-
Properties config = new Properties();
config.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-topology");
config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
        "CentOSA:9092,CentOSB:9092,CentOSC:9092");
config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG,
        Serdes.String().getClass());
config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG,
        Serdes.String().getClass());
// build the processing topology
KStreamBuilder builder = new KStreamBuilder();
KStream<String, String> textLines = builder.stream("TextLinesTopic");
textLines.flatMapValues(new ValueMapper<String, Iterable<String>>() {
    // split each line into words, emitting one record per word
    public Iterable<String> apply(String line) {
        return Arrays.asList(line.split(" "));
    }
})
.groupBy(new KeyValueMapper<String, String, String>() {
    // re-key each record by the word itself
    public String apply(String key, String value) {
        return value;
    }
}).count("Counts").to(Serdes.String(), Serdes.Long(), "WordsWithCountsTopic");
KafkaStreams streams = new KafkaStreams(builder, config);
// the topology does nothing until the streams instance is started
streams.start();
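For reference, on Java 8 the same topology can be written with lambdas, since ValueMapper and KeyValueMapper are functional interfaces. A minimal equivalent sketch against the same 0.11 API:
KStreamBuilder builder = new KStreamBuilder();
KStream<String, String> textLines = builder.stream("TextLinesTopic");
textLines
        .flatMapValues(line -> Arrays.asList(line.split(" ")))   // one record per word
        .groupBy((key, word) -> word)                            // re-key by the word itself
        .count("Counts")                                         // local state store name
        .to(Serdes.String(), Serdes.Long(), "WordsWithCountsTopic");
KafkaStreams streams = new KafkaStreams(builder, config);
streams.start();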