1. Install docker-compose
Refer to my earlier post.
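If it is not installed yet, one common way (assuming Python and pip are already available on the host) is to install it from PyPI:
pip install docker-compose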
2. docker-compose.yml configuration
zookeeper:
  image: wurstmeister/zookeeper
  ports:
    - "2181:2181"
kafka:
  image: wurstmeister/kafka
  ports:
    - "9092"   # publish only the container port, so each scaled instance gets its own host port
  links:
    - zookeeper:zk
  environment:
    KAFKA_ADVERTISED_HOST_NAME: 10.162.213.99
    KAFKA_ADVERTISED_PORT: 9092
    KAFKA_DELETE_TOPIC_ENABLE: "true"
    KAFKA_LOG_RETENTION_HOURS: 1
    KAFKA_MESSAGE_MAX_BYTES: 10000000
    KAFKA_REPLICA_FETCH_MAX_BYTES: 10000000
    KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS: 60000
    KAFKA_NUM_PARTITIONS: 2
    KAFKA_DELETE_RETENTION_MS: 1000
    KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
kafka-manager:
  image: sheepkiller/kafka-manager
  links:
    - kafka
    - zookeeper
  environment:
    ZK_HOSTS: zookeeper:2181
    APPLICATION_SECRET: letmein
    KM_ARGS: -Djava.net.preferIPv4Stack=true
  ports:
    - "9000:9000"
docker-compose commands
docker-compose up -d        # deploy kafka
docker-compose stop         # stop kafka
docker-compose start        # start kafka
docker-compose rm -f        # remove kafka (it must be stopped before removing)
docker-compose scale kafka=n   # scale kafka up to n instances; with no other changes you directly get a cluster
Note: it is best to do the scale step right away; kafka=3 works well in my experience, though that is just a personal preference with no hard reason behind it. To find the host ports Docker assigned to each scaled broker, see the commands sketched below.
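After scaling, every kafka container still listens on 9092 internally, but Docker maps that to a different host port per instance. Assuming your docker-compose supports the port subcommand with --index (which selects a scaled instance), you can look the mappings up like this:
docker-compose port --index=1 kafka 9092
docker-compose port --index=2 kafka 9092
These host ports (32775 and 32776 in the examples below) are what the kafka-python clients connect to.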
3. kafka-manager configuration
Next, open kafka-manager and create a cluster.
In the title bar, open the Cluster drop-down menu and choose List, then go into the detailed parameter configuration.
Set Kafka Version to 0.8.2.1
Check Enable JMX Polling
Leave JMX Auth Username empty
Leave JMX Auth Password empty
Check all the remaining options
Once that is done, click Brokers in the title bar to get the list of brokers; the Ports shown there are very important!
4. Using Kafka with the kafka-python client
pip install kafka-python
Consumer
#coding=utf-8
from kafka import KafkaConsumer

# the two ports below are the host ports of the two brokers
consumer = KafkaConsumer('yaohui',
                         group_id='my-group',
                         bootstrap_servers=['10.162.213.99:32775',
                                            '10.162.213.99:32776'])
for message in consumer:
    print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
                                         message.offset, message.key,
                                         message.value))
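By default the consumer starts at the latest offset and yields raw bytes for key and value. If you want to read the topic from the beginning and get decoded strings, kafka-python's auto_offset_reset and value_deserializer parameters cover that; a minimal sketch reusing the topic and address from above:
#coding=utf-8
from kafka import KafkaConsumer

# start from the earliest available offset and decode values to str
consumer = KafkaConsumer('yaohui',
                         group_id='my-group',
                         bootstrap_servers=['10.162.213.99:32775'],
                         auto_offset_reset='earliest',
                         value_deserializer=lambda v: v.decode('utf-8'))
for message in consumer:
    print(message.value)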
Producer
#coding=utf-8
from kafka import KafkaProducer
from kafka.errors import KafkaError

producer = KafkaProducer(bootstrap_servers=['10.162.213.99:32775',
                                            '10.162.213.99:32776'])
# the value must be bytes unless a value_serializer is configured
future = producer.send('yaohui', b'this is just a test')
try:
    # block until the send is acknowledged (or fails) to read its metadata
    record_metadata = future.get(timeout=10)
    print(record_metadata.topic)
    print(record_metadata.partition)
    print(record_metadata.offset)
except KafkaError:
    # log.exception()
    pass
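send() is asynchronous, so the returned future can also take callbacks instead of a blocking get(), and it is worth flushing the producer before the script exits; a small sketch reusing the producer above:
# handle the result asynchronously instead of blocking on future.get()
def on_success(record_metadata):
    print(record_metadata.topic, record_metadata.partition, record_metadata.offset)

def on_error(exc):
    print("send failed:", exc)

producer.send('yaohui', b'another test').add_callback(on_success).add_errback(on_error)

# make sure buffered messages are actually sent before the process exits
producer.flush()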