Operating Kafka with Python

References:
https://www.cnblogs.com/hei12138/p/7805475.html
https://blog.csdn.net/zt3032/article/details/78756293
https://kafka-python.readthedocs.io/en/master/index.html
https://blog.csdn.net/luanpeng825485697/java/article/details/81036028

We already know that Kafka is a message queue; now let's learn how to send data into Kafka and how to read it back out. First install the Python client library kafka-python:

```
pip install kafka-python
```

Following the official examples, let's get a first application running.
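As a quick sanity check that the install worked (a minimal sketch; no broker is contacted):

```python
# Verify the kafka-python installation; nothing connects to Kafka here
import kafka

print(kafka.__version__)
```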
1. Producer demo:

```python
from kafka import KafkaProducer
from kafka.errors import KafkaError
import json
import logging
import msgpack

log = logging.getLogger(__name__)

producer = KafkaProducer(bootstrap_servers=['broker1:1234'])

# Asynchronous by default
future = producer.send('my-topic', b'raw_bytes')

# Block for 'synchronous' sends
try:
    record_metadata = future.get(timeout=10)
except KafkaError:
    # Decide what to do if produce request failed...
    log.exception('produce request failed')
else:
    # Successful result returns assigned partition and offset
    print(record_metadata.topic)
    print(record_metadata.partition)
    print(record_metadata.offset)

# produce keyed messages to enable hashed partitioning
producer.send('my-topic', key=b'foo', value=b'bar')

# encode objects via msgpack
producer = KafkaProducer(value_serializer=msgpack.dumps)
producer.send('msgpack-topic', {'key': 'value'})

# produce json messages
producer = KafkaProducer(value_serializer=lambda m: json.dumps(m).encode('ascii'))
producer.send('json-topic', {'key': 'value'})

# produce asynchronously
for _ in range(100):
    producer.send('my-topic', b'msg')

def on_send_success(record_metadata):
    print(record_metadata.topic)
    print(record_metadata.partition)
    print(record_metadata.offset)

def on_send_error(excp):
    log.error('I am an errback', exc_info=excp)
    # handle exception

# produce asynchronously with callbacks
producer.send('my-topic', b'raw_bytes').add_callback(on_send_success).add_errback(on_send_error)

# block until all async messages are sent
producer.flush()

# configure multiple retries
producer = KafkaProducer(retries=5)
```

Once started, the producer sends byte streams to the Kafka server.
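The demo above serializes values only; KafkaProducer also accepts a key_serializer. A minimal sketch, assuming a broker on localhost:9092 and a hypothetical 'user-events' topic:

```python
import json
from kafka import KafkaProducer

# Serialize both keys and values as UTF-8 before sending
producer = KafkaProducer(
    bootstrap_servers=['localhost:9092'],
    key_serializer=lambda k: k.encode('utf-8'),
    value_serializer=lambda v: json.dumps(v).encode('utf-8'),
)

# Messages with the same key hash to the same partition,
# so events for one user stay ordered
producer.send('user-events', key='user-42', value={'action': 'login'})
producer.flush()  # block until the async send completes
```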
2. Consumer (simple demo):

```python
from kafka import KafkaConsumer

# Arguments: the topic to subscribe to and the Kafka server address
consumer = KafkaConsumer('test', bootstrap_servers=['127.0.0.1:9092'])

# This loop blocks forever. Messages are retained in the log rather than
# deleted on consumption, so every message keeps an offset in its partition.
for message in consumer:
    # consumer acts like a message queue: it yields messages as the broker
    # receives them and blocks waiting when none are available
    print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
                                         message.offset, message.key, message.value))
```

Once started, the consumer fetches data from the Kafka server.
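Because the iterator blocks forever, a bare script can only be interrupted from the keyboard. A small sketch (not from the original post) that shuts the consumer down cleanly on Ctrl-C:

```python
from kafka import KafkaConsumer

consumer = KafkaConsumer('test', bootstrap_servers=['127.0.0.1:9092'])
try:
    for message in consumer:
        print("%s:%d:%d" % (message.topic, message.partition, message.offset))
except KeyboardInterrupt:
    pass
finally:
    consumer.close()  # commit pending offsets (if enabled) and leave the group cleanly
```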
3. Consumer (consumer group):

```python
from kafka import KafkaConsumer

# With a group_id set, each message is delivered to only one consumer
# instance within the same group
consumer = KafkaConsumer('test', group_id='my-group',
                         bootstrap_servers=['127.0.0.1:9092'])
for message in consumer:
    print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
                                         message.offset, message.key, message.value))
```

Start several of these consumers and each message is consumed by only one group member, as required; consumer groups can therefore be scaled out horizontally to increase processing capacity (see the sketch below).
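A minimal sketch of running two group members on one machine with multiprocessing (assuming the 'test' topic has at least two partitions, otherwise one worker stays idle):

```python
from multiprocessing import Process
from kafka import KafkaConsumer

def worker(worker_id):
    # All workers share group_id 'my-group', so Kafka assigns each
    # partition of 'test' to exactly one of them
    consumer = KafkaConsumer('test', group_id='my-group',
                             bootstrap_servers=['127.0.0.1:9092'])
    for message in consumer:
        print("worker %d got %s:%d:%d" % (worker_id, message.topic,
                                          message.partition, message.offset))

if __name__ == '__main__':
    for i in range(2):
        Process(target=worker, args=(i,)).start()
```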
4. Consumer (reading from the earliest available message):

```python
from kafka import KafkaConsumer

consumer = KafkaConsumer('test', auto_offset_reset='earliest',
                         bootstrap_servers=['127.0.0.1:9092'])
for message in consumer:
    print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
                                         message.offset, message.key, message.value))
```

auto_offset_reset controls where to start when there is no committed offset: 'earliest' moves to the oldest available message, 'latest' to the newest; the default is 'latest'. The source also maps the legacy names: {'smallest': 'earliest', 'largest': 'latest'}.

5. Consumer (setting the offset manually):

```python
# Read messages from a specified position
from kafka import KafkaConsumer
from kafka.structs import TopicPartition

consumer = KafkaConsumer('test', bootstrap_servers=['127.0.0.1:9092'])
print(consumer.partitions_for_topic('test'))  # partition info for the 'test' topic
print(consumer.topics())        # list of topics on the broker
print(consumer.subscription())  # topics this consumer is subscribed to
print(consumer.assignment())    # TopicPartitions currently assigned to this consumer
print(consumer.beginning_offsets(consumer.assignment()))  # earliest offsets still available
consumer.seek(TopicPartition(topic='test', partition=0), 5)  # reset the offset: consume from offset 5
for message in consumer:
    print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
                                         message.offset, message.key, message.value))
```

Related offset helpers are sketched below.
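Besides seek(), kafka-python also provides seek_to_beginning() and seek_to_end(), and committed() to inspect a group's last committed offset. A short sketch (seeking requires the partition to already be assigned, hence the initial poll):

```python
from kafka import KafkaConsumer
from kafka.structs import TopicPartition

consumer = KafkaConsumer('test', group_id='my-group',
                         bootstrap_servers=['127.0.0.1:9092'])
tp = TopicPartition(topic='test', partition=0)

# Trigger the initial partition assignment before seeking
consumer.poll(timeout_ms=1000)

consumer.seek_to_beginning(tp)  # jump to the earliest available offset
consumer.seek_to_end(tp)        # jump past the latest message
print(consumer.committed(tp))   # last committed offset for this group (or None)
```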
6. Consumer (subscribing to multiple topics):

```python
# Subscribe to multiple topics
from kafka import KafkaConsumer
from kafka.structs import TopicPartition

consumer = KafkaConsumer(bootstrap_servers=['127.0.0.1:9092'])
consumer.subscribe(topics=('test', 'test0'))  # topics to consume
print(consumer.topics())
print(consumer.position(TopicPartition(topic='test', partition=0)))  # offset of the next record to fetch
for message in consumer:
    print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
                                         message.offset, message.key, message.value))
```

If you need full manual control over which partitions are read, assign() can be used instead of subscribe(), as sketched below.
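A minimal sketch, assuming the same local broker; note that subscribe() and assign() are mutually exclusive on one consumer instance:

```python
from kafka import KafkaConsumer
from kafka.structs import TopicPartition

consumer = KafkaConsumer(bootstrap_servers=['127.0.0.1:9092'])
# Manually take partition 0 of both topics; no group rebalancing happens
consumer.assign([TopicPartition('test', 0), TopicPartition('test0', 0)])
print(consumer.assignment())
for message in consumer:
    print("%s:%d:%d" % (message.topic, message.partition, message.offset))
```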
7. Consumer (polling for messages manually):

```python
from kafka import KafkaConsumer
import time

consumer = KafkaConsumer(bootstrap_servers=['127.0.0.1:9092'])
consumer.subscribe(topics=('test', 'test0'))
while True:
    msg = consumer.poll(timeout_ms=5)  # fetch whatever is available from Kafka
    print(msg)
    time.sleep(2)
```

Note that poll() returns batches rather than single messages; see the sketch below.
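poll() returns a dict mapping TopicPartition to a list of ConsumerRecords, so a realistic loop unpacks it like this (a sketch, same assumptions as above):

```python
from kafka import KafkaConsumer

consumer = KafkaConsumer(bootstrap_servers=['127.0.0.1:9092'])
consumer.subscribe(topics=('test', 'test0'))
while True:
    # batch is {TopicPartition: [ConsumerRecord, ...]}
    batch = consumer.poll(timeout_ms=1000)
    for tp, records in batch.items():
        for record in records:
            print("%s:%d:%d value=%s" % (tp.topic, tp.partition,
                                         record.offset, record.value))
```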
8. Consumer (pausing and resuming consumption):

```python
# Pause and resume consumption
from kafka import KafkaConsumer
from kafka.structs import TopicPartition
import time

consumer = KafkaConsumer(bootstrap_servers=['127.0.0.1:9092'])
consumer.subscribe(topics=('test',))
consumer.topics()
consumer.pause(TopicPartition(topic='test', partition=0))
# After pause(), the consumer stops fetching from this partition
# until resume() is called.
num = 0
while True:
    print(num)
    print(consumer.paused())  # partitions currently paused
    msg = consumer.poll(timeout_ms=5)
    print(msg)
    time.sleep(2)
    num = num + 1
    if num == 10:
        print("resume...")
        consumer.resume(TopicPartition(topic='test', partition=0))
        print("resume...")
```

Once pause() has run, the consumer cannot read from that partition until resume() is called.

Below is a complete demo:

```python
from kafka import KafkaConsumer
import json
import msgpack

# To consume latest messages and auto-commit offsets
consumer = KafkaConsumer('my-topic',
                         group_id='my-group',
                         bootstrap_servers=['localhost:9092'])
for message in consumer:
    # message value and key are raw bytes -- decode if necessary!
    # e.g., for unicode: message.value.decode('utf-8')
    print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
                                         message.offset, message.key, message.value))

# consume earliest available messages, don't commit offsets
KafkaConsumer(auto_offset_reset='earliest', enable_auto_commit=False)

# consume json messages
KafkaConsumer(value_deserializer=lambda m: json.loads(m.decode('ascii')))

# consume msgpack
KafkaConsumer(value_deserializer=msgpack.unpackb)

# StopIteration if no message after 1sec
KafkaConsumer(consumer_timeout_ms=1000)

# Subscribe to a regex topic pattern
consumer = KafkaConsumer()
consumer.subscribe(pattern='^awesome.*')

# Use multiple consumers in parallel w/ 0.9 kafka brokers
# typically you would run each on a different server / process / CPU
consumer1 = KafkaConsumer('my-topic',
                          group_id='my-group',
                          bootstrap_servers='my.server.com')
consumer2 = KafkaConsumer('my-topic',
                          group_id='my-group',
                          bootstrap_servers='my.server.com')
```

Creating a custom Kafka topic from Python. This snippet speaks the admin wire protocol directly and assumes it runs inside a function where `brokers`, `topic`, `num_partitions`, `configs`, and `timeout_ms` are defined by the caller; for a higher-level alternative, see the KafkaAdminClient sketch at the end of this section:

```python
from kafka import KafkaClient
from kafka.protocol import admin

client = KafkaClient(bootstrap_servers=brokers)
if topic not in client.cluster.topics(exclude_internal_topics=True):
    # Topic does not exist yet
    request = admin.CreateTopicsRequest_v0(
        create_topic_requests=[(
            topic,
            num_partitions,
            -1,   # replication unset
            [],   # partition assignment
            [(key, value) for key, value in configs.items()],  # configs
        )],
        timeout=timeout_ms
    )
    future = client.send(2, request)  # node 2 is the controller; sending to any other node fails
    client.poll(timeout_ms=timeout_ms, future=future, sleep=False)
    result = future.value
    # error_code = result.topic_error_codes[0][1]
    print("CREATE TOPIC RESPONSE: ", result)  # 0 success, 41 NOT_CONTROLLER, 36 ALREADY_EXISTS
    client.close()
else:
    # Topic already exists
    print("Topic already exists!")
```

Testing a Kafka cluster with pykafka (the code below is updated to Python 3; pykafka expects bytes for message payloads and consumer group names):

Producer code:

```python
# -*- coding: utf-8 -*-
from pykafka import KafkaClient

host = 'IP:9092,IP:9092,IP:9092'
client = KafkaClient(hosts=host)
print(client.topics)

# producer
topicdocu = client.topics['my-topic']
producer = topicdocu.get_producer()
for i in range(100):
    print(i)
    producer.produce(('test message ' + str(i ** 2)).encode('utf-8'))
producer.stop()
```

Consumer code:

```python
# -*- coding: utf-8 -*-
from pykafka import KafkaClient

host = 'IP:9092,IP:9092,IP:9092'
client = KafkaClient(hosts=host)
print(client.topics)

# consumer
topic = client.topics['my-topic']
consumer = topic.get_simple_consumer(consumer_group=b'test',
                                     auto_commit_enable=True,
                                     auto_commit_interval_ms=1,
                                     consumer_id=b'test')
for message in consumer:
    if message is not None:
        print(message.offset, message.value)
```
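As promised in the topic-creation section above: newer kafka-python releases (>= 1.4.4) ship a KafkaAdminClient that wraps the CreateTopics request. A minimal sketch, assuming a local broker and a hypothetical topic name:

```python
from kafka.admin import KafkaAdminClient, NewTopic
from kafka.errors import TopicAlreadyExistsError

admin_client = KafkaAdminClient(bootstrap_servers=['127.0.0.1:9092'])
try:
    # 'my-new-topic' is a hypothetical name; 3 partitions, replication factor 1
    admin_client.create_topics(new_topics=[
        NewTopic(name='my-new-topic', num_partitions=3, replication_factor=1)
    ])
except TopicAlreadyExistsError:
    print("Topic already exists!")
finally:
    admin_client.close()
```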
