1.clickhouse kafka引擎表
CREATE TABLE original_db.table_kafka ON CLUSTER standard_shard
(
    uuid String,
    userid Nullable(String)
)
ENGINE = Kafka()
SETTINGS
    kafka_broker_list = '100.11.21.22:22123,100.11.21.23:22123',
    kafka_topic_list = 'topic1',
    kafka_group_name = 'Group1',
    kafka_format = 'JSONEachRow',
    kafka_row_delimiter = '\n',
    kafka_num_consumers = 1;
2.建本地结构表
CREATE TABLE original_db.original_table ON CLUSTER standard_shard
(
    uuid String,
    userid Nullable(String),
    data_dt Date
)
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{layer}-{shard}/original_db/table', '{replica}')
PARTITION BY data_dt
PRIMARY KEY uuid
ORDER BY uuid
SETTINGS index_granularity = 8192;
-- 一定要写主键,否则查询数据时会出现时多时少的现象。
3.建立分布式表用作查询
CREATE TABLE db.table ON CLUSTER standard_shard
(
    uuid String,
    userid Nullable(String)
)
ENGINE = Distributed(standard_shard, 'original_db', 'original_table', javaHash(uuid));
-- 最后一个参数是数据分片规则,也可以用 rand()。
4.物化视图
DROP TABLE IF EXISTS original_db.table_kafka_consumer ON CLUSTER standard_shard;
CREATE MATERIALIZED VIEW original_db.table_kafka_consumer ON CLUSTER standard_shard
TO original_db.original_table
AS SELECT uuid, userid FROM original_db.table_kafka;
5.建立 JavaBean。如果不写入磁盘,不用重写 toString 方法做拼接 Field + '分隔符';字段中含有分隔符时要做处理,不能和 Kafka 表的分隔符一样。
6.String javaBeanStr = JSONObject.toJSONString(javaBean);
7.send(topic, javaBeanStr)
ProducerRecord<String, String> record = new ProducerRecord<>("Kafka", "Kafka_Products", "测试"); // Topic, Key, Value
try{
Future<RecordMetadata> future = producer.send(record);
future.get();//不关心是否发送成功,则不需要这行。
} catch(Exception e) {
e.printStackTrace();//连接错误、No Leader错误都可以通过重试解决;消息太大这类错误kafkaProducer不会进行任何重试,直接抛出异常
}
或者
for (int i = 0; i < 100; i++) {
//ProducerRecord有多个构造器,这里使用了三个参数的,topic、key、value。
producer.send(new ProducerRecord<String, String>("topic", Integer.toString(i), Integer.toString(i)));
}
producer.close();
2)异步发送消息
ProducerRecord<String, String> record = new ProducerRecord<>("Kafka", "Kafka_Products", "测试"); // Topic, Key, Value
producer.send(record, new DemoProducerCallback()); // 发送消息时传递一个回调对象,该回调对象必须实现 org.apache.kafka.clients.producer.Callback 接口
// Callback handed to producer.send(record, callback); the Kafka producer
// invokes it once the async send completes (successfully or not).
private class DemoProducerCallback implements Callback {
@Override
// e is non-null when the send failed; null means the record was acknowledged.
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
if (e != null) {// Kafka returned an error: the callback RECEIVES a non-null exception (it does not throw).
e.printStackTrace();// Handle the failure here; this demo simply prints the stack trace.
}
}
}