ClickHouse 3-Node Distributed Architecture Deployment

/etc/hosts

192.168.1.14    ch14
192.168.1.15    ch15
192.168.1.16    ch16
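
A quick sanity check before going further, assuming the same /etc/hosts has been copied to all three machines:

# verify that every node resolves and answers
for h in ch14 ch15 ch16; do ping -c 1 "$h"; done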

zoo.cfg

[root@ch14 ~]# cat /home/i2/zookeeper/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/home/i2/zookeeper/data/zookeeper
dataLogDir=/home/i2/zookeeper/log/zookeeper
clientPort=2182
autopurge.purgeInterval=0
zookeeper.sasl.client=false
globalOutstandingLimit=200
server.14=ch14:2888:3888
server.15=ch15:2888:3888
server.16=ch16:2888:3888
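
The config alone is not enough: each server.N entry must be matched by a myid file under dataDir containing that node's N. A sketch of the remaining per-node steps, assuming the install path shown above:

# on ch14 (write 15 on ch15 and 16 on ch16)
echo 14 > /home/i2/zookeeper/data/zookeeper/myid
/home/i2/zookeeper/bin/zkServer.sh start
# once all three are up, one node should report "leader" and the others "follower"
/home/i2/zookeeper/bin/zkServer.sh status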

clickhouse-server/config.xml

[root@ch14 ~]# cat /etc/clickhouse-server/config.xml
<?xml version="1.0"?>
<yandex>
        <!-- Logging -->
        <logger>
                <level>warning</level>
                <log>/data/clickhouse/logs/clickhouse.log</log>
                <errorlog>/data/clickhouse/logs/error.log</errorlog>
                <size>500M</size>
                <count>5</count>
        </logger>
        <!-- Local node settings -->
        <http_port>8123</http_port>
        <tcp_port>9000</tcp_port>
        <interserver_http_port>9009</interserver_http_port>
        <!-- this machine's hostname or IP -->
        <interserver_http_host>ch14</interserver_http_host>
        <!-- Local settings -->
        <listen_host>ch14</listen_host>
        <max_connections>2048</max_connections>
        <keep_alive_timeout>3</keep_alive_timeout>
        <max_concurrent_queries>64</max_concurrent_queries>
        <uncompressed_cache_size>4294967296</uncompressed_cache_size>
        <mark_cache_size>5368709120</mark_cache_size>
        <path>/data/clickhouse/ch14/</path>
        <tmp_path>/data/clickhouse/ch14/tmp/</tmp_path>
        <users_config>/data/clickhouse/ch14/users.xml</users_config>
        <default_profile>default</default_profile>
        <query_log>
                <database>system</database>
                <table>query_log</table>
                <partition_by>toMonday(event_date)</partition_by>
                <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </query_log>
        <query_thread_log>
                <database>system</database>
                <table>query_thread_log</table>
                <partition_by>toMonday(event_date)</partition_by>
                <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </query_thread_log>
        <prometheus>
                <endpoint>/metrics</endpoint>
                <port>8001</port>
                <metrics>true</metrics>
                <events>true</events>
                <asynchronous_metrics>true</asynchronous_metrics>
        </prometheus>
        <default_database>default</default_database>
        <timezone>Asia/Shanghai</timezone>
        <!-- Cluster configuration -->
        <remote_servers incl="clickhouse_remote_servers" />
        <zookeeper incl="zookeeper-servers" optional="true" />
        <macros incl="macros" optional="true" />
        <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
        <max_session_timeout>3600</max_session_timeout>
        <default_session_timeout>300</default_session_timeout>
        <max_table_size_to_drop>0</max_table_size_to_drop>
        <merge_tree>
                <parts_to_delay_insert>300</parts_to_delay_insert>
                <parts_to_throw_insert>600</parts_to_throw_insert>
                <max_delay_to_insert>2</max_delay_to_insert>
        </merge_tree>
        <max_partition_size_to_drop>0</max_partition_size_to_drop>
        <distributed_ddl>
                <!-- Path in ZooKeeper to queue with DDL queries -->
                <path>/clickhouse/task_queue/ddl</path>
        </distributed_ddl>
        <include_from>/data/clickhouse/ch14/metrika.xml</include_from>
</yandex>
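
The same config.xml is reused on ch15 and ch16 with only the host-specific values changed (interserver_http_host, listen_host, and the /data/clickhouse/ch14/ paths). If the hostname is the only difference, a one-line rewrite works, though the result is worth eyeballing:

# run on ch15 after copying over ch14's config (ch16 analogous)
sed -i 's/ch14/ch15/g' /etc/clickhouse-server/config.xml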

metrika.xml

[root@ch14 ~]# cat /data/clickhouse/ch14/metrika.xml
<?xml version="1.0"?>
<yandex>
        <clickhouse_remote_servers>
                <perftest_3shards_1replicas>
                        <shard>
                                <internal_replication>true</internal_replication>
                                <replica>
                                        <host>ch14</host>
                                        <port>9000</port>
                                </replica>
                        </shard>
                        <shard>
                                <internal_replication>true</internal_replication>
                                <replica>
                                        <host>ch15</host>
                                        <port>9000</port>
                                </replica>
                        </shard>
                        <shard>
                                <internal_replication>true</internal_replication>
                                <replica>
                                        <host>ch16</host>
                                        <port>9000</port>
                                </replica>
                        </shard>
                </perftest_3shards_1replicas>
                <test_cluster_one_shards_three_replication>
                        <shard>
                                <internal_replication>true</internal_replication>
                                <replica>
                                        <host>ch14</host>
                                        <port>9000</port>
                                </replica>
                                <replica>
                                        <host>ch15</host>
                                        <port>9000</port>
                                </replica>
                                <replica>
                                        <host>ch16</host>
                                        <port>9000</port>
                                </replica>
                        </shard>
                </test_cluster_one_shards_three_replication>

        </clickhouse_remote_servers>

        <!-- ZooKeeper configuration -->
        <zookeeper-servers>
                <node index="14">
                        <host>ch14</host>
                        <port>2182</port>
                </node>
                <node index="15">
                        <host>ch15</host>
                        <port>2182</port>
                </node>
                <node index="16">
                        <host>ch16</host>
                        <port>2182</port>
                </node>
        </zookeeper-servers>

        <macros>
                <replica>ch14</replica>
        </macros>

        <networks>
                <ip>::/0</ip>
        </networks>

        <clickhouse_compression>
                <case>
                        <min_part_size>10000000000</min_part_size>
                        <min_part_size_ratio>0.01</min_part_size_ratio>
                        <method>lz4</method>
                </case>
        </clickhouse_compression>

</yandex>
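
After restarting clickhouse-server on all three nodes, both clusters defined above should be visible in system.clusters; a quick check:

clickhouse-client -u default --port 9000 -hch14 --query="SELECT cluster, shard_num, replica_num, host_name FROM system.clusters"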

Create the database

CREATE DATABASE testdb ON CLUSTER perftest_3shards_1replicas;
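
Because the statement goes through the distributed_ddl queue in ZooKeeper, the database should appear on every node, which is easy to verify from another host:

clickhouse-client -u default --port 9000 -hch15 --query="SHOW DATABASES"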

Create the local table

clickhouse-client -u default --port 9000 -hch16 --query="CREATE TABLE testdb.sbtest_local (EventDate DateTime, CounterID UInt32, UserID UInt32) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SETTINGS index_granularity = 8192;"
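
Note the command above only creates the table on ch16, so it has to be repeated against ch14 and ch15 as well. Alternatively, an ON CLUSTER form (relying on the distributed_ddl path configured earlier) creates it on all three nodes in one call:

clickhouse-client -u default --port 9000 -hch14 --query="CREATE TABLE testdb.sbtest_local ON CLUSTER perftest_3shards_1replicas (EventDate DateTime, CounterID UInt32, UserID UInt32) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SETTINGS index_granularity = 8192"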

Create the Distributed table

clickhouse-client -u default --port 9000 -hch14 --query="CREATE TABLE testdb.sbtest (EventDate DateTime, CounterID UInt32, UserID UInt32) ENGINE = Distributed(perftest_3shards_1replicas, testdb, sbtest_local, rand())"
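
Writes can also go through the Distributed table itself; the rand() argument in the engine definition routes each row to a random shard. A minimal smoke test:

clickhouse-client -u default --port 9000 -hch14 --query="INSERT INTO testdb.sbtest VALUES (now(), 1, 1)"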

insert.sh

[root@ch14 home]# cat insert.sh
#!/bin/bash
# Insert one row into each node's local table every 0.5 s.
# The huge loop bound effectively means "run until interrupted" (Ctrl-C to stop).
for ((i = 1; i <= 100000000000; i++)); do
        sleep 0.5
        clickhouse-client -u default --port 9000 -hch14 --query="INSERT INTO testdb.sbtest_local VALUES (now(), 30000, 30000)"
        clickhouse-client -u default --port 9000 -hch15 --query="INSERT INTO testdb.sbtest_local VALUES (now(), 30000, 30000)"
        clickhouse-client -u default --port 9000 -hch16 --query="INSERT INTO testdb.sbtest_local VALUES (now(), 30000, 30000)"
done

Count rows through the Distributed table

clickhouse-client -u default --port 9000 -hch15 --query="select count(*) from testdb.sbtest"
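
The count returned through the Distributed table is the sum over all shards. To see how the rows landed per node, count each local table directly (same user and port as above):

for h in ch14 ch15 ch16; do
        clickhouse-client -u default --port 9000 -h"$h" --query="SELECT '$h', count() FROM testdb.sbtest_local"
done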
