Clickhouse的3节点/3分片/2副本/环形复制拓扑/分布式表/复制表/离线集群/部署

///离线安装/
ubuntu 16:
在有网的服务器上下载clickhouse安装包:
https://repo.yandex.ru/clickhouse/deb/stable/main/
clickhouse-client_19.4.3.11_all.deb
clickhouse-common-static_19.4.3.11_amd64.deb
clickhouse-server_19.4.3.11_all.deb
clickhouse-server-base_19.4.3.11_amd64.deb
clickhouse-server-common_19.4.3.11_all.deb
在有网的服务器上下载zk安装包:
https://archive.apache.org/dist/zookeeper/zookeeper-3.4.9/zookeeper-3.4.9.tar.gz

//用客户提供的服务器或者测试服务器,配置好离线环境的apt源
#注意: 用>>追加而不是>覆盖, 避免丢失/etc/hosts中原有的"127.0.0.1 localhost"等条目
echo "172.31.135.108 ck001
172.31.135.110 ck002
172.31.135.109 ck003" >> /etc/hosts

将ck和zk安装包上传到ck001, ck002, ck003的/opt/ckdeploy目录下

//1. 配置系统环境
//设置cpu频率模式
apt-get install cpufrequtils
cpufreq-set -c 0 -g performance
cat /proc/cpuinfo | grep MHz
参考:https://www.zhukun.net/archives/7572

apt-get install tzdata
echo "export TZ='Asia/Shanghai'" >> /etc/profile
echo "export LANG=zh_CN.UTF-8" >> /etc/profile
source /etc/profile

swapoff -a

apt install -y openjdk-8-jdk

//2. 安装zookeeper
cd /opt/ckdeploy
tar -xzvf zookeeper-3.4.9.tar.gz
mv zookeeper-3.4.9 /opt/
cd /opt
ln -s zookeeper-3.4.9 zookeeper
cd zookeeper/conf
vim zoo.cfg
///zoo.cfg内容///
clientPort=2181
tickTime=2000
initLimit=30000
syncLimit=10
maxClientCnxns=2000
maxSessionTimeout=60000000
dataDir=/opt/zookeeper/data
dataLogDir=/opt/zookeeper/logs

autopurge.snapRetainCount=10
autopurge.purgeInterval=1
preAllocSize=131072
snapCount=3000000
leaderServes=yes
standaloneEnabled=false
server.1=ck001:2888:3888
server.2=ck002:2888:3888
server.3=ck003:2888:3888
//
cd /opt/zookeeper
mkdir data
mkdir logs
//在ck001/ck002/ck003上分别执行以上操作,然后依次执行设置zk id的操作
//ck001
echo '1' > data/myid
//ck002
echo '2' > data/myid
//ck003
echo '3' > data/myid
//将三台服务器上zk都启动
/opt/zookeeper/bin/zkServer.sh start

//3. 安装clickhouse
//在三台服务器上分别用下面方法配置完clickhouse
cd /opt/ckdeploy
dpkg -i *.deb
cd /etc/clickhouse-server
vim config.xml
///config.xml中yandex节点下添加///
<listen_host>ck001</listen_host>
<include_from>/etc/clickhouse-server/myconf.xml</include_from>
//
//注意!!!!!!!!config.xml中listen_host在三台服务器上分别配置为ck001/ck002/ck003

vim myconf.xml
///myconf.xml全部内容///
<yandex>
<clickhouse_remote_servers>
    <mycluster>
        <shard>
            <weight>1</weight>
            <internal_replication>true</internal_replication>
            <replica>
                <host>ck001</host>
                <port>9000</port>
                <default_database>mycluster_shard_1</default_database>
            </replica>
            <replica>
                <host>ck002</host>
                <port>9000</port>
                <default_database>mycluster_shard_1</default_database>
            </replica>
        </shard>
        <shard>
            <weight>1</weight>
            <internal_replication>true</internal_replication>
            <replica>
                <host>ck002</host>
                <port>9000</port>
                <default_database>mycluster_shard_2</default_database>
            </replica>
            <replica>
                <host>ck003</host>
                <port>9000</port>
                <default_database>mycluster_shard_2</default_database>
            </replica>
        </shard>
        <shard>
            <weight>1</weight>
            <internal_replication>true</internal_replication>
            <replica>
                <host>ck003</host>
                <port>9000</port>
                <default_database>mycluster_shard_3</default_database>
            </replica>
            <replica>
                <host>ck001</host>
                <port>9000</port>
                <default_database>mycluster_shard_3</default_database>
            </replica>
        </shard>
    </mycluster>
</clickhouse_remote_servers>
<zookeeper-servers>
        <node index="1">
            <host>ck001</host>
            <port>2181</port>
        </node>
        <node index="2">
            <host>ck002</host>
            <port>2181</port>
        </node>
        <node index="3">
            <host>ck003</host>
            <port>2181</port>
        </node>
</zookeeper-servers>
</yandex>
//

mkdir users.d
cd users.d
vim huxl.xml
///huxl.xml全部内容///
<yandex>
    <users>
      <huxl>
          <profile>default</profile>
          <networks>
               <ip>::/0</ip>
          </networks>
          <password>xxxadmin</password>
          <quota>default</quota>
      </huxl>
    </users>
</yandex>
//

//分别在三台服务器上启动clickhouse服务
sudo service clickhouse-server start
//常用配置
//配置文件: /etc/clickhouse-server下
//ck服务器错误日志:/var/log/clickhouse-server/

///初始化3节点3分片2副本的集群/
参考:https://www.altinity.com/blog/2018/5/10/circular-replication-cluster-topology-in-clickhouse
在ck001服务器上通过客户端执行
clickhouse-client -h ck001 -u huxl --password xxxadmin
create database mycluster_shard_1;
create database mycluster_shard_2;
create database mycluster_shard_3;
在ck002服务器上通过客户端执行
clickhouse-client -h ck002 -u huxl --password xxxadmin
create database mycluster_shard_1;
create database mycluster_shard_2;
create database mycluster_shard_3;
在ck003服务器上通过客户端执行
clickhouse-client -h ck003 -u huxl --password xxxadmin
create database mycluster_shard_1;
create database mycluster_shard_2;
create database mycluster_shard_3;

//在ck001上执行
create table mycluster_shard_1.tbl_rep( \
  c1 Int32, \
  mydate Date \
) engine = ReplicatedMergeTree('/clickhouse/tables/1/dtbl', '1', mydate, (mydate), 8192);

//在ck002上执行
create table mycluster_shard_1.tbl_rep ( \
  c1 Int32,\
  mydate Date\
) engine = ReplicatedMergeTree('/clickhouse/tables/1/dtbl', '2', mydate, (mydate), 8192);

//在ck002上执行
create table mycluster_shard_2.tbl_rep (\
  c1 Int32,\
  mydate Date\
) engine = ReplicatedMergeTree('/clickhouse/tables/2/dtbl', '1', mydate, (mydate), 8192);

//在ck003上执行
create table mycluster_shard_2.tbl_rep (\
  c1 Int32,\
  mydate Date\
) engine = ReplicatedMergeTree('/clickhouse/tables/2/dtbl', '2', mydate, (mydate), 8192);

//在ck003上执行
create table mycluster_shard_3.tbl_rep (\
  c1 Int32,\
  mydate Date\
) engine = ReplicatedMergeTree('/clickhouse/tables/3/dtbl', '1', mydate, (mydate), 8192);

//在ck001上执行
create table mycluster_shard_3.tbl_rep (\
  c1 Int32,\
  mydate Date\
) engine = ReplicatedMergeTree('/clickhouse/tables/3/dtbl', '2', mydate, (mydate), 8192);

//在ck001上执行

use default;
create table dtbl (\
  c1 Int32,\
  mydate Date\
) engine=Distributed('mycluster', '', tbl_rep, rand());

insert into dtbl values (2, '2019-04-19');
select * from dtbl;

//至此结束

  • 1
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 3
    评论
评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值