1. Environment preparation
IP | OS | Database | Version | Memory | CPU | Disk |
---|---|---|---|---|---|---|
192.168.5.31 | CentOS Linux release 7.6.1810 (Core) | ClickHouse | 20.8.3.18 | 96GB | 2×16 cores | 4×600GB SAS RAID 10 |
192.168.5.32 | CentOS Linux release 7.6.1810 (Core) | ClickHouse | 20.8.3.18 | 96GB | 2×16 cores | 8×600GB SAS RAID 10 |
192.168.5.33 | CentOS Linux release 7.6.1810 (Core) | ClickHouse | 20.8.3.18 | 96GB | 2×16 cores | 8×600GB SAS RAID 10 |
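To confirm that each host matches the table above, the basics can be checked with standard commands (a quick sketch; nothing here is specific to this cluster):
# OS release, CPU, memory and disks
cat /etc/redhat-release
lscpu | grep -E '^CPU\(s\)|Socket|Core'
free -g
lsblk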
Required packages:
clickhouse-client-20.8.3.18-1.el7.x86_64.rpm
clickhouse-common-static-20.8.3.18-1.el7.x86_64.rpm
clickhouse-server-20.8.3.18-1.el7.x86_64.rpm
clickhouse-server-common-20.8.3.18-1.el7.x86_64.rpm
2. Install and configure ClickHouse
2.1 Create the clickhouse user
useradd -m clickhouse
passwd clickhouse
# Enter the password at the prompt, e.g. clickhouse@123
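On CentOS the password can also be set non-interactively with passwd --stdin (a sketch using the example password above):
echo 'clickhouse@123' | passwd --stdin clickhouse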
2.2 Install the ClickHouse packages (as root)
# Upload the ClickHouse installation packages to /opt/clickhouse/:
cd /opt/clickhouse/ && ls
rpm -ivh clickhouse*
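Once the rpm install finishes, it is worth confirming that all four packages are registered and that the client binary runs:
rpm -qa | grep clickhouse
clickhouse-client --version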
2.3 Disk layout
su - clickhouse
mkdir -p /home/clickhouse/{log,data}
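A quick check that the directories exist and are owned by the clickhouse user (they were created as clickhouse, so ownership should already be correct):
ls -ld /home/clickhouse/log /home/clickhouse/data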
2.4 Modify the init script
vim /etc/rc.d/init.d/clickhouse-server
CLICKHOUSE_LOGDIR=/home/clickhouse/log
CLICKHOUSE_LOGDIR_USER=clickhouse
CLICKHOUSE_DATADIR=/home/clickhouse/data
# Edit the file on one host, then copy it to the other two hosts
scp -rp /etc/rc.d/init.d/clickhouse-server 192.168.5.32:/etc/rc.d/init.d/
scp -rp /etc/rc.d/init.d/clickhouse-server 192.168.5.33:/etc/rc.d/init.d/
2.5 Modify config.xml
Note: each server's configuration must use that host's own IP address (e.g. interserver_http_host).
Edit the following entries in config.xml:
vim /etc/clickhouse-server/config.xml
<log>/home/clickhouse/log/clickhouse-server.log</log>
<errorlog>/home/clickhouse/log/clickhouse-server.err.log</errorlog>
<path>/home/clickhouse/data/clickhouse/</path>
<tmp_path>/home/clickhouse/data/clickhouse/tmp/</tmp_path>
<users_config>users.xml</users_config>
<interserver_http_host>192.168.5.31</interserver_http_host>
<listen_host>::1</listen_host>
<listen_host>0.0.0.0</listen_host>
<format_schema_path>/home/clickhouse/data/clickhouse/format_schemas/</format_schema_path>
Above the line <zookeeper incl="zookeeper-servers" optional="true" />, add the following entry:
<include_from>/etc/clickhouse-server/metrika.xml</include_from>
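After editing, it can help to verify that config.xml is still well-formed XML; a minimal check, assuming xmllint (from the libxml2 package) is installed:
# Prints nothing on success, a parse error otherwise
xmllint --noout /etc/clickhouse-server/config.xml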
<!-- Edit config.xml on one host, copy it to the other two, then change the IP addresses on each host to its own -->
scp -rp /etc/clickhouse-server/config.xml 192.168.5.32:/etc/clickhouse-server/config.xml
scp -rp /etc/clickhouse-server/config.xml 192.168.5.33:/etc/clickhouse-server/config.xml
<!-- Change ownership of the clickhouse-server lock directory to clickhouse -->
chown -R clickhouse:clickhouse /var/lock/clickhouse-server
3. Install and configure ZooKeeper
3.1 Install and configure the JDK
# Check the currently installed Java version
java -version
# Upload jdk1.8.0_271 to /opt/java/
mkdir -p /opt/java/
# Install jdk1.8.0_271 and configure the environment variables
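# Extraction sketch, assuming the uploaded archive is named jdk-8u271-linux-x64.tar.gz
# (adjust to the real file name); it unpacks to /opt/java/jdk1.8.0_271:
tar -zxvf /opt/java/jdk-8u271-linux-x64.tar.gz -C /opt/java/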
echo " ">>/etc/profile
echo "# Made for jdk env by lele on $(date +%F)">>/etc/profile
echo 'export JAVA_HOME=/opt/java/jdk1.8.0_271'>>/etc/profile
echo 'export JRE_HOME=$JAVA_HOME/jre'>>/etc/profile
echo 'export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib'>>/etc/profile
echo 'export PATH=$JAVA_HOME/bin:$PATH'>>/etc/profile
tail -6 /etc/profile
source /etc/profile
echo $PATH
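With the profile sourced, the new JDK should now be the default:
java -version    # should report 1.8.0_271
echo $JAVA_HOME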
3.2 Install ZooKeeper
ZooKeeper download URL | https://archive.apache.org/dist/zookeeper/zookeeper-3.4.13/zookeeper-3.4.13.tar.gz |
---|---|
ZooKeeper version | zookeeper-3.4.13.tar.gz |
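If the archive has not been uploaded yet, it can be fetched directly from the URL above (assuming the host has outbound network access and wget installed):
wget -P /home/zookeeper https://archive.apache.org/dist/zookeeper/zookeeper-3.4.13/zookeeper-3.4.13.tar.gz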
# Create the zookeeper user (as root)
useradd -m zookeeper
passwd zookeeper
# Enter the password at the prompt, e.g. zookeeper@123
# Extract the archive as the zookeeper user
su - zookeeper
tar -zxvf zookeeper-3.4.13.tar.gz
# Edit the configuration file
cd /home/zookeeper/zookeeper-3.4.13/conf
cp zoo_sample.cfg zoo.cfg    # the distribution ships zoo_sample.cfg as a template
vim zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/home/zookeeper/zookeeper-3.4.13/data
dataLogDir=/home/zookeeper/zookeeper-3.4.13/log
clientPort=2181
server.1=192.168.5.31:2888:3888
server.2=192.168.5.32:2888:3888
server.3=192.168.5.33:2888:3888
# Create the data and log directories
mkdir /home/zookeeper/zookeeper-3.4.13/data
mkdir /home/zookeeper/zookeeper-3.4.13/log
Run the corresponding command below on each server:
# server1:
echo 1 > /home/zookeeper/zookeeper-3.4.13/data/myid
# server2:
echo 2 > /home/zookeeper/zookeeper-3.4.13/data/myid
# server3:
echo 3 > /home/zookeeper/zookeeper-3.4.13/data/myid
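A quick sanity check that each node received the correct id:
cat /home/zookeeper/zookeeper-3.4.13/data/myid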
3.3 Configure ZooKeeper environment variables
vim /etc/profile
# added for zookeeper
export ZOOKEEPER_HOME=/home/zookeeper/zookeeper-3.4.13
export PATH=$ZOOKEEPER_HOME/bin:$ZOOKEEPER_HOME/conf:$PATH
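Reload the profile and confirm the variables took effect:
source /etc/profile
echo $ZOOKEEPER_HOME
which zkServer.sh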
3.4 Configure ZooKeeper to start on boot
# Enable ZooKeeper autostart on boot (configure on all three servers)
vim /etc/rc.d/rc.local
# Add the following lines (ZooKeeper runs as the zookeeper user created above)
export JAVA_HOME=/opt/java/jdk1.8.0_271
su - zookeeper -c "/home/zookeeper/zookeeper-3.4.13/bin/zkServer.sh start"
# Make rc.local executable
chmod a+x /etc/rc.d/rc.local
3.5 Start the ZooKeeper cluster
# Start ZooKeeper:
/home/zookeeper/zookeeper-3.4.13/bin/zkServer.sh start
# Check ZooKeeper status:
/home/zookeeper/zookeeper-3.4.13/bin/zkServer.sh status
# Stop ZooKeeper:
/home/zookeeper/zookeeper-3.4.13/bin/zkServer.sh stop
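Besides zkServer.sh status, the ensemble can also be probed with ZooKeeper's four-letter-word commands; a sketch assuming nc (nmap-ncat) is installed and the default 4lw whitelist is in effect:
echo ruok | nc 192.168.5.31 2181    # should answer "imok"
echo stat | nc 192.168.5.31 2181    # shows Mode: leader or follower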
4. ClickHouse cluster configuration
4.1 Configure metrika.xml
Create /etc/clickhouse-server/metrika.xml (the file referenced by include_from above) with the following content:
vim /etc/clickhouse-server/metrika.xml
<yandex>
    <clickhouse_remote_servers>
        <my_cluster>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>192.168.5.31</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>192.168.5.32</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>192.168.5.33</host>
                    <port>9000</port>
                </replica>
            </shard>
        </my_cluster>
    </clickhouse_remote_servers>
    <zookeeper-servers>
        <node index="1">
            <host>192.168.5.31</host>
            <port>2181</port>
        </node>
        <node index="2">
            <host>192.168.5.32</host>
            <port>2181</port>
        </node>
        <node index="3">
            <host>192.168.5.33</host>
            <port>2181</port>
        </node>
    </zookeeper-servers>
    <!-- macros must be unique per host: keep shard 01 on all three nodes, but set replica to each host's own IP -->
    <macros>
        <shard>01</shard>
        <replica>192.168.5.31</replica>
    </macros>
    <networks>
        <ip>::/0</ip>
    </networks>
    <clickhouse_compression>
        <case>
            <min_part_size>0</min_part_size>
            <min_part_size_ratio>0.01</min_part_size_ratio>
            <method>lz4</method>
        </case>
    </clickhouse_compression>
</yandex>
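Like config.xml, metrika.xml must exist on all three hosts, and only the <replica> value inside <macros> should differ. A sketch of distributing it:
scp -rp /etc/clickhouse-server/metrika.xml 192.168.5.32:/etc/clickhouse-server/metrika.xml
scp -rp /etc/clickhouse-server/metrika.xml 192.168.5.33:/etc/clickhouse-server/metrika.xml
# On 192.168.5.32 change <replica> to 192.168.5.32, on 192.168.5.33 change it to 192.168.5.33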
4.2 Start the ClickHouse cluster
# Run on each of the three hosts
service clickhouse-server start
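To confirm each node came up cleanly:
service clickhouse-server status
tail -n 50 /home/clickhouse/log/clickhouse-server.err.log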
4.3 Uninstall the ClickHouse cluster (only needed for a clean reinstall)
# Uninstall the ClickHouse packages
yum remove clickhouse* -y
# Remove the leftover directories and files
cd /var/lib/
rm -rf clickhouse/
cd /etc/
rm -rf clickhouse-*
cd /var/spool/mail
rm -f clickhouse
cd /var/lock
rm -f clickhouse-server
4.4 Test the ClickHouse cluster
clickhouse-client -h 127.0.0.1 --port 9000 -u default -m
select 1;
select * from system.clusters\G
Row 1:
──────
cluster: my_cluster
shard_num: 1
shard_weight: 1
replica_num: 1
host_name: 192.168.5.31
host_address: 192.168.5.31
port: 9000
is_local: 1
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
Row 2:
──────
cluster: my_cluster
shard_num: 1
shard_weight: 1
replica_num: 2
host_name: 192.168.5.32
host_address: 192.168.5.32
port: 9000
is_local: 0
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
Row 3:
──────
cluster: my_cluster
shard_num: 1
shard_weight: 1
replica_num: 3
host_name: 192.168.5.33
host_address: 192.168.5.33
port: 9000
is_local: 0
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
Row 4:
──────
cluster: test_cluster_two_shards
shard_num: 1
shard_weight: 1
replica_num: 1
host_name: 127.0.0.1
host_address: 127.0.0.1
port: 9000
is_local: 1
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
Row 5:
──────
cluster: test_cluster_two_shards
shard_num: 2
shard_weight: 1
replica_num: 1
host_name: 127.0.0.2
host_address: 127.0.0.2
port: 9000
is_local: 0
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
Row 6:
──────
cluster: test_cluster_two_shards_localhost
shard_num: 1
shard_weight: 1
replica_num: 1
host_name: localhost
host_address: ::1
port: 9000
is_local: 1
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
Row 7:
──────
cluster: test_cluster_two_shards_localhost
shard_num: 2
shard_weight: 1
replica_num: 1
host_name: localhost
host_address: ::1
port: 9000
is_local: 1
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
Row 8:
──────
cluster: test_shard_localhost
shard_num: 1
shard_weight: 1
replica_num: 1
host_name: localhost
host_address: ::1
port: 9000
is_local: 1
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
Row 9:
───────
cluster: test_shard_localhost_secure
shard_num: 1
shard_weight: 1
replica_num: 1
host_name: localhost
host_address: ::1
port: 9440
is_local: 0
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
Row 10:
───────
cluster: test_unavailable_shard
shard_num: 1
shard_weight: 1
replica_num: 1
host_name: localhost
host_address: ::1
port: 9000
is_local: 1
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
Row 11:
───────
cluster: test_unavailable_shard
shard_num: 2
shard_weight: 1
replica_num: 1
host_name: localhost
host_address: ::1
port: 1
is_local: 0
user: default
default_database:
errors_count: 0
estimated_recovery_time: 0
11 rows in set. Elapsed: 0.004 sec.
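As a further check that replication really works through ZooKeeper, a replicated table can be created using the {shard} and {replica} macros from metrika.xml, written on one node and read from another. A minimal sketch (the table name test_repl and the ZooKeeper path are illustrative only, and it assumes the default <distributed_ddl> section of config.xml is still in place):
# Create the table on every node of my_cluster in one statement
clickhouse-client -h 192.168.5.31 --port 9000 -u default --query "CREATE TABLE default.test_repl ON CLUSTER my_cluster (id UInt32, name String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/test_repl', '{replica}') ORDER BY id"
# Write on one replica ...
clickhouse-client -h 192.168.5.31 --port 9000 -u default --query "INSERT INTO default.test_repl VALUES (1, 'hello')"
# ... and read it back from another
clickhouse-client -h 192.168.5.32 --port 9000 -u default --query "SELECT * FROM default.test_repl"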