ClickHouse Cluster Deployment

Host IPs
10.0.0.90
10.0.0.91
10.0.0.92

ZooKeeper Container Installation

  • Install the container runtime (Docker)
cat >> /etc/sysctl.conf <<EOF
net.ipv4.ip_forward = 1
vm.max_map_count = 262144
EOF
sysctl -p
yum install -y yum-utils device-mapper-persistent-data lvm2
curl -o /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum install -y docker-ce
systemctl enable docker
systemctl start docker
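A quick sanity check before moving on (a minimal sketch; docker info ships with Docker itself):
# Confirm the daemon is running and print its version
docker info --format '{{.ServerVersion}}'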
Create the mount directories
Create per-node directories for the ZooKeeper config (conf), data (data), transaction logs (datalog), and logs (logs):
mkdir -p /data/zookeeper1/{conf,data,datalog,logs}
mkdir -p /data/zookeeper2/{conf,data,datalog,logs}
mkdir -p /data/zookeeper3/{conf,data,datalog,logs}
Start the ZooKeeper containers
# The start command is identical on all three nodes; only ZOO_MY_ID and --name change
docker run -d --restart always --net=host --name zookeeper-node1 \
-e TZ="Asia/Shanghai" \
-e ZOO_MY_ID=1 \
-e "ZOO_SERVERS=server.1=10.0.0.90:2888:3888;2181 server.2=10.0.0.91:2888:3888;2181 server.3=10.0.0.92:2888:3888;2181" \
-v /data/zookeeper1/data:/data \
-v /data/zookeeper1/datalog:/datalog \
-v /data/zookeeper1/logs:/logs \
zookeeper:3.8.2
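For reference, the matching command on the second host; a sketch assuming 10.0.0.91 uses the /data/zookeeper2 directories. Per the comment above, only ZOO_MY_ID, --name, and the host paths change:
docker run -d --restart always --net=host --name zookeeper-node2 \
-e TZ="Asia/Shanghai" \
-e ZOO_MY_ID=2 \
-e "ZOO_SERVERS=server.1=10.0.0.90:2888:3888;2181 server.2=10.0.0.91:2888:3888;2181 server.3=10.0.0.92:2888:3888;2181" \
-v /data/zookeeper2/data:/data \
-v /data/zookeeper2/datalog:/datalog \
-v /data/zookeeper2/logs:/logs \
zookeeper:3.8.2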


# To supply your own zoo.cfg instead, add this mount:
-v /data/zookeeper1/conf/zoo.cfg:/conf/zoo.cfg
Verify the cluster
# The three ports:
# 1. 2181: serves client requests
# 2. 3888: used for leader election
# 3. 2888: intra-cluster communication (the leader listens on this port)
# Check ZooKeeper status
docker exec -it zookeeper-node1 zkServer.sh status
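Because the zoo.cfg below whitelists the four-letter-word commands, each node can also be probed over the network; a small sketch assuming nc (netcat) is installed on the host:
# Ask every node for its role (one should report leader, the others follower)
for ip in 10.0.0.90 10.0.0.91 10.0.0.92; do
    echo -n "$ip: "; echo srvr | nc "$ip" 2181 | grep Mode
done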
To configure zoo.cfg separately:
vim /data/zookeeper1/conf/zoo.cfg
# Directory where ZooKeeper stores its data; by default the transaction log is written here as well
dataDir=/data
# Transaction log directory; if not set, dataDir is used
dataLogDir=/datalog
# Heartbeat interval between servers, and between clients and servers; one heartbeat is sent every tickTime. In milliseconds
tickTime=2000
# Maximum number of heartbeats (tickTimes) a follower (F) may take for its initial connection to the leader (L)
initLimit=5
# Maximum number of heartbeats (tickTimes) allowed between a request and its reply between a follower and the leader
syncLimit=2
# Defaults to 3; cannot be set as a system property. Number of snapshot files (and their matching transaction logs) kept by the automatic purge. The minimum is 3; smaller values are bumped up to 3
autopurge.snapRetainCount=3
# Defaults to 0, in hours; cannot be set as a system property. Frequency of the automatic history purge. 0 or a negative value disables the purge task
autopurge.purgeInterval=0
# Defaults to 60; cannot be set as a system property. Socket-level limit on concurrent connections from a single client (by IP address) to a single server.
# 0 means no limit. This only caps one client against one ZooKeeper server; it does not bound the total across all clients
maxClientCnxns=60
# New in 3.5.0: when set to false, a single server can start in replicated mode, a lone participant can run with observers, and a cluster can be reconfigured down to (and up from) one node.
# Defaults to true for backward compatibility. It can be set via QuorumPeerConfig's setStandaloneEnabled method or by adding standaloneEnabled=false or standaloneEnabled=true to the server's config file
standaloneEnabled=false
# Disable the embedded admin console
admin.enableServer=false
# Enable the four-letter-word commands by whitelisting all of them
4lw.commands.whitelist=*
# List of servers in the ensemble
server.1=10.0.0.90:2888:3888;2181
server.2=10.0.0.91:2888:3888;2181
server.3=10.0.0.92:2888:3888;2181
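Note that the official zookeeper image derives the myid file from ZOO_MY_ID; if you mount zoo.cfg yourself and drop that variable, create myid by hand (a sketch, using the host paths created earlier):
echo 1 > /data/zookeeper1/data/myid   # use 2 and 3 on the other hosts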

ClickHouse

Official docs: https://clickhouse.com/docs/zh/getting-started/install

Process (bare-metal) deployment
# Process deployment
useradd clickhouse
mkdir -p /data/clickhouse
yum install -y yum-utils
wget https://packages.clickhouse.com/rpm/stable/clickhouse-client-23.3.8.21.x86_64.rpm --no-check-certificate
wget https://packages.clickhouse.com/rpm/stable/clickhouse-server-23.3.8.21.x86_64.rpm --no-check-certificate
wget https://packages.clickhouse.com/rpm/stable/clickhouse-common-static-23.3.8.21.x86_64.rpm --no-check-certificate
yum localinstall -y *.rpm
# What to change for the cluster
/etc/clickhouse-server: the server-side configuration directory, containing the global config config.xml, the user config users.xml, etc.
vi /etc/clickhouse-server/config.xml
# Start method 1
/etc/init.d/clickhouse-server start
# Start method 2
sudo -u 'clickhouse' /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid
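Either way, a quick liveness check; a sketch assuming the default HTTP port 8123. The --password flag matches the users.xml shown below; omit it on a pristine install:
# "Ok." means the server is accepting HTTP requests
curl http://127.0.0.1:8123/ping
clickhouse-client --password 123456 -q "SELECT version()"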
config.xml
# Uncomment this: in a cluster deployment every host must listen for external connections
    <!-- Same for hosts without support for IPv6: -->
    <listen_host>0.0.0.0</listen_host>
# Three sections need to be added. Create a metrika.xml file under /etc/clickhouse-server/
# and point include_from at it (include_from appears only once in config.xml).
    <include_from>/etc/clickhouse-server/metrika.xml</include_from>

    <remote_servers incl="clickhouse_remote_servers">
    </remote_servers>

    <zookeeper incl="zookeeper-servers">
    </zookeeper>

    <macros incl="macros">
    </macros>
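After a restart, ClickHouse writes the merged configuration to its preprocessed config, which is a convenient place to confirm the includes were substituted; a sketch assuming the default data path /var/lib/clickhouse:
# The expanded remote_servers/zookeeper/macros sections should appear here
grep -A3 '<zookeeper>' /var/lib/clickhouse/preprocessed_configs/config.xml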

users.xml

<clickhouse>
    <!-- See also the files in users.d directory where the settings can be overridden. -->

    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <default>
            <password>123456</password>
            <networks>
                <ip>::/0</ip>
            </networks>
            <profile>default</profile>
            <quota>default</quota>
            <access_management>1</access_management>
        </default>
        <admin>
            <password>123456</password>
            <networks>
                <ip>::/0</ip>
            </networks>
            <profile>default</profile>
            <quota>default</quota>
        </admin>
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>
                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</clickhouse>
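The passwords above are stored in plain text; users.xml also accepts a SHA-256 digest via the password_sha256_hex element instead of password. A sketch for generating the digest (replace 123456 with your own secret):
# The output goes into <password_sha256_hex>...</password_sha256_hex>
echo -n "123456" | sha256sum | awk '{print $1}'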
metrika.xml
# Three shards, each with one extra replica (circular replication across the three hosts)
# Only the macros section differs between the three nodes
cat /data/clickhouse3/conf/metrika.xml
<yandex>
    <clickhouse_remote_servers>
        <remote_servers>
            <shard>
                <weight>1</weight>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>10.0.0.90</host>
                    <port>9000</port>
                    <user>default</user>
                    <password>123456</password>
                </replica>
                <replica>
                    <host>10.0.0.91</host>
                    <port>9001</port>
                    <user>default</user>
                    <password>123456</password>
                </replica>
            </shard>
            <shard>
                <weight>1</weight>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>10.0.0.91</host>
                    <port>9000</port>
                    <user>default</user>
                    <password>123456</password>
                </replica>
                <replica>
                    <host>10.0.0.92</host>
                    <port>9001</port>
                    <user>default</user>
                    <password>123456</password>
                </replica>
            </shard>
            <shard>
                <weight>1</weight>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>10.0.0.92</host>
                    <port>9000</port>
                    <user>default</user>
                    <password>123456</password>
                </replica>
                <replica>
                    <host>10.0.0.90</host>
                    <port>9001</port>
                    <user>default</user>
                    <password>123456</password>
                </replica>
            </shard>
        </remote_servers>
    </clickhouse_remote_servers>
    <zookeeper-servers>
        <node>
            <host>10.0.0.90</host>
            <port>2181</port>
        </node>
        <node>
            <host>10.0.0.91</host>
            <port>2181</port>
        </node>
        <node>
            <host>10.0.0.92</host>
            <port>2181</port>
        </node>
    </zookeeper-servers>
    <macros>
        <!-- Per-node values: this example is for node 3 (shard 03); use the matching shard and replica names on the other nodes -->
        <shard>03</shard>
        <replica>cluster01-03-01</replica>
    </macros>
</yandex>
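The macros pay off when creating replicated tables: {shard} and {replica} expand per node, so one DDL statement works cluster-wide. A sketch (the database and table names are illustrative) using the cluster name remote_servers defined above:
CREATE TABLE default.test_local ON CLUSTER remote_servers
(
    id UInt64,
    ts DateTime
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/test_local', '{replica}')
ORDER BY id;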
Writing the Dockerfile
FROM centos:7
# ClickHouse 23.3.8.21 image
ARG gosu_ver=1.10
RUN mkdir -p /data/clickhouse/
COPY ./clickhouse-client-23.3.8.21.x86_64.rpm /data/clickhouse/
COPY ./clickhouse-common-static-23.3.8.21.x86_64.rpm /data/clickhouse/
COPY ./clickhouse-server-23.3.8.21.x86_64.rpm  /data/clickhouse/
COPY ./entrypoint.sh  /data/clickhouse/entrypoint.sh
ADD https://github.com/tianon/gosu/releases/download/${gosu_ver}/gosu-amd64 /bin/gosu
WORKDIR /data/clickhouse/
RUN echo "*    -    nofile  65535" >>/etc/security/limits.conf\
    && ulimit -SHn 65535\
    && curl  -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo\
    && curl  -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo\
    && yum update -y\
    && yum -y install epel-release yum-utils sudo\
    && ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime\
    && echo 'Asia/Shanghai' >/etc/timezone\
    && useradd clickhouse\
    && yum localinstall -y *.rpm\
    && chown clickhouse:clickhouse -R /var/run/clickhouse-server/\
    && chmod +x /data/clickhouse/entrypoint.sh\
    && chmod +x /bin/gosu
EXPOSE 9000
EXPOSE 8123
EXPOSE 9004
EXPOSE 9009
VOLUME /var/log/clickhouse
VOLUME /etc/clickhouse-server/
# ENTRYPOINT (rather than CMD) so that arguments passed to docker run reach the "$@" handling in entrypoint.sh
ENTRYPOINT ["/data/clickhouse/entrypoint.sh"]

docker build -t test:v1 .
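A quick smoke test of the freshly built image before wiring up the cluster; a sketch (the argument falls through the entrypoint's exec "$@" branch and just prints the installed version):
docker run --rm test:v1 clickhouse-server --version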
entrypoint.sh
cat /data/clickhouse1/entrypoint.sh


#!/bin/bash

set -eo pipefail
shopt -s nullglob

DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
    DO_CHOWN=0
fi

CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"

# support --user
if [ "$(id -u)" = "0" ]; then
    USER=$CLICKHOUSE_UID
    GROUP=$CLICKHOUSE_GID
    if command -v gosu &> /dev/null; then
        gosu="gosu $USER:$GROUP"
    elif command -v su-exec &> /dev/null; then
        gosu="su-exec $USER:$GROUP"
    else
        echo "No gosu/su-exec detected!"
        exit 1
    fi
else
    USER="$(id -u)"
    GROUP="$(id -g)"
    gosu=""
    DO_CHOWN=0
fi

# set some vars
CLICKHOUSE_CONFIG="${CLICKHOUSE_CONFIG:-/etc/clickhouse-server/config.xml}"

if ! $gosu test -f "$CLICKHOUSE_CONFIG" -a -r "$CLICKHOUSE_CONFIG"; then
    echo "Configuration file '$CLICKHOUSE_CONFIG' isn't readable by user with id '$USER'"
    exit 1
fi

# get CH directories locations
DATA_DIR="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=path || true)"
TMP_DIR="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=tmp_path || true)"
USER_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=user_files_path || true)"
LOG_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=logger.log || true)"
LOG_DIR=""
if [ -n "$LOG_PATH" ]; then LOG_DIR="$(dirname "$LOG_PATH")"; fi
ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=logger.errorlog || true)"
ERROR_LOG_DIR=""
if [ -n "$ERROR_LOG_PATH" ]; then ERROR_LOG_DIR="$(dirname "$ERROR_LOG_PATH")"; fi
FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=format_schema_path || true)"

CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"
CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-}"
CLICKHOUSE_DB="${CLICKHOUSE_DB:-}"
CLICKHOUSE_ACCESS_MANAGEMENT="${CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT:-0}"

for dir in "$DATA_DIR" \
  "$ERROR_LOG_DIR" \
  "$LOG_DIR" \
  "$TMP_DIR" \
  "$USER_PATH" \
  "$FORMAT_SCHEMA_PATH"
do
    # check if variable not empty
    [ -z "$dir" ] && continue
    # ensure directories exist
    if ! mkdir -p "$dir"; then
        echo "Couldn't create necessary directory: $dir"
        exit 1
    fi

    if [ "$DO_CHOWN" = "1" ]; then
        # ensure proper directory permissions,
        # but skip it if the directory already has the proper ownership, since a recursive chown may be slow
        if [ "$(stat -c %u "$dir")" != "$USER" ] || [ "$(stat -c %g "$dir")" != "$GROUP" ]; then
            chown -R "$USER:$GROUP" "$dir"
        fi
    elif ! $gosu test -d "$dir" -a -w "$dir" -a -r "$dir"; then
        echo "Necessary directory '$dir' isn't accessible by user with id '$USER'"
        exit 1
    fi
done

# if clickhouse user is defined - create it (user "default" already exists out of box)
if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ]; then
    echo "$0: create new user '$CLICKHOUSE_USER' instead 'default'"
    cat <<EOT > /etc/clickhouse-server/users.d/default-user.xml
    <clickhouse>
      <!-- Docs: <https://clickhouse.com/docs/en/operations/settings/settings_users/> -->
      <users>
        <!-- Remove default user -->
        <default remove="remove">
        </default>

        <${CLICKHOUSE_USER}>
          <profile>default</profile>
          <networks>
            <ip>::/0</ip>
          </networks>
          <password>${CLICKHOUSE_PASSWORD}</password>
          <quota>default</quota>
          <access_management>${CLICKHOUSE_ACCESS_MANAGEMENT}</access_management>
        </${CLICKHOUSE_USER}>
      </users>
    </clickhouse>
EOT
fi

if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
    # port is needed to check if clickhouse-server is ready for connections
    HTTP_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=http_port)"

    # Listen only on localhost until the initialization is done
    $gosu /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
    pid="$!"

    # check if clickhouse is ready to accept connections
    # will try to send ping clickhouse via http_port (max 12 retries by default, with 1 sec timeout and 1 sec delay between retries)
    tries=${CLICKHOUSE_INIT_TIMEOUT:-12}
    while ! wget --spider -T 1 -q "http://127.0.0.1:$HTTP_PORT/ping" 2>/dev/null; do
        if [ "$tries" -le "0" ]; then
            echo >&2 'ClickHouse init process failed.'
            exit 1
        fi
        tries=$(( tries-1 ))
        sleep 1
    done

    clickhouseclient=( clickhouse-client --multiquery --host "127.0.0.1" -u "$CLICKHOUSE_USER" --password "$CLICKHOUSE_PASSWORD" )

    echo

    # create default database, if defined
    if [ -n "$CLICKHOUSE_DB" ]; then
        echo "$0: create database '$CLICKHOUSE_DB'"
        "${clickhouseclient[@]}" -q "CREATE DATABASE IF NOT EXISTS $CLICKHOUSE_DB";
    fi

    for f in /docker-entrypoint-initdb.d/*; do
        case "$f" in
            *.sh)
                if [ -x "$f" ]; then
                    echo "$0: running $f"
                    "$f"
                else
                    echo "$0: sourcing $f"
                    # shellcheck source=/dev/null
                    . "$f"
                fi
                ;;
            *.sql)    echo "$0: running $f"; "${clickhouseclient[@]}" < "$f" ; echo ;;
            *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${clickhouseclient[@]}"; echo ;;
            *)        echo "$0: ignoring $f" ;;
        esac
        echo
    done

    if ! kill -s TERM "$pid" || ! wait "$pid"; then
        echo >&2 'Finishing of ClickHouse init process failed.'
        exit 1
    fi
fi

# if no args passed to `docker run` or first argument start with `--`, then the user is passing clickhouse-server arguments
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
    # Watchdog is launched by default, but does not send SIGINT to the main process,
    # so the container can't be finished by ctrl+c
    CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0}
    export CLICKHOUSE_WATCHDOG_ENABLE
    exec $gosu /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
fi

# Otherwise, we assume the user want to run his own process, for example a `bash` shell to explore this image
exec "$@"
Container startup
# Mount paths
mkdir -p /data/clickhouse1/{data,logs,conf}
mkdir -p /data/clickhouse2/{data,logs,conf}
mkdir -p /data/clickhouse3/{data,logs,conf}
# The default image tag pulled here was 22.1.3.7 at the time of writing
docker pull clickhouse/clickhouse-server
# Start the containers; across the cluster only metrika.xml and --name differ
docker run -d --net=host --name clickhouse-server1 \
--ulimit nofile=262144:262144 \
-v /data/clickhouse1/data:/var/lib/clickhouse \
-v /data/clickhouse1/logs:/var/log \
-v /data/clickhouse1/conf/metrika.xml:/etc/clickhouse-server/metrika.xml \
-v /data/clickhouse1/conf/config.xml:/etc/clickhouse-server/config.xml \
-v /data/clickhouse1/conf/users.xml:/etc/clickhouse-server/users.xml \
clickhouse/clickhouse-server
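The remaining nodes follow the same pattern; a sketch for the second host (swap the directory index and --name per host, as the comment above notes; substitute test:v1 here if you run the self-built image instead):
docker run -d --net=host --name clickhouse-server2 \
--ulimit nofile=262144:262144 \
-v /data/clickhouse2/data:/var/lib/clickhouse \
-v /data/clickhouse2/logs:/var/log \
-v /data/clickhouse2/conf/metrika.xml:/etc/clickhouse-server/metrika.xml \
-v /data/clickhouse2/conf/config.xml:/etc/clickhouse-server/config.xml \
-v /data/clickhouse2/conf/users.xml:/etc/clickhouse-server/users.xml \
clickhouse/clickhouse-server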
Verify the containers
# Verify (the password matches the users.xml mounted above)
docker exec -it clickhouse-server1 clickhouse-client --password 123456
# Inspect the cluster
select * from `system`.clusters;
# Inspect the shard/replica macros
select * from system.macros;
# Inspect ZooKeeper
SELECT * FROM system.zookeeper WHERE path = '/clickhouse';
# Ports in use
9000 9004 9009 8123
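A final end-to-end check from any host, sketched with the default user and the password from users.xml; one row per replica should come back, each with a different hostName():
# HTTP liveness: prints "Ok."
curl http://10.0.0.90:8123/ping
# Query every replica of every shard through the cluster definition
clickhouse-client --host 10.0.0.90 --password 123456 -q "SELECT hostName() FROM clusterAllReplicas('remote_servers', system.one)"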