Vitess Local Installation

Overview

IP             MySQL1  MySQL2  MySQL3  vttablet                  vtctld  vtgate
10.216.91.116  3308    3313    3314    vttablet_100,200,300,400          vtgate:15306
10.216.91.117  3308    3313    3314    vttablet_101,201,301,401  vtctld  vtgate:15306
10.216.91.118  3308    3313    3314    vttablet_102,202,302,402          vtgate:15306

ZooKeeper nodes:

10.216.91.117  zk1
10.216.91.118  zk2
10.216.91.119  zk3

Install Vitess

Download and extract Vitess

sudo mkdir /data/service/vitess

sudo tar -xvzf vitess-6.0.20-20200818-90741b8.tar.gz -C /data/service/vitess --strip-components 1

sudo chown -R mysql.mysql /data/service/vitess 

Configure environment variables

vim /home/mysql/.bashrc 
export TOPO=zk2 
export VTROOT=/data/service/vitess 
export VTTOP=$VTROOT 
#export MYSQL_FLAVOR=MySQL57 
export VTDATAROOT=/data/service_data/vitess/vtdataroot 
export PATH=${VTROOT}/bin:${PATH}

source /home/mysql/.bashrc
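A quick check to confirm the variables and PATH took effect for the mysql user:

# Verify the Vitess environment variables
echo "TOPO=${TOPO} VTROOT=${VTROOT} VTDATAROOT=${VTDATAROOT}"
which vtctld vtgate vttablet    # should resolve to /data/service/vitess/bin/...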

Create the vtdataroot directory

sudo mkdir /data/service_data/vitess

sudo chown -R mysql.mysql /data/service_data/vitess

Configure the hosts file on the vttablet nodes

10.216.91.117 zk_server1

10.216.91.118 zk_server2

10.216.91.119 zk_server3

Edit the env.sh file

The ZooKeeper servers were deployed in advance and are not covered in detail here. For reference, zoo.cfg:

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
dataDir=/data/service_data/zookeeper/data
# the port at which the clients will connect
clientPort=2881
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the 
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=10.216.91.117:2882:3882
server.2=10.216.91.118:2882:3882
server.3=10.216.91.119:2882:3882
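Since zoo.cfg declares server.1 through server.3, each ZooKeeper node also needs a myid file under dataDir containing its own server number, for example:

# On 10.216.91.117 (server.1); use 2 on 10.216.91.118 and 3 on 10.216.91.119
echo 1 > /data/service_data/zookeeper/data/myid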

Edit the env.sh script and fill in the ZooKeeper information

hostname=$(hostname -f)
hostname1=zk_server1
hostname2=zk_server2
hostname3=zk_server3
vtctld_web_port=15000
vtctld_host="10.216.91.117"
export VTDATAROOT="${VTDATAROOT:-${PWD}/vtdataroot}"

function fail() {
  echo "ERROR: $1"
  exit 1
}

if [[ $EUID -eq 0 ]]; then
  fail "This script refuses to be run as root. Please switch to a regular user."
fi

# mysqld might be in /usr/sbin which will not be in the default PATH
PATH="/usr/sbin:$PATH"
for binary in mysqld curl vtctlclient vttablet vtgate vtctld; do
  command -v "$binary" > /dev/null || fail "${binary} is not installed in PATH. See https://vitess.io/docs/get-started/local/ for install instructions."
done;

if [ "${TOPO}" = "zk2" ]; then
    # Set topology environment parameters.
    ZK_SERVER="${hostname1}:2881,${hostname2}:2881,${hostname3}:2881"
    # shellcheck disable=SC2034
    TOPOLOGY_FLAGS="-topo_implementation zk2 -topo_global_server_address ${ZK_SERVER} -topo_global_root /vitess/global"

    mkdir -p "${VTDATAROOT}/tmp"
elif [ "${TOPO}" = "k8s" ]; then
    # Set topology environment parameters.
    K8S_ADDR="localhost"
    K8S_PORT="8443"
    K8S_KUBECONFIG=$VTDATAROOT/tmp/k8s.kubeconfig
    # shellcheck disable=SC2034
    TOPOLOGY_FLAGS="-topo_implementation k8s -topo_k8s_kubeconfig ${K8S_KUBECONFIG} -topo_global_server_address ${K8S_ADDR}:${K8S_PORT} -topo_global_root /vitess/global"
else
    ETCD_SERVER="localhost:2379"
    TOPOLOGY_FLAGS="-topo_implementation etcd2 -topo_global_server_address $ETCD_SERVER -topo_global_root /vitess/global"

    mkdir -p "${VTDATAROOT}/etcd"
fi

mkdir -p "${VTDATAROOT}/tmp"

# Set aliases to simplify instructions.
# In your own environment you may prefer to use config files,
# such as ~/.my.cnf

alias mysql="command mysql -h 127.0.0.1 -P 15306"
alias vtctlclient="command vtctlclient -server localhost:15999 -log_dir ${VTDATAROOT}/tmp -alsologtostderr"

# Make sure aliases are expanded in non-interactive shell
shopt -s expand_aliases

Start ZooKeeper and initialize the topology service

Edit the zk-up.sh script

#!/bin/bash
# start zk
source ./env.sh

if [ "${TOPO}" = "zk2" ]; then
    CELL=zone1
fi

cell=${CELL:-'test'}

echo $TOPOLOGY_FLAGS
echo $ZK_SERVER

# Add the CellInfo description for the $CELL cell.
# If the node already exists, it's fine, means we used existing data.

set +e
vtctl $TOPOLOGY_FLAGS AddCellInfo \
  -root /vitess/$cell \
  -server_address $ZK_SERVER \
  $cell
set -e
echo "Configured zk servers."

Initialize the topology service

sh scripts/zk-up.sh

Note:

  • If multiple Vitess clusters share the same topology server, give each cluster a different -topo_global_root path to keep them separate, for example:
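    A sketch (the /vitess2/global path is only an example):

    # In the second cluster's env.sh, keep the same ZK_SERVER but change the global root
    TOPOLOGY_FLAGS="-topo_implementation zk2 -topo_global_server_address ${ZK_SERVER} -topo_global_root /vitess2/global"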

 

Log in to the ZooKeeper server and check that the topology information has been written

sh zkCli.sh -server 10.216.91.118:2881
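Inside the zkCli shell, the nodes created by zk-up.sh should be visible (a sketch; exact sub-paths can vary with the Vitess version):

ls /vitess/global            # global topology root (-topo_global_root)
ls /vitess/global/cells      # should list the zone1 cell added by AddCellInfo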

Start vtctld

Edit vtctld-up.sh

#!/bin/bash

# Copyright 2019 The Vitess Authors.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that starts vtctld.

source ./env.sh

CELL="zone1"
cell=${CELL:-'test'}
grpc_port=15999

echo "Starting vtctld..."
# shellcheck disable=SC2086
vtctld \
 $TOPOLOGY_FLAGS \
 -cell $cell \
 -workflow_manager_init \
 -workflow_manager_use_election \
 -service_map 'grpc-vtctl' \
 -backup_storage_implementation file \
 -file_backup_storage_root $VTDATAROOT/backups \
 -log_dir $VTDATAROOT/tmp \
 -port $vtctld_web_port \
 -grpc_port $grpc_port \
 -pid_file $VTDATAROOT/tmp/vtctld.pid \
  > $VTDATAROOT/tmp/vtctld.out 2>&1 &

Run the script

sh scripts/vtctld-up.sh

Open the vtctld web UI

http://10.216.91.117:15000/
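Before starting any tablets it can be worth confirming that both the web and gRPC ports answer; a minimal check:

curl -I http://10.216.91.117:15000/debug/status           # vtctld web UI
vtctlclient -server 10.216.91.117:15999 GetCellInfoNames   # should print zone1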

Start vttablet

Create the vttablet configuration file

mkdir /data/service/vitess/config/tablet
cd /data/service/vitess/config/tablet
vim tablet_100_commerce.yaml
tabletID: zone1-100

init:
  dbName: db1          # init_db_name_override
  keyspace: commerce         # init_keyspace
  shard: 0             # init_shard
  tabletType: replica       # init_tablet_type
  timeoutSeconds: 60 # init_timeout

db:
  socket:     # db_socket
  host: 10.216.91.117      # db_host
  port: 3308     # db_port
  charSet: utf8   # db_charset
  flags: 0    # db_flags
  flavor: MySQL8    # db_flavor
  sslCa:      # db_ssl_ca
  sslCaPath:  # db_ssl_ca_path
  sslCert:    # db_ssl_cert
  sslKey:     # db_ssl_key
  serverName: # db_server_name
  connectTimeoutMilliseconds: 0 # db_connect_timeout_ms
  app:
    user: msandbox              # db_app_user
    password: msandbox          # db_app_password
    useSsl: true                # db_app_use_ssl
    preferTcp: false
  dba:
    user: dba      # db_dba_user
    password: doumi1.q         # db_dba_password
    useSsl: true      # db_dba_use_ssl
    preferTcp: false
  filtered:
    user: msandbox      # db_filtered_user
    password: msandbox  # db_filtered_password
    useSsl: true        # db_filtered_use_ssl
    preferTcp: false
  repl:
    user: dmrepl           # db_repl_user
    password: doumi1.q     # db_repl_password
    useSsl: true           # db_repl_use_ssl
    preferTcp: false
  appdebug:
    user: msandbox            # db_appdebug_user
    password: msandbox        # db_appdebug_password
    useSsl: true              # db_appdebug_use_ssl
    preferTcp: false
  allprivs:
    user: dba                 # db_allprivs_user
    password: doumi1.q        # db_allprivs_password
    useSsl: true              # db_allprivs_use_ssl
    preferTcp: false

oltpReadPool:
  size: 16                 # queryserver-config-pool-size
  timeoutSeconds: 0        # queryserver-config-query-pool-timeout
  idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout
  prefillParallelism: 0    # queryserver-config-pool-prefill-parallelism
  maxWaiters: 50000        # queryserver-config-query-pool-waiter-cap

olapReadPool:
  size: 200                # queryserver-config-stream-pool-size
  timeoutSeconds: 0        # queryserver-config-query-pool-timeout
  idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout
  prefillParallelism: 0    # queryserver-config-stream-pool-prefill-parallelism
  maxWaiters: 0

txPool:
  size: 20                 # queryserver-config-transaction-cap
  timeoutSeconds: 1        # queryserver-config-txpool-timeout
  idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout
  prefillParallelism: 0    # queryserver-config-transaction-prefill-parallelism
  maxWaiters: 50000        # queryserver-config-txpool-waiter-cap

oltp:
  queryTimeoutSeconds: 30 # queryserver-config-query-timeout
  txTimeoutSeconds: 30    # queryserver-config-transaction-timeout
  maxRows: 10000          # queryserver-config-max-result-size
  warnRows: 0             # queryserver-config-warn-result-size

healthcheck:
  intervalSeconds: 20             # health_check_interval
  degradedThresholdSeconds: 30    # degraded_threshold
  unhealthyThresholdSeconds: 7200 # unhealthy_threshold

gracePeriods:
  transactionShutdownSeconds: 0 # transaction_shutdown_grace_period
  transitionSeconds: 0          # serving_state_grace_period

replicationTracker:
  mode: disable                    # enable_replication_reporter
  heartbeatIntervalMilliseconds: 0 # heartbeat_enable, heartbeat_interval

hotRowProtection:
  mode: disable|dryRun|enable # enable_hot_row_protection, enable_hot_row_protection_dry_run
  # Recommended value: same as txPool.size.
  maxQueueSize: 20            # hot_row_protection_max_queue_size
  maxGlobalQueueSize: 1000    # hot_row_protection_max_global_queue_size
  maxConcurrency: 5           # hot_row_protection_concurrent_transactions

consolidator: enable|disable|notOnMaster # enable-consolidator, enable-consolidator-replicas
passthroughDML: false                    # queryserver-config-passthrough-dmls
streamBufferSize: 32768                  # queryserver-config-stream-buffer-size
queryCacheSize: 5000                     # queryserver-config-query-cache-size
schemaReloadIntervalSeconds: 1800        # queryserver-config-schema-reload-time
watchReplication: false                  # watch_replication_stream
terseErrors: false                       # queryserver-config-terse-errors
messagePostponeParallelism: 4            # queryserver-config-message-postpone-cap
cacheResultFields: true                  # enable-query-plan-field-caching


# The following flags are currently not supported.
# enforce_strict_trans_tables
# queryserver-config-strict-table-acl
# queryserver-config-enable-table-acl-dry-run
# queryserver-config-acl-exempt-acl
# enable-tx-throttler
# tx-throttler-config
# tx-throttler-healthcheck-cells
# enable_transaction_limit
# enable_transaction_limit_dry_run
# transaction_limit_per_user
# transaction_limit_by_username
# transaction_limit_by_principal
# transaction_limit_by_component
# transaction_limit_by_subcomponent

Create the backend database accounts

create user 'msandbox'@'%' identified by 'msandbox';
grant all privileges on *.* to 'msandbox'@'%';
create user 'dba'@'%' identified by 'doumi1.q';
grant all privileges on *.* to 'dba'@'%';
create user 'dmrepl'@'%' identified by 'doumi1.q';
GRANT REPLICATION SLAVE ON *.* TO 'dmrepl'@'%';
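To confirm the accounts were created correctly on each MySQL instance (ports as in the overview table), a quick check such as:

# Run against each backend instance, e.g. the instance on port 3308 of this host
mysql -h 10.216.91.117 -P 3308 -u dba -pdoumi1.q -e "show grants;"
mysql -h 10.216.91.117 -P 3308 -u dmrepl -pdoumi1.q -e "show grants;"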

Edit the vttablet startup script

#!/bin/bash

# Copyright 2019 The Vitess Authors.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

source ./env.sh

CELL="zone1"
KEYSPACE=$1
# tablet_uid in (100,200)
TABLET_UID=$2
#DB_NAME=$3
#mysql_port=$4
SHARD=$3
tablet_config="/data/service/vitess/config/tablet/tablet_${TABLET_UID}_${KEYSPACE}.yaml"
echo "$tablet_config"

if [[ $# != 3 ]];then
    #echo "usage: sh script/vttablet-up.sh keyspace tablet_uid db_name mysql_port SHARD"
    echo "usage: sh script/vttablet-up.sh keyspace tablet_uid shard" 
    exit 1

fi

cell=${CELL:-'test'}
keyspace=${KEYSPACE:-'test_keyspace'}
shard=${SHARD:-'0'}
uid=$TABLET_UID
port=$[15000 + $uid]
grpc_port=$[16000 + $uid]
printf -v alias '%s-%010d' $cell $uid
printf -v tablet_dir 'vt_%010d' $uid
tablet_hostname=''
printf -v tablet_logfile 'vttablet_%010d_querylog.txt' $uid

tablet_type=replica

if [ ! -d $VTDATAROOT/$tablet_dir ];then
    mkdir -p $VTDATAROOT/$tablet_dir
fi

echo "Starting vttablet for $alias..."
echo $TOPOLOGY_FLAGS
echo $vtctld_host

vttablet \
 $TOPOLOGY_FLAGS \
 -logtostderr \
 -log_queries_to_file $VTDATAROOT/tmp/$tablet_logfile \
 -tablet-path $alias \
 -port $port \
 -init_keyspace $keyspace \
 -init_shard $SHARD \
 -init_tablet_type replica \
 -grpc_port $grpc_port \
 -service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \
 -pid_file $VTDATAROOT/$tablet_dir/vttablet.pid \
 -vtctld_addr http://$vtctld_host:$vtctld_web_port/ \
 -init_populate_metadata \
 -enable_semi_sync \
 -enable_replication_reporter \
 -tablet_config $tablet_config \
 > $VTDATAROOT/$tablet_dir/vttablet.out 2>&1 &

# Block waiting for the tablet to be listening
# Not the same as healthy

for i in $(seq 0 300); do
 curl -I "http://$hostname:$port/debug/status" >/dev/null 2>&1 && break
 sleep 0.1
done

# check one last time
curl -I "http://$hostname:$port/debug/status" || fail "tablet could not be started!"

Start vttablet

# 10.216.91.116
sh scripts/vttablet-up-config.sh commerce 100 0
# 10.216.91.117
sh scripts/vttablet-up-config.sh commerce 101 0
# 10.216.91.118
sh scripts/vttablet-up-config.sh commerce 102 0

Note:
This step creates the _vt database; SET @@session.sql_log_bin = 0 is used while it is created, so those statements are not written to the binlog.
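Once all three tablets are up, they should be registered in the topology; for example:

# Expect zone1-0000000100/101/102 listed for keyspace commerce, shard 0
vtctlclient -server 10.216.91.117:15999 ListAllTablets zone1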

Initialize the master node

vtctlclient \
-server 10.216.91.117:15999 \
-log_dir /data/service_data/vitess/vtdataroot/tmp/ \
-alsologtostderr \
InitShardMaster -force commerce/0 zone1-101

Note:

  1. By default this creates the vt_commerce database. To use a custom database name, start vttablet with the -init_db_name_override $DB_NAME flag (see the sketch after this note).
  2. Running InitShardMaster on a shard that already has replication set up can cause data loss; use the PlannedReparentShard or EmergencyReparentShard commands instead.
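For item 1, the change amounts to re-enabling the commented-out DB_NAME argument in the startup script and adding one flag to the vttablet command line (a sketch, not the full script):

DB_NAME=$4                              # custom database name passed as a 4th argument, e.g. db1
#   ...then add to the vttablet flags:
#   -init_db_name_override $DB_NAME \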

Output log:


I0828 11:44:01.070516 15725 trace.go:151] successfully started tracing with [noop]
W0828 11:44:01.096101 15725 main.go:64] W0828 03:44:01.094444 reparent.go:185] master-elect tablet zone1-0000000100 is not the shard master, proceeding anyway as -force was used
W0828 11:44:01.096270 15725 main.go:64] W0828 03:44:01.094652 reparent.go:191] master-elect tablet zone1-0000000100 is not a master in the shard, proceeding anyway as -force was used
I0828 11:44:01.096282 15725 main.go:64] I0828 03:44:01.094744 reparent.go:222] resetting replication on tablet zone1-0000000100
I0828 11:44:01.096292 15725 main.go:64] I0828 03:44:01.094859 reparent.go:222] resetting replication on tablet zone1-0000000102
I0828 11:44:01.096300 15725 main.go:64] I0828 03:44:01.094780 reparent.go:222] resetting replication on tablet zone1-0000000101
I0828 11:44:02.124341 15725 main.go:64] I0828 03:44:02.123960 reparent.go:241] initializing master on zone1-0000000100
I0828 11:44:02.161302 15725 main.go:64] I0828 03:44:02.160963 reparent.go:274] populating reparent journal on new master zone1-0000000100
I0828 11:44:02.161342 15725 main.go:64] I0828 03:44:02.161069 reparent.go:281] initializing slave zone1-0000000101
I0828 11:44:02.161496 15725 main.go:64] I0828 03:44:02.161168 reparent.go:281] initializing slave zone1-0000000102

Test data

Create the schema

vtctlclient \
-server 10.216.91.117:15999 \
-log_dir /data/service_data/vitess/vtdataroot/tmp/ \
-alsologtostderr \
ApplySchema -sql-file create_commerce_schema.sql commerce

Create the vschema

vtctlclient \
-server 10.216.91.117:15999 \
-log_dir /data/service_data/vitess/vtdataroot/tmp/ \
-alsologtostderr \
ApplyVSchema -vschema_file vschema_commerce_initial.json commerce

Load the initial data (this goes through vtgate on port 15306, so the vtgate described in the next section must already be running)

mysql -h 127.0.0.1 -P 15306 < ../common/insert_commerce_data.sql

View the imported data:
mysql -h 127.0.0.1 -P 15306 < ../common/select_commerce_data.sql
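The imported rows can also be checked interactively through vtgate, assuming the standard commerce example tables (e.g. corder):

mysql -h 127.0.0.1 -P 15306 commerce -e "show tables; select count(*) from corder;"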

Start vtgate

Edit vtgate-up.sh

#!/bin/bash

# Copyright 2019 The Vitess Authors.
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#     http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is an example script that starts a single vtgate.

source ./env.sh

CELL="zone1"
cell=${CELL:-'test'}
web_port=15001
grpc_port=15991
mysql_server_port=15306
mysql_server_socket_path="/tmp/mysql_${mysql_server_port}.sock"

# Start vtgate.
# shellcheck disable=SC2086
vtgate \
  $TOPOLOGY_FLAGS \
  -log_dir $VTDATAROOT/tmp \
  -log_queries_to_file $VTDATAROOT/tmp/vtgate_querylog.txt \
  -port $web_port \
  -grpc_port $grpc_port \
  -mysql_server_port $mysql_server_port \
  -mysql_server_socket_path $mysql_server_socket_path \
  -cell $cell \
  -cells_to_watch $cell \
  -tablet_types_to_wait MASTER,REPLICA \
  -gateway_implementation discoverygateway \
  -service_map 'grpc-vtgateservice' \
  -pid_file $VTDATAROOT/tmp/vtgate.pid \
  -mysql_auth_server_impl none \
  > $VTDATAROOT/tmp/vtgate.out 2>&1 &

# Block waiting for vtgate to be listening
# Not the same as healthy

echo "Waiting for vtgate to be up..."
while true; do
 curl -I "http://$hostname:$web_port/debug/status" >/dev/null 2>&1 && break
 sleep 0.1
done;
echo "vtgate is up!"

echo "Access vtgate at http://$hostname:$web_port/debug/status"

disown -a

Start vtgate

sh scripts/vtgate-up.sh

Check the vtgate web UI:

http://10.216.91.118:15001/debug/status
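The MySQL endpoint on port 15306 can be checked the same way:

# vtgate should list the commerce keyspace as a database
mysql -h 127.0.0.1 -P 15306 -e "show databases;"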

 

Read/write splitting

Syntax: @[master|replica|rdonly]
use commerce@replica;
or
select * from commerce@replica.corder;
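For example, from the shell (mirroring the syntax above; output depends on the data loaded earlier):

# Read from a replica tablet
mysql -h 127.0.0.1 -P 15306 -e "select * from commerce@replica.corder;"
# Pin the whole session to replicas by putting the target in the database name
mysql -h 127.0.0.1 -P 15306 commerce@replica -e "select * from corder;"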

Vitess vs. ShardingSphere comparison:

Sharding mode
  Vitess:         database sharding only
  ShardingSphere: database sharding, table sharding, and combined database + table sharding

SQL support
  Vitess:         slightly broader than ShardingSphere
  ShardingSphere: slightly narrower than Vitess

Shard routing
  Vitess:         supports = and IN
  ShardingSphere: supports =, IN, BETWEEN, OR, and lets users force routing with hints

Distributed transactions
  Vitess:         1PC (weak XA), 2PC (strong XA)
  ShardingSphere: 1PC (weak XA), 2PC (strong XA); flexible SAGA transactions planned

Read/write splitting
  Vitess:         the target master or replica is specified in the SQL statement
  ShardingSphere: routed automatically once configured

Distributed primary keys
  Vitess:         implemented with a global, unsharded sequence table
  ShardingSphere: implemented via an interface; snowflake algorithm by default

Configuration management
  Vitess:         supports ZooKeeper, etcd, Consul, and k8s
  ShardingSphere: supports ZooKeeper and etcd

Data management
  Vitess:         provides database backups, master failover, and resharding
  ShardingSphere: (not specified)

 
