TiDB: Simulating a Production Cluster Deployment on a Single Machine (Linux)

0. Reference

Quick Start Guide for the TiDB Database | PingCAP Docs

1. Prepare the environment

Prepare one deployment host and make sure its software meets the requirements:

  • CentOS 7.3 or later is recommended; this walkthrough uses Oracle Linux 8.9
  • The host must be able to access the internet in order to download TiDB and the related installation packages
  • A physical host or a virtual machine works; this walkthrough uses VirtualBox with at least 10 GB of memory allocated

(Optional) Turn off the firewall and create a dedicated user (for a quick test you can also just use root).

# Turn off the firewall

[root@bogon ~]# firewall-cmd --state
running
[root@bogon ~]# systemctl status firewalld.service
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; enabled; vendor pres>
   Active: active (running) since Wed 2024-07-24 21:14:39 CST; 45min ago
     Docs: man:firewalld(1)
 Main PID: 1043 (firewalld)
    Tasks: 2 (limit: 100052)
   Memory: 46.5M
   CGroup: /system.slice/firewalld.service
           └─1043 /usr/libexec/platform-python -s /usr/sbin/firewalld --nofork --n>

Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
[root@bogon ~]# systemctl stop firewalld.service
[root@bogon ~]# systemctl disable firewalld.service
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@bogon ~]# systemctl status firewalld.service
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor pre>
   Active: inactive (dead) since Wed 2024-07-24 22:00:26 CST; 13s ago
     Docs: man:firewalld(1)
 Main PID: 1043 (code=exited, status=0/SUCCESS)

Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 21:14:41 bogon firewalld[1043]: WARNING: COMMAND_FAILED: '/usr/sbin/iptable>
Jul 24 22:00:25 bogon systemd[1]: Stopping firewalld - dynamic firewall daemon...
Jul 24 22:00:26 bogon systemd[1]: firewalld.service: Succeeded.
Jul 24 22:00:26 bogon systemd[1]: Stopped firewalld - dynamic firewall daemon.



# Set kernel parameters and user limits

[root@bogon ~]# echo "fs.file-max = 1000000">> /etc/sysctl.conf
[root@bogon ~]# echo "net.core.somaxconn = 32768">> /etc/sysctl.conf
[root@bogon ~]# echo "net.ipv4.tcp_tw_recycle = 0">> /etc/sysctl.conf
[root@bogon ~]# echo "net.ipv4.tcp_syncookies = 0">> /etc/sysctl.conf
[root@bogon ~]# echo "vm.overcommit_memory = 1">> /etc/sysctl.conf
[root@bogon ~]# echo "vm.min_free_kbytes = 1048576">> /etc/sysctl.conf
[root@bogon ~]# sysctl -p
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 4294967295
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
fs.file-max = 1000000
net.core.somaxconn = 32768
sysctl: cannot stat /proc/sys/net/ipv4/tcp_tw_recycle: No such file or directory
net.ipv4.tcp_syncookies = 0
vm.overcommit_memory = 1
vm.min_free_kbytes = 1048576
[root@bogon ~]# cat << EOF >>/etc/security/limits.conf
> tidb           soft    nofile          1000000
> tidb           hard    nofile          1000000
> tidb           soft    stack          32768
> tidb           hard    stack          32768
> EOF
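
Note: the "sysctl: cannot stat /proc/sys/net/ipv4/tcp_tw_recycle" error above is expected, because tcp_tw_recycle was removed in Linux kernel 4.12 and later, so on Oracle Linux 8 that line can simply be dropped from /etc/sysctl.conf. As an optional sanity check (not part of the original steps), the following should echo back the values that were just written:

[root@bogon ~]# sysctl fs.file-max net.core.somaxconn vm.overcommit_memory vm.min_free_kbytes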



# Create the directory
[root@bogon ~]#  mkdir /tmp/tidb
[root@bogon ~]# chmod -R 777 /tmp/tidb


# Create the tidb user

[root@bogon ~]# useradd -d /home/tidb  -s /bin/bash tidb 
[root@bogon ~]# passwd tidb 
Changing password for user tidb.
New password: 
Retype new password: 
passwd: all authentication tokens updated successfully.
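
(Optional) Once the user exists, you can confirm that the nofile and stack limits from /etc/security/limits.conf are picked up for the tidb user; a quick check could look like this:

[root@bogon ~]# su - tidb -c 'ulimit -n; ulimit -s'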


 

2. Deploy the cluster

2.1 Download and install TiUP:

[tidb@bogon ~]$ curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 5149k  100 5149k    0     0  4298k      0  0:00:01  0:00:01 --:--:-- 4295k
WARN: adding root certificate via internet: https://tiup-mirrors.pingcap.com/root.json
You can revoke this by remove /home/tidb/.tiup/bin/7b8e153f2e2d0928.root.json
Successfully set mirror to https://tiup-mirrors.pingcap.com
Detected shell: bash
Shell profile:  /home/tidb/.bash_profile
/home/tidb/.bash_profile has been modified to add tiup to PATH
open a new terminal or source /home/tidb/.bash_profile to use it
Installed path: /home/tidb/.tiup/bin/tiup
===============================================
Have a try:     tiup playground
===============================================


2.2 Declare the global environment variable:
[tidb@bogon ~]$ . .bash_profile
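
(Optional) After sourcing the profile, tiup should be on the PATH; a quick check:

[tidb@bogon ~]$ which tiup
[tidb@bogon ~]$ tiup --version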


2.3 Install the TiUP cluster component
[tidb@bogon ~]$ tiup cluster
Checking updates for component cluster... 
A new version of cluster is available:  -> v1.16.0
    To update this component:   tiup update cluster
    To update all components:   tiup update --all

The component `cluster` version  is not installed; downloading from repository.
download https://tiup-mirrors.pingcap.com/cluster-v1.16.0-linux-amd64.tar.gz 8.83 MiB / 8.83 MiB 100.00% 32.89 MiB/s
Deploy a TiDB cluster for production

Usage:
  tiup cluster [command]

Available Commands:
  check       Perform preflight checks for the cluster.
  deploy      Deploy a cluster for production
  start       Start a TiDB cluster
  stop        Stop a TiDB cluster
  restart     Restart a TiDB cluster
  scale-in    Scale in a TiDB cluster
  scale-out   Scale out a TiDB cluster
  destroy     Destroy a specified cluster
  clean       (EXPERIMENTAL) Cleanup a specified cluster
  upgrade     Upgrade a specified TiDB cluster
  display     Display information of a TiDB cluster
  prune       Destroy and remove instances that is in tombstone state
  list        List all clusters
  audit       Show audit log of cluster operation
  import      Import an exist TiDB cluster from TiDB-Ansible
  edit-config Edit TiDB cluster config
  show-config Show TiDB cluster config
  reload      Reload a TiDB cluster's config and restart if needed
  patch       Replace the remote package with a specified package and restart the service
  rename      Rename the cluster
  enable      Enable a TiDB cluster automatically at boot
  disable     Disable automatic enabling of TiDB clusters at boot
  replay      Replay previous operation and skip successed steps
  template    Print topology template
  tls         Enable/Disable TLS between TiDB components
  meta        backup/restore meta information
  rotatessh   rotate ssh keys on all nodes
  help        Help about any command
  completion  Generate the autocompletion script for the specified shell

Flags:
  -c, --concurrency int     max number of parallel tasks allowed (default 5)
      --format string       (EXPERIMENTAL) The format of output, available values are [default, json] (default "default")
  -h, --help                help for tiup
      --ssh string          (EXPERIMENTAL) The executor type: 'builtin', 'system', 'none'.
      --ssh-timeout uint    Timeout in seconds to connect host via SSH, ignored for operations that don't need an SSH connection. (default 5)
  -v, --version             version for tiup
      --wait-timeout uint   Timeout in seconds to wait for an operation to complete, ignored for operations that don't fit. (default 120)
  -y, --yes                 Skip all confirmations and assumes 'yes'

Use "tiup cluster help [command]" for more information about a command.


2.4 If TiUP cluster is already installed on the machine, update it to the latest version:
[tidb@bogon ~]$ tiup update --self && tiup update cluster
download https://tiup-mirrors.pingcap.com/tiup-v1.16.0-linux-amd64.tar.gz 5.03 MiB / 5.03 MiB 100.00% 53.15 MiB/s
Updated successfully!
component cluster version v1.16.0 is already installed
Updated successfully!


2.5 Because this simulates a multi-machine deployment on a single host, increase the sshd connection limit as root:

Edit /etc/ssh/sshd_config and set MaxSessions to 20.

Restart the sshd service:
service sshd restart
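
If you prefer not to edit the file by hand, a rough equivalent (a sketch, assuming the stock sshd_config where MaxSessions only appears as a commented-out default) is:

[root@bogon ~]# sed -i 's/^#\?MaxSessions.*/MaxSessions 20/' /etc/ssh/sshd_config
[root@bogon ~]# systemctl restart sshd
[root@bogon ~]# sshd -T | grep -i maxsessions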

2.6 Create and start the cluster
Create the topology configuration file topo.yaml (the minimal single-machine topology below mirrors the official quick-start template):

[tidb@bogon ~]$ vi topo.yaml
# # Global variables are applied to all deployments and used as the default value of
# # the deployments if a specific deployment value is missing.
global:
 user: "tidb"
 ssh_port: 22
 deploy_dir: "/data/tidbdata/tidb-deploy"
 data_dir: "/data/tidbdata/tidb-data"

# # Monitored variables are applied to all the machines.
monitored:
 node_exporter_port: 9100
 blackbox_exporter_port: 9115

server_configs:
 tidb:
   instance.tidb_slow_log_threshold: 300
 tikv:
   readpool.storage.use-unified-pool: false
   readpool.coprocessor.use-unified-pool: true
 pd:
   replication.enable-placement-rules: true
   replication.location-labels: ["host"]
 tiflash:
   logger.level: "info"

pd_servers:
 - host: 192.168.1.22

tidb_servers:
 - host: 192.168.1.22

tikv_servers:
 - host: 192.168.1.22
   port: 20160
   status_port: 20180
   config:
     server.labels: { host: "logic-host-1" }
 - host: 192.168.1.22
   port: 20161
   status_port: 20181
   config:
     server.labels: { host: "logic-host-2" }
 - host: 192.168.1.22
   port: 20162
   status_port: 20182
   config:
     server.labels: { host: "logic-host-3" }

tiflash_servers:
 - host: 192.168.1.22

monitoring_servers:
 - host: 192.168.1.22

grafana_servers:
 - host: 192.168.1.22
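
(Optional) Before the actual deploy, TiUP can run preflight checks against the topology file; as a sketch, using the same root login as the deploy step below:

[tidb@bogon ~]$ tiup cluster check ./topo.yaml --user root -p

Failed checks can often be fixed automatically by re-running the command with the --apply flag.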


2.7 Run the cluster deploy command:
[tidb@bogon ~]$ tiup cluster deploy demo 8.2.0  ./topo.yaml --user root -p tidb
Input SSH password: 

+ Detect CPU Arch Name
  - Detecting node 192.168.1.22 Arch info ... Done

+ Detect CPU OS Name
  - Detecting node 192.168.1.22 OS info ... Done
Please confirm your topology:
Cluster type:    tidb
Cluster name:    demo
Cluster version: v8.2.0
Role        Host          Ports                            OS/Arch       Directories
----        ----          -----                            -------       -----------
pd          192.168.1.22  2379/2380                        linux/x86_64  /data/tidbdata/tidb-deploy/pd-2379,/data/tidbdata/tidb-data/pd-2379
tikv        192.168.1.22  20160/20180                      linux/x86_64  /data/tidbdata/tidb-deploy/tikv-20160,/data/tidbdata/tidb-data/tikv-20160
tikv        192.168.1.22  20161/20181                      linux/x86_64  /data/tidbdata/tidb-deploy/tikv-20161,/data/tidbdata/tidb-data/tikv-20161
tikv        192.168.1.22  20162/20182                      linux/x86_64  /data/tidbdata/tidb-deploy/tikv-20162,/data/tidbdata/tidb-data/tikv-20162
tidb        192.168.1.22  4000/10080                       linux/x86_64  /data/tidbdata/tidb-deploy/tidb-4000
tiflash     192.168.1.22  9000/8123/3930/20170/20292/8234  linux/x86_64  /data/tidbdata/tidb-deploy/tiflash-9000,/data/tidbdata/tidb-data/tiflash-9000
prometheus  192.168.1.22  9090/12020                       linux/x86_64  /data/tidbdata/tidb-deploy/prometheus-9090,/data/tidbdata/tidb-data/prometheus-9090
grafana     192.168.1.22  3000                             linux/x86_64  /data/tidbdata/tidb-deploy/grafana-3000
Attention:
    1. If the topology is not what you expected, check your yaml file.
    2. Please confirm there is no port/directory conflicts in same host.
Do you want to continue? [y/N]: (default=N) y
+ Generate SSH keys ... Done
+ Download TiDB components
+ Download TiDB components
  - Download pd:v8.2.0 (linux/amd64) ... Done
  - Download tikv:v8.2.0 (linux/amd64) ... Done
  - Download tidb:v8.2.0 (linux/amd64) ... Done
  - Download tiflash:v8.2.0 (linux/amd64) ... Done
  - Download prometheus:v8.2.0 (linux/amd64) ... Done
  - Download grafana:v8.2.0 (linux/amd64) ... Done
  - Download node_exporter: (linux/amd64) ... Done
  - Download blackbox_exporter: (linux/amd64) ... Done
+ Initialize target host environments
  - Prepare 192.168.1.22:22 ... Done
+ Deploy TiDB instance
  - Copy pd -> 192.168.1.22 ... Done
  - Copy tikv -> 192.168.1.22 ... Done
  - Copy tikv -> 192.168.1.22 ... Done
  - Copy tikv -> 192.168.1.22 ... Done
  - Copy tidb -> 192.168.1.22 ... Done
  - Copy tiflash -> 192.168.1.22 ... Done
  - Copy prometheus -> 192.168.1.22 ... Done
  - Copy grafana -> 192.168.1.22 ... Done
  - Deploy node_exporter -> 192.168.1.22 ... Done
  - Deploy blackbox_exporter -> 192.168.1.22 ... Done
+ Copy certificate to remote host
+ Init instance configs
  - Generate config pd -> 192.168.1.22:2379 ... Done
  - Generate config tikv -> 192.168.1.22:20160 ... Done
  - Generate config tikv -> 192.168.1.22:20161 ... Done
  - Generate config tikv -> 192.168.1.22:20162 ... Done
  - Generate config tidb -> 192.168.1.22:4000 ... Done
  - Generate config tiflash -> 192.168.1.22:9000 ... Done
  - Generate config prometheus -> 192.168.1.22:9090 ... Done
  - Generate config grafana -> 192.168.1.22:3000 ... Done
+ Init monitor configs
  - Generate config node_exporter -> 192.168.1.22 ... Done
  - Generate config blackbox_exporter -> 192.168.1.22 ... Done
Enabling component pd
        Enabling instance 192.168.1.22:2379
        Enable instance 192.168.1.22:2379 success
Enabling component tikv
        Enabling instance 192.168.1.22:20162
        Enabling instance 192.168.1.22:20160
        Enabling instance 192.168.1.22:20161
        Enable instance 192.168.1.22:20162 success
        Enable instance 192.168.1.22:20161 success
        Enable instance 192.168.1.22:20160 success
Enabling component tidb
        Enabling instance 192.168.1.22:4000
        Enable instance 192.168.1.22:4000 success
Enabling component tiflash
        Enabling instance 192.168.1.22:9000
        Enable instance 192.168.1.22:9000 success
Enabling component prometheus
        Enabling instance 192.168.1.22:9090
        Enable instance 192.168.1.22:9090 success
Enabling component grafana
        Enabling instance 192.168.1.22:3000
        Enable instance 192.168.1.22:3000 success
Enabling component node_exporter
        Enabling instance 192.168.1.22
        Enable 192.168.1.22 success
Enabling component blackbox_exporter
        Enabling instance 192.168.1.22
        Enable 192.168.1.22 success
Cluster `demo` deployed successfully, you can start it with command: `tiup cluster start demo --init`


2.8 Start the cluster (started without --init here, so the MySQL root password remains empty, matching the login in step 2.9):
[tidb@bogon ~]$ tiup cluster start demo
Starting cluster demo...
+ [ Serial ] - SSHKeySet: privateKey=/home/tidb/.tiup/storage/cluster/clusters/demo/ssh/id_rsa, publicKey=/home/tidb/.tiup/storage/cluster/clusters/demo/ssh/id_rsa.pub
+ [Parallel] - UserSSH: user=tidb, host=192.168.1.22
+ [Parallel] - UserSSH: user=tidb, host=192.168.1.22
+ [Parallel] - UserSSH: user=tidb, host=192.168.1.22
+ [Parallel] - UserSSH: user=tidb, host=192.168.1.22
+ [Parallel] - UserSSH: user=tidb, host=192.168.1.22
+ [Parallel] - UserSSH: user=tidb, host=192.168.1.22
+ [Parallel] - UserSSH: user=tidb, host=192.168.1.22
+ [Parallel] - UserSSH: user=tidb, host=192.168.1.22
+ [ Serial ] - StartCluster
Starting component pd
        Starting instance 192.168.1.22:2379
        Start instance 192.168.1.22:2379 success
Starting component tikv
        Starting instance 192.168.1.22:20162
        Starting instance 192.168.1.22:20160
        Starting instance 192.168.1.22:20161
        Start instance 192.168.1.22:20161 success
        Start instance 192.168.1.22:20160 success
        Start instance 192.168.1.22:20162 success
Starting component tidb
        Starting instance 192.168.1.22:4000
        Start instance 192.168.1.22:4000 success
Starting component tiflash
        Starting instance 192.168.1.22:9000
        Start instance 192.168.1.22:9000 success
Starting component prometheus
        Starting instance 192.168.1.22:9090
        Start instance 192.168.1.22:9090 success
Starting component grafana
        Starting instance 192.168.1.22:3000
        Start instance 192.168.1.22:3000 success
Starting component node_exporter
        Starting instance 192.168.1.22
        Start 192.168.1.22 success
Starting component blackbox_exporter
        Starting instance 192.168.1.22
        Start 192.168.1.22 success
+ [ Serial ] - UpdateTopology: cluster=demo
Started cluster `demo` successfully
[tidb@bogon ~]$ 


2.9 Access the cluster:

Install the MySQL client (skip this step if it is already installed):

yum -y install mysql

Connect to the TiDB database (the password is empty):

mysql -h 127.0.0.1 -P 4000 -u root

[root@bogon ~]# mysql  -h 127.0.0.1 -P 4000 -u root
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 501219336
Server version: 8.0.11-TiDB-v8.2.0 TiDB Server (Apache License 2.0) Community Edition, MySQL 8.0 compatible

Copyright (c) 2000, 2024, Oracle and/or its affiliates.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| INFORMATION_SCHEMA |
| METRICS_SCHEMA     |
| PERFORMANCE_SCHEMA |
| mysql              |
| sys                |
| test               |
+--------------------+
6 rows in set (0.01 sec)
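
As an additional sanity check (optional, and the exact output depends on the environment), you can confirm from SQL that all storage nodes have registered; in this topology the query should list the three TiKV stores and the TiFlash store:

mysql> SELECT STORE_ID, ADDRESS, STORE_STATE_NAME FROM INFORMATION_SCHEMA.TIKV_STORE_STATUS;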


Access TiDB's Grafana monitoring:

Open http://127.0.0.1:3000 to reach the cluster's Grafana monitoring page; the default username and password are both admin.

Access the TiDB Dashboard:

Open http://127.0.0.1:2379/dashboard to reach the TiDB Dashboard; the default username is root and the password is empty.

Run the following command to confirm the list of clusters that have been deployed:
[tidb@bogon ~]$ tiup cluster list
Name  User  Version  Path                                            PrivateKey
----  ----  -------  ----                                            ----------
demo  tidb  v8.2.0   /home/tidb/.tiup/storage/cluster/clusters/demo  /home/tidb/.tiup/storage/cluster/clusters/demo/ssh/id_rsa

Run the following command to view the cluster topology and status:
[tidb@bogon ~]$ tiup cluster display demo
Cluster type:       tidb
Cluster name:       demo
Cluster version:    v8.2.0
Deploy user:        tidb
SSH type:           builtin
Dashboard URL:      http://192.168.1.22:2379/dashboard
Grafana URL:        http://192.168.1.22:3000
ID                  Role        Host          Ports                            OS/Arch       Status   Data Dir                                  Deploy Dir
--                  ----        ----          -----                            -------       ------   --------                                  ----------
192.168.1.22:3000   grafana     192.168.1.22  3000                             linux/x86_64  Up       -                                         /data/tidbdata/tidb-deploy/grafana-3000
192.168.1.22:2379   pd          192.168.1.22  2379/2380                        linux/x86_64  Up|L|UI  /data/tidbdata/tidb-data/pd-2379          /data/tidbdata/tidb-deploy/pd-2379
192.168.1.22:9090   prometheus  192.168.1.22  9090/12020                       linux/x86_64  Up       /data/tidbdata/tidb-data/prometheus-9090  /data/tidbdata/tidb-deploy/prometheus-9090
192.168.1.22:4000   tidb        192.168.1.22  4000/10080                       linux/x86_64  Up       -                                         /data/tidbdata/tidb-deploy/tidb-4000
192.168.1.22:9000   tiflash     192.168.1.22  9000/8123/3930/20170/20292/8234  linux/x86_64  Up       /data/tidbdata/tidb-data/tiflash-9000     /data/tidbdata/tidb-deploy/tiflash-9000
192.168.1.22:20160  tikv        192.168.1.22  20160/20180                      linux/x86_64  Up       /data/tidbdata/tidb-data/tikv-20160       /data/tidbdata/tidb-deploy/tikv-20160
192.168.1.22:20161  tikv        192.168.1.22  20161/20181                      linux/x86_64  Up       /data/tidbdata/tidb-data/tikv-20161       /data/tidbdata/tidb-deploy/tikv-20161
192.168.1.22:20162  tikv        192.168.1.22  20162/20182                      linux/x86_64  Up       /data/tidbdata/tidb-data/tikv-20162       /data/tidbdata/tidb-deploy/tikv-20162
Total nodes: 8
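
When you are finished with the test cluster, it can be stopped or removed entirely with the corresponding subcommands from the help output in 2.3 (destroy deletes the data and deploy directories, so use it with care):

[tidb@bogon ~]$ tiup cluster stop demo
[tidb@bogon ~]$ tiup cluster destroy demo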


 
