[tidb@tidb ~]$ cat topology.yaml
# # Global variables are applied to all deployments and used as the default value of
# # the deployments if a specific deployment value is missing.
global:
user: "tidb"
ssh_port: 22
deploy_dir: "/data1/tidb-deploy"
data_dir: "/data1/tidb-data"
# # Monitored variables are applied to all the machines.
monitored:
  node_exporter_port: 9100
  blackbox_exporter_port: 9115
  # deploy_dir: "/data1/tidb-deploy/monitored-9100"
  # data_dir: "/data1/tidb-data/monitored-9100"
  # log_dir: "/data1/tidb-deploy/monitored-9100/log"
# # Server configs are used to specify the runtime configuration of TiDB components.
# # All configuration items can be found in TiDB docs:
# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/
# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/
# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/
# # All configuration items use dots to represent the hierarchy, e.g.:
# # readpool.storage.use-unified-pool
# #
# # You can overwrite this configuration via the instance-level `config` field.
server_configs:
  tidb:
    log.level: "error"
    prepared-plan-cache.enabled: true
    log.slow-threshold: 300
  tikv:
    # server.grpc-concurrency: 4
    # raftstore.apply-pool-size: 2
    # raftstore.store-pool-size: 2
    # rocksdb.max-sub-compactions: 1
    # storage.block-cache.capacity: "16GB"
    # readpool.unified.max-thread-count: 12
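    # The shared block cache defaults to roughly 45% of system memory; it is
    # capped at 6GB below, presumably to leave headroom on the host.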
storage.block-cache.capacity: "6GB"
readpool.storage.use-unified-pool: false
readpool.coprocessor.use-unified-pool: true
  pd:
    schedule.leader-schedule-limit: 4
    schedule.region-schedule-limit: 2048
    schedule.replica-schedule-limit: 64
    replication.enable-placement-rules: true
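    # Note: placement rules must stay enabled while TiFlash replicas are in use.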
  tiflash:
    # Maximum memory usage for processing a single query. Zero means unlimited.
    profiles.default.max_memory_usage: 0
    # Maximum memory usage for processing all concurrently running queries on the server. Zero means unlimited.
    profiles.default.max_memory_usage_for_all_queries: 0
pd_servers:
  - host: 192.168.0.81
    ssh_port: 22
    name: "pd-1"
    client_port: 2379
    peer_port: 2380
    deploy_dir: "/data1/tidb-deploy/pd-2379"
    data_dir: "/data1/tidb-data/pd-2379"
    log_dir: "/data1/tidb-deploy/pd-2379/log"
    numa_node: "0,1"
    config:
      schedule.max-merge-region-size: 20
      schedule.max-merge-region-keys: 200000
tidb_servers:
  - host: 192.168.0.81
    ssh_port: 22
    port: 4000
    status_port: 10080
    deploy_dir: "/data1/tidb-deploy/tidb-4000"
    log_dir: "/data1/tidb-deploy/tidb-4000/log"
    numa_node: "0,1"
    config:
      log.slow-query-file: tidb-slow-overwrited.log
tikv_servers:
  - host: 192.168.0.48
  - host: 192.168.0.38
  - host: 192.168.0.76
    ssh_port: 22
    port: 20160
    status_port: 20180
    deploy_dir: "/data1/tidb-deploy/tikv-20160"
    data_dir: "/data1/tidb-data/tikv-20160"
    log_dir: "/data1/tidb-deploy/tikv-20160/log"
cdc_servers:
  - host: 192.168.0.99
    port: 8300
    deploy_dir: "/data1/tidb-deploy/cdc-8300"
    log_dir: "/data1/tidb-deploy/cdc-8300/log"
monitoring_servers:
  - host: 192.168.0.99
    ssh_port: 22
    port: 9090
    deploy_dir: "/data1/tidb-deploy/prometheus-8249"
    data_dir: "/data1/tidb-data/prometheus-8249"
    log_dir: "/data1/tidb-deploy/prometheus-8249/log"
grafana_servers:
  - host: 192.168.0.99
    port: 3000
    deploy_dir: "/data1/tidb-deploy/grafana-3000"
alertmanager_servers:
  - host: 192.168.0.99
    ssh_port: 22
    web_port: 9093
    cluster_port: 9094
    deploy_dir: "/data1/tidb-deploy/alertmanager-9093"
    data_dir: "/data1/tidb-data/alertmanager-9093"
    log_dir: "/data1/tidb-deploy/alertmanager-9093/log"
[root@tidb ~]# su tidb
[tidb@tidb root]$ cd
[tidb@tidb ~]$ ls
config topology_bak20210711.yaml topology_bak.yaml topology.yaml
[tidb@tidb ~]$ cat topology_bak20210711.yaml
# # Global variables are applied to all deployments and used as the default value of
# # the deployments if a specific deployment value is missing.
global:
user: "tidb"
ssh_port: 22
deploy_dir: "/tidb-deploy"
data_dir: "/tidb-data"
# # Monitored variables are applied to all the machines.
monitored:
  node_exporter_port: 9100
  blackbox_exporter_port: 9115
  # deploy_dir: "/tidb-deploy/monitored-9100"
  # data_dir: "/tidb-data/monitored-9100"
  # log_dir: "/tidb-deploy/monitored-9100/log"
# # Server configs are used to specify the runtime configuration of TiDB components.
# # All configuration items can be found in TiDB docs:
# # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/
# # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/
# # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/
# # All configuration items use dots to represent the hierarchy, e.g.:
# # readpool.storage.use-unified-pool
# #
# # You can overwrite this configuration via the instance-level `config` field.
server_configs:
  tidb:
    log.slow-threshold: 300
  tikv:
    # server.grpc-concurrency: 4
    # raftstore.apply-pool-size: 2
    # raftstore.store-pool-size: 2
    # rocksdb.max-sub-compactions: 1
    # storage.block-cache.capacity: "16GB"
    # readpool.unified.max-thread-count: 12
    readpool.storage.use-unified-pool: false
    readpool.coprocessor.use-unified-pool: true
  pd:
    schedule.leader-schedule-limit: 4
    schedule.region-schedule-limit: 2048
    schedule.replica-schedule-limit: 64
    replication.enable-placement-rules: true
  tiflash:
    # Maximum memory usage for processing a single query. Zero means unlimited.
    profiles.default.max_memory_usage: 0
    # Maximum memory usage for processing all concurrently running queries on the server. Zero means unlimited.
    profiles.default.max_memory_usage_for_all_queries: 0
pd_servers:
  - host: 192.168.0.81
    # ssh_port: 22
    # name: "pd-1"
    # client_port: 2379
    # peer_port: 2380
    # deploy_dir: "/tidb-deploy/pd-2379"
    # data_dir: "/tidb-data/pd-2379"
    # log_dir: "/tidb-deploy/pd-2379/log"
    # numa_node: "0,1"
    # # The following configs are used to overwrite the `server_configs.pd` values.
    # config:
    #   schedule.max-merge-region-size: 20
    #   schedule.max-merge-region-keys: 200000
tidb_servers:
  - host: 192.168.0.81
    # ssh_port: 22
    # port: 4000
    # status_port: 10080
    # deploy_dir: "/tidb-deploy/tidb-4000"
    # log_dir: "/tidb-deploy/tidb-4000/log"
    # numa_node: "0,1"
    # # The following configs are used to overwrite the `server_configs.tidb` values.
    # config:
    #   log.slow-query-file: tidb-slow-overwrited.log
tikv_servers:
  - host: 192.168.0.48
  - host: 192.168.0.38
  - host: 192.168.0.76
    # ssh_port: 22
    # port: 20160
    # status_port: 20180
    # deploy_dir: "/tidb-deploy/tikv-20160"
    # data_dir: "/tidb-data/tikv-20160"
    # log_dir: "/tidb-deploy/tikv-20160/log"
    # numa_node: "0,1"
    # # The following configs are used to overwrite the `server_configs.tikv` values.
    # config:
    #   server.grpc-concurrency: 4
    #   server.labels: { zone: "zone1", dc: "dc1", host: "host1" }
tiflash_servers:
  - host: 10.28.0.236
    # ssh_port: 22
    # tcp_port: 9000
    # http_port: 8123
    # flash_service_port: 3930
    # flash_proxy_port: 20170
    # flash_proxy_status_port: 20292
    # metrics_port: 8234
    # deploy_dir: /tidb-deploy/tiflash-9000
    ## The `data_dir` will be overwritten if you define `storage.main.dir` configurations in the `config` section.
    # data_dir: /tidb-data/tiflash-9000
    # numa_node: "0,1"
    # # The following configs are used to overwrite the `server_configs.tiflash` values.
    # config:
    #   logger.level: "info"
    #   ## Multi-disk deployment introduced in v4.0.9
    #   ## Check https://docs.pingcap.com/tidb/stable/tiflash-configuration#multi-disk-deployment for more details.
    #   ## Example 1:
    #   # storage.main.dir: [ "/nvme_ssd0_512/tiflash", "/nvme_ssd1_512/tiflash" ]
    #   # storage.main.capacity: [ 536870912000, 536870912000 ]
    #   ## Example 2:
    #   # storage.main.dir: [ "/sata_ssd0_512/tiflash", "/sata_ssd1_512/tiflash", "/sata_ssd2_512/tiflash" ]
    #   # storage.latest.dir: [ "/nvme_ssd0_150/tiflash" ]
    #   # storage.main.capacity: [ 536870912000, 536870912000 ]
    #   # storage.latest.capacity: [ 161061273600 ]
    # learner_config:
    #   log-level: "info"
  # - host: 10.0.1.12
  # - host: 10.0.1.13
cdc_servers:
  - host: 192.168.0.99
    port: 8300
    deploy_dir: "/tidb-deploy/cdc-8300"
    log_dir: "/tidb-deploy/cdc-8300/log"
monitoring_servers:
  - host: 192.168.0.99
    # ssh_port: 22
    # port: 9090
    # deploy_dir: "/tidb-deploy/prometheus-8249"
    # data_dir: "/tidb-data/prometheus-8249"
    # log_dir: "/tidb-deploy/prometheus-8249/log"
grafana_servers:
  - host: 192.168.0.99
    # port: 3000
    # deploy_dir: /tidb-deploy/grafana-3000
alertmanager_servers:
  - host: 192.168.0.99
    # ssh_port: 22
    # web_port: 9093
    # cluster_port: 9094
    # deploy_dir: "/tidb-deploy/alertmanager-9093"
    # data_dir: "/tidb-data/alertmanager-9093"
    # log_dir: "/tidb-deploy/alertmanager-9093/log"
[tidb@tidb ~]$ cat config
mysql-host=192.168.0.81
mysql-port=4000
mysql-user=root
mysql-password=
mysql-db=sbtest
time=600
threads=16
report-interval=10
db-driver=mysql
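
This `config` file holds sysbench options targeting the TiDB server at
192.168.0.81:4000, database sbtest. Assuming sysbench 1.0+ and its bundled
oltp_read_write workload, a typical prepare-and-run sequence would be (table
count and size are illustrative, and the sbtest database must already exist):

    sysbench --config-file=config oltp_read_write --tables=16 --table-size=1000000 prepare
    sysbench --config-file=config oltp_read_write --tables=16 --table-size=1000000 run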