TiDB install

host 172.31.28.209
host 172.31.28.210
host 172.31.28.211
ports:
37254 SSH
9100  monitored node exporter port
9115  monitored blackbox exporter port
2379  PD server client port
2380  PD server peer port
4000  TiDB server port
10080 TiDB server status port
20160 TiKV server port
20180 TiKV server status port
9090  monitoring server port
3000  Grafana server port
9093  Alertmanager server web port
9094  Alertmanager server cluster port
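
# If a host firewall is enabled, these ports must be reachable between the cluster hosts. A minimal sketch with firewalld (an assumption; this guide later uses per-source iptables rules instead):
shell> sudo firewall-cmd --permanent --add-port={2379,2380,4000,10080,20160,20180,9090,3000,9093,9094,9100,9115}/tcp;
shell> sudo firewall-cmd --reload;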

# 2 Check disk
# Check whether an ext4 disk is already mounted; if so, skip the steps below and go straight to step 3 (system settings)
shell> lsblk -f
# If the check above shows a non-ext4 file system, run the following steps
shell> umount /dev/sdb1;
# Format the disk as ext4
shell> mkfs.ext4 /dev/sdb1;
# Mount the disk
shell> mount /dev/sdb1 /data;
# Check the UUID of /dev/sdb1
shell> lsblk -f;
# Persist the mount in /etc/fstab
shell> vi /etc/fstab;
# Write the UUID found above into this file, e.g.
# UUID=28756bf-facf-4f0d-811d-a3bafd6b99d7 /data ext4 defaults,nodelalloc,noatime 0 2
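
# To confirm the new fstab entry mounts cleanly before the next reboot (a quick sanity check):
shell> umount /data;
shell> mount -a;
shell> lsblk -f;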

# 3 System settings
# Create the tidb user as root
# Run on each of 172.31.28.209, 172.31.28.210, 172.31.28.211
shell> useradd tidb;
shell> passwd tidb;


# Set up passwordless SSH login (mutual trust keys); run on 172.31.28.209, 172.31.28.210, and 172.31.28.211

shell> su - tidb
shell> ssh-keygen;

# Use 172.31.28.209 as the primary server
# Copy the id_rsa.pub from .209 into the .210 and .211 servers to enable passwordless login
172.31.28.209:shell> cat /home/tidb/.ssh/id_rsa.pub;
# Log in to the 172.31.28.210 server
172.31.28.210:shell> su tidb
172.31.28.210:shell> vi /home/tidb/.ssh/authorized_keys;
# Paste in the id_rsa.pub from the .209 server, removing any stray whitespace
# Log in to the 172.31.28.211 server
172.31.28.211:shell> su tidb
172.31.28.211:shell> vi /home/tidb/.ssh/authorized_keys;
# Paste in the id_rsa.pub from the .209 server, removing any stray whitespace

# This completes the basic key-based login setup
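
# Alternatively, ssh-copy-id automates the copy-and-paste above (a sketch, assuming password authentication is still enabled on the peers):
172.31.28.209:shell> ssh-copy-id -p 37254 tidb@172.31.28.210;
172.31.28.209:shell> ssh-copy-id -p 37254 tidb@172.31.28.211;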

# 4 System-level settings; run on 172.31.28.209, 172.31.28.210, and 172.31.28.211, adding the tidb account to AllowUsers

shell> vi /etc/sudoers;
# Add the line: tidb ALL=(ALL) NOPASSWD: ALL
shell> vi /etc/ssh/sshd_config;
# Near the "no default banner path" section, add the tidb user:
AllowUsers tidb root pzom pzuser
#Banner none
shell> systemctl reload sshd;
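# It is also worth validating the sshd config before reloading; sshd -t exits non-zero on syntax errors:
shell> sshd -t;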

# 5 Test passwordless SSH login
shell> su tidb
172.31.28.209:shell> ssh -p 37254 tidb@172.31.28.210
172.31.28.209:shell> ssh -p 37254 tidb@172.31.28.211
# At this point passwordless login should work. If it does not, the usual causes are wrong file permissions or a mis-copied key; consult the relevant docs to fix passwordless login.
# If you need to adjust the firewall or maintain a whitelist, it is also recommended to add the peer IPs (172.31.28.209, 172.31.28.210, 172.31.28.211) to each server, e.g.:
sudo iptables -I INPUT -s 172.31.28.210 -j ACCEPT
sudo iptables -I INPUT -s 172.31.28.211 -j ACCEPT
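# iptables rules are not persistent by default; to keep them across reboots (assuming a CentOS/RHEL host with the iptables-services package; adjust for your distro):
shell> sudo iptables-save | sudo tee /etc/sysconfig/iptables;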
# 6 Install TiDB (172.31.28.209, 172.31.28.210, 172.31.28.211) prerequisites; if running on a virtual machine, this step is not needed
shell> yum install numactl;

# Download TiDB Server
172.31.28.209:shell> wget https://download.pingcap.org/tidb-community-server-v7.5.0-linux-amd64.tar.gz;
172.31.28.209:shell> su tidb
172.31.28.209:shell> cd /usr/local;
172.31.28.209:shell> tar -zxvf tidb-community-server-v7.5.0-linux-amd64.tar.gz;
172.31.28.209:shell> cd tidb-community-server-v7.5.0-linux-amd64;
172.31.28.209:shell> ./local_install.sh;
# topology.yaml will be generated
172.31.28.209:shell> vi topology.yaml;
# Enter the following configuration

global:
  user: "tidb"
  ssh_port: 37254
  deploy_dir: "/data/tidb-deploy"
  data_dir: "/data/tidb-data"

monitored:
  node_exporter_port: 9100
  blackbox_exporter_port: 9115

server_configs:
  tidb:
    log.level: "warn"
    log.slow-threshold: 10000
    max-server-connections: 10000
    performance.max-procs: 20
    tikv-client.copr-cache.admission-max-result-mb: 200.0
    tikv-client.copr-cache.capacity-mb: 16000.0
    tikv-client.copr-cache.enable: true
    tikv-client.max-batch-wait-time: 2000000
    new_collations_enabled_on_first_bootstrap: true
  tikv:
    coprocessor.region-max-keys: 2880000
    coprocessor.region-max-size: 288MB
    coprocessor.region-split-keys: 1920000
    coprocessor.region-split-size: 192MB
    pessimistic-txn.pipelined: true
    raftdb.allow-concurrent-memtable-write: true
    raftdb.max-background-jobs: 4
    raftstore.apply-pool-size: 3
    raftstore.store-pool-size: 3
    readpool.coprocessor.use-unified-pool: true
    readpool.storage.normal-concurrency: 10
    readpool.storage.use-unified-pool: false
    readpool.unified.max-thread-count: 28
    readpool.unified.min-thread-count: 5
    rocksdb.max-background-jobs: 8
    server.enable-request-batch: false
    server.grpc-concurrency: 6
    storage.block-cache.capacity: 6GB
    storage.block-cache.shared: true
    storage.scheduler-worker-pool-size: 3
  pd:
    replication.location-labels: ["host"]
    schedule.leader-schedule-limit: 4
    schedule.region-schedule-limit: 2048
    schedule.replica-schedule-limit: 64

pd_servers:
  - host: 172.31.28.209
    ssh_port: 37254
    name: "pd-1"
    client_port: 2379
    peer_port: 2380
    deploy_dir: "/data/tidb-deploy/pd-2379"
    data_dir: "/data/tidb-data/pd-2379"
    log_dir: "/data/tidb-deploy/pd-2379/log"
    config:
      schedule.max-merge-region-size: 20
      schedule.max-merge-region-keys: 200000
  - host: 172.31.28.210
    ssh_port: 37254
    name: "pd-2"
    client_port: 2379
    peer_port: 2380
    deploy_dir: "/data/tidb-deploy/pd-2379"
    data_dir: "/data/tidb-data/pd-2379"
    log_dir: "/data/tidb-deploy/pd-2379/log"
    config:
      schedule.max-merge-region-size: 20
      schedule.max-merge-region-keys: 200000
  - host: 172.31.28.211
    ssh_port: 37254
    name: "pd-3"
    client_port: 2379
    peer_port: 2380
    deploy_dir: "/data/tidb-deploy/pd-2379"
    data_dir: "/data/tidb-data/pd-2379"
    log_dir: "/data/tidb-deploy/pd-2379/log"
    config:
      schedule.max-merge-region-size: 20
      schedule.max-merge-region-keys: 200000

tidb_servers:
  - host: 172.31.28.209
    ssh_port: 37254
    port: 4000
    status_port: 10080
    deploy_dir: "/data/tidb-deploy/tidb-4000"
    log_dir: "/data/tidb-deploy/tidb-4000/log"
    config:
      log.slow-query-file: tidb-slow-overwrited.log
  - host: 172.31.28.210
    ssh_port: 37254
    port: 4000
    status_port: 10080
    deploy_dir: "/data/tidb-deploy/tidb-4000"
    log_dir: "/data/tidb-deploy/tidb-4000/log"
    config:
      log.slow-query-file: tidb-slow-overwrited.log
  - host: 172.31.28.211
    ssh_port: 37254
    port: 4000
    status_port: 10080
    deploy_dir: "/data/tidb-deploy/tidb-4000"
    log_dir: "/data/tidb-deploy/tidb-4000/log"
    config:
      log.slow-query-file: tidb-slow-overwrited.log

tikv_servers:
  - host: 172.31.28.209
    ssh_port: 37254
    port: 20160
    status_port: 20180
    deploy_dir: "/data/tidb-deploy/tikv-20160"
    data_dir: "/data/tidb-data/tikv-20160"
    log_dir: "/data/tidb-deploy/tikv-20160/log"
    config:
      server.labels: { host: "tikv1" }
  - host: 172.31.28.210
    ssh_port: 37254
    port: 20160
    status_port: 20180
    deploy_dir: "/data/tidb-deploy/tikv-20161"
    data_dir: "/data/tidb-data/tikv-20161"
    log_dir: "/data/tidb-deploy/tikv-20161/log"
    config:
      server.labels: { host: "tikv2" }
  - host: 172.31.28.211
    ssh_port: 37254
    port: 20160
    status_port: 20180
    deploy_dir: "/data/tidb-deploy/tikv-20160"
    data_dir: "/data/tidb-data/tikv-20160"
    log_dir: "/data/tidb-deploy/tikv-20160/log"
    config:
      server.labels: { host: "tikv3" }

monitoring_servers:
  - host: 172.31.28.209
    ssh_port: 37254
    port: 9090
    deploy_dir: "/data/tidb-deploy/prometheus-8249"
    data_dir: "/data/tidb-data/prometheus-8249"
    log_dir: "/data/tidb-deploy/prometheus-8249/log"

grafana_servers:
  - host: 172.31.28.209
    port: 3000
    deploy_dir: /data/tidb-deploy/grafana-3000

alertmanager_servers:
  - host: 172.31.28.209
    ssh_port: 37254
    web_port: 9093
    cluster_port: 9094
    deploy_dir: "/data/tidb-deploy/alertmanager-9093"
    data_dir: "/data/tidb-data/alertmanager-9093"
    log_dir: "/data/tidb-deploy/alertmanager-9093/log"
 
# The parameters above can be tuned to the host's hardware, in particular:
  #storage.block-cache.capacity: 6GB
  #storage.block-cache.shared: true
  #storage.scheduler-worker-pool-size: 3
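# Before deploying, the topology file and host prerequisites can be validated; tiup cluster check is a standard TiUP subcommand, and --apply attempts to fix failed checks automatically:
shell> tiup cluster check ./topology.yaml --user tidb;
shell> tiup cluster check ./topology.yaml --apply --user tidb;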
# 7 Deploy TiDB Server
shell> su tidb
shell> tiup cluster deploy Report_Tidb v7.5.0 topology.yaml --user=tidb;
## Cluster name: Report_Tidb
## Version: v7.5.0
## User: --user=tidb
## Topology file: topology.yaml

shell> su tidb
shell> tiup cluster list;

# 8 Start the cluster
shell> tiup cluster start Report_Tidb --init;
## An important password is generated here; be sure to record it and copy it out
## The new password is: <generated password>
# 9 Display cluster status
shell> su tidb
shell> tiup cluster display Report_Tidb;
# 10 TiDB Dashboard login
# In a browser, open http://172.31.28.210:2379/dashboard and log in with the password recorded above
# 11 Grafana monitoring board
# In a browser, open http://172.31.28.209:3000
# Log in with the default credentials:
admin/admin
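
# To verify SQL connectivity after startup (assuming a MySQL client is installed; 4000 is the TiDB port from the topology above, and the password is the one recorded in step 8):
shell> mysql -h 172.31.28.209 -P 4000 -u root -p
mysql> SELECT tidb_version();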
# Script for installation steps 1-3
#!/bin/bash

# Installation start timestamp
START_TIME=$(date +"%Y-%m-%d %H:%M:%S")

# Print text in yellow
echo_yellow() {
   echo -e "\e[93m$1\e[0m"
}

# Create the tidb account
echo_yellow "Creating login for tidb..."
read -s -p "Enter password for tidb: " PASSWORD
echo ""
if grep -q "^tidb:" /etc/passwd; then
        echo "User exists!";
else
        # useradd -p expects a pre-hashed password, so hash the input first
        useradd -m -p "$(openssl passwd -1 "$PASSWORD")" tidb
        [ $? -eq 0 ] && echo_yellow "Created login for tidb..." || echo "Failed to add user!"
fi

# Generate the tidb ssh key
echo_yellow "Generating ssh key..."
if [ ! -f "/home/tidb/.ssh/id_rsa" ]; then
        # ssh-keygen does not create the .ssh directory itself
        sudo -u tidb mkdir -p /home/tidb/.ssh;
        sudo -u tidb ssh-keygen -t rsa -P '' -f /home/tidb/.ssh/id_rsa;
        touch /home/tidb/.ssh/authorized_keys;
        chown tidb:tidb /home/tidb/.ssh/authorized_keys;
        chmod 600 /home/tidb/.ssh/authorized_keys;
        [ $? -eq 0 ] && echo_yellow "Generated ssh key..." || echo "Failed to generate ssh key!"
fi

# Add tidb to the sshd AllowUsers whitelist
echo_yellow "Setting up passwordless ssh login..."
if ! grep -q "tidb" /etc/ssh/sshd_config; then
        sed -i 's/AllowUsers/AllowUsers tidb root/g' /etc/ssh/sshd_config
        [ $? -eq 0 ] && echo_yellow "Passwordless ssh login configured..." || echo "Failed to configure passwordless login!"
        # Reload sshd so the AllowUsers change takes effect (as in step 4 above)
        systemctl reload sshd
fi

# Force-stop the Nightingale monitoring agent (categraf)
if [ -d "/data/categraf" ]; then
        echo_yellow "Stopping categraf..."
        systemctl stop categraf.service
        [ $? -eq 0 ] && echo_yellow "Stopped categraf..." || echo "Failed to stop categraf service"
fi
# Back up the data directory before re-mounting
if [ ! -z "$(ls -A /data)" ]; then
        echo_yellow "Backing up old data..."
        cd /data;
        tar -zcvf alldata.tar.gz *;
        mv alldata.tar.gz /home/tidb/.;
        cd /;
        echo_yellow "Old data backed up..."
fi

echo_yellow "初始化数据磁盘(Data disk initial)..."
DataOldUUID='lsblk -f | grep /data | awk '//{print substr($3,1,36)}'';
DataDiskLabel='lsblk -f | grep /data | awk '//{print substr($1,3,4)}'';
umount /dev/${DataDiskLabel};
mkfs.ext4 /dev/${DataDiskLabel};
mount /dev/${DataDiskLabel} /data;
DataNewUUId='lsblk -f | grep ${DataDiskLabel} | awk '//{print substr($3,1,36)}'';
if grep -q "/data" /etc/fstab; then
         sed -i "s/UUID=${DataOldUUID}//UUID=${DataNewUUID}/g" /etc/fstab;
         sed -i "s/data xfs defaults 0 0/data ext4 defaults,nodelalloc,noatime 0 2/g" /etc/fstab;
fi

## If there is more than one data disk (e.g. a second disk mounted at /data1; adjust paths to match):
#Data1OldUUID=$(lsblk -f | grep /data1 | awk '{print substr($3,1,36)}');
#Data1DiskLabel=$(lsblk -f | grep /data1 | awk '{print substr($1,3,4)}');
#umount /dev/${Data1DiskLabel};
#mkfs.ext4 /dev/${Data1DiskLabel};
#mount /dev/${Data1DiskLabel} /data1;
#Data1NewUUID=$(lsblk -f | grep "${Data1DiskLabel}" | awk '{print substr($3,1,36)}');
#if grep -q "/data1" /etc/fstab; then
#        sed -i "s/UUID=${Data1OldUUID}/UUID=${Data1NewUUID}/g" /etc/fstab;
#        sed -i "s/data1 xfs defaults 0 0/data1 ext4 defaults,nodelalloc,noatime 0 2/g" /etc/fstab;
#fi

echo_yellow "初始化数据磁盘已完成(Data disk initial finish)..."

# Restore the original folders and delete the archive
if [ -f "/home/tidb/alldata.tar.gz" ]; then
        echo_yellow "Restoring backed-up data..."
        cd /data;
        tar -zxvf /home/tidb/alldata.tar.gz;
        rm -rf /home/tidb/alldata.tar.gz /data/lost+found;
        echo_yellow "Backed-up data restored..."
fi

# Installation end timestamp
END_TIME=$(date +"%Y-%m-%d %H:%M:%S")

echo ""
echo ""
echo_yellow "##################################################"
echo_yellow "##################################################"
echo_yellow "Setup started at:  ${START_TIME}"
echo_yellow "tidb passwd: ${PASSWORD}"
echo_yellow "Disk new uuid: ${DataNewUUID}"
echo_yellow "Setup finished at: ${END_TIME}"
