0. Prerequisites
Run the following commands so that docker commands no longer need the sudo prefix:
sudo groupadd docker
sudo usermod -aG docker hj_nlp_gd
sudo systemctl restart docker
newgrp docker
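To verify the change took effect (newgrp only applies to the current shell; log out and back in to apply it everywhere):
docker ps   # should list containers without sudo and without a permission-denied error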
1. Mount the data volume
1.1 Log in to the remote server: ssh -p 58022 hj_nlp_gd@172.83.63.18
1.2 Use df -h to list the directories already mounted.
Use lsblk to find unmounted data volumes.
mkfs.ext4 /dev/vdb  (substitute the actual volume to be mounted)
Use df -T to check the filesystem type of mounted volumes.
mkdir /data  (create the mount point)
mount /dev/vdb /data  (mount the volume)
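The mount above does not survive a reboot; a minimal sketch of persisting it via /etc/fstab, assuming /dev/vdb keeps its device name (a UUID from blkid is more robust):
echo '/dev/vdb /data ext4 defaults 0 2' | sudo tee -a /etc/fstab   # append the fstab entry
sudo mount -a   # re-read fstab to confirm the entry parses and mounts cleanly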
2. Install Docker
- Transfer the archive to the server over the non-default SSH port: scp -P 58022 docker-27.1.1.tgz hj_nlp_gd@172.83.63.29:/data
- Commands to install Docker (the archive name must match the file transferred above):
tar -zxvf docker-27.1.1.tgz
cp -rf docker/* /usr/bin/
vi /usr/lib/systemd/system/docker.service
Contents of the docker.service file opened by the command above:
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target docker.socket
[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
WorkingDirectory=/usr/local/bin
ExecStart=/usr/bin/dockerd \
-H tcp://0.0.0.0:4243 \
-H unix:///var/run/docker.sock \
--selinux-enabled=false \
--log-opt max-size=100m
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
[Install]
WantedBy=multi-user.target
systemctl daemon-reload
systemctl enable docker.service
systemctl start docker
docker version
2.3 Command to view user groups:
cat /etc/group
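A more targeted check than reading the whole file (getent and id are standard on CentOS 7):
getent group docker   # show only the docker group and its members
id -nG hj_nlp_gd      # confirm the user picked up docker group membership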
3. Install middleware
3.1 zookeeper:3.4.14 cluster installation
With --network host each container shares the host's network stack, so -p mappings are ignored; ports 2181/2888/3888 are exposed directly. Run one node per server:
docker run -d --name zookeeper1 --network host -e ZOO_MY_ID=1 -e ZOO_SERVERS="server.1=172.83.63.27:2888:3888 server.2=172.83.63.28:2888:3888 server.3=172.83.63.29:2888:3888" zk:3.4.14
docker run -d --name zookeeper2 --network host -e ZOO_MY_ID=2 -e ZOO_SERVERS="server.1=172.83.63.27:2888:3888 server.2=172.83.63.28:2888:3888 server.3=172.83.63.29:2888:3888" zk:3.4.14
docker run -d --name zookeeper3 --network host -e ZOO_MY_ID=3 -e ZOO_SERVERS="server.1=172.83.63.27:2888:3888 server.2=172.83.63.28:2888:3888 server.3=172.83.63.29:2888:3888" zk:3.4.14
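To verify leader election, query each node with the four-letter srvr command (assuming nc is installed on the hosts; ZooKeeper 3.4 whitelists srvr by default):
echo srvr | nc 172.83.63.27 2181 | grep Mode   # across the three nodes, expect one leader and two followers
echo srvr | nc 172.83.63.28 2181 | grep Mode
echo srvr | nc 172.83.63.29 2181 | grep Mode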
3.2 kafka:3.6.2 cluster installation
docker run -d --name kafka1 --network host -e KAFKA_BROKER_ID=1 -e KAFKA_ZOOKEEPER_CONNECT=172.83.63.27:2181,172.83.63.28:2181,172.83.63.29:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://172.83.63.27:9092 -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=3 kafka:3.6.2
docker run -d --name kafka2 --network host -e KAFKA_BROKER_ID=2 -e KAFKA_ZOOKEEPER_CONNECT=172.83.63.27:2181,172.83.63.28:2181,172.83.63.29:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://172.83.63.28:9092 -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=3 kafka:3.6.2
docker run -d --name kafka3 --network host -e KAFKA_BROKER_ID=3 -e KAFKA_ZOOKEEPER_CONNECT=172.83.63.27:2181,172.83.63.28:2181,172.83.63.29:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://172.83.63.29:9092 -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=3 kafka:3.6.2
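A quick smoke test, assuming the image puts the standard Kafka CLI scripts on the PATH (script locations differ between Kafka images):
docker exec kafka1 kafka-topics.sh --create --topic smoke-test --partitions 3 --replication-factor 3 --bootstrap-server 172.83.63.27:9092
docker exec kafka1 kafka-topics.sh --describe --topic smoke-test --bootstrap-server 172.83.63.27:9092   # all three broker ids should appear as replicas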
3.3 MySQL dual-master, dual-slave installation:
Download the MySQL 5.7.43 RPM packages.
CentOS 7 installs MariaDB packages by default, and they conflict with MySQL. Resolution:
Check: rpm -qa | grep mariadb
Force removal: rpm -e --nodeps mariadb-libs-5.5.44-2.el7.centos.x86_64
3.3.1 Install the MySQL packages with rpm -ivh; only the common, libs, client, and server packages are needed.
3.3.2 After installation, start the service: sudo systemctl start mysqld
systemctl status mysqld.service  (check the MySQL service status)
3.3.3 Look up the initial temporary password: sudo grep 'temporary password' /var/log/mysqld.log
3.3.4 Change the root user's password: ALTER USER 'root'@'localhost' IDENTIFIED BY 'new_password';
3.3.5 Create the replication account:
CREATE USER 'replica'@'%' IDENTIFIED BY 'replica_password';
GRANT REPLICATION SLAVE ON *.* TO 'replica'@'%';
FLUSH PRIVILEGES;
3.3.6 Edit the MySQL configuration file (/etc/my.cnf on an RPM install) and add:
server-id=133  # must be unique per server
log-bin=mysql-bin  # the binlog is mandatory for replication
auto-increment-increment=2
auto-increment-offset=2
log-slave-updates
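In a dual-master setup the two masters must not hand out colliding auto-increment values; a sketch of the peer master's additions, assuming 134 is a free server-id (any value works as long as it is unique):
server-id=134   # unique id, different from the other master
log-bin=mysql-bin
auto-increment-increment=2   # both masters step by 2...
auto-increment-offset=1      # ...from different offsets, so one issues odd ids and the other even
log-slave-updates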
3.3.7 Restart the MySQL service
sudo systemctl restart mysqld
3.3.8 Get the master's current binlog file and position
SHOW MASTER STATUS;
3.3.9 Configure replication on each slave (MASTER_USER and MASTER_PASSWORD must match the account created in 3.3.5):
CHANGE MASTER TO
MASTER_HOST='172.83.63.28',
MASTER_USER='repl',
MASTER_PASSWORD='y&cUkv6cP4bAt4',
MASTER_LOG_FILE='mysql-bin.000001',
MASTER_LOG_POS= 154;
START SLAVE;
SHOW SLAVE STATUS\G  -- Slave_IO_Running and Slave_SQL_Running should both read Yes
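A quick end-to-end check, writing through one master and reading from the other (repl_test is a hypothetical throwaway database name):
-- on master A
CREATE DATABASE repl_test;
CREATE TABLE repl_test.ping (id INT PRIMARY KEY);
INSERT INTO repl_test.ping VALUES (1);
-- on master B, a moment later
SELECT * FROM repl_test.ping;   -- the row should have replicated
DROP DATABASE repl_test;        -- clean up; the drop replicates too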
3.4 Nacos cluster deployment
3.4.1 Create the nacos_config database: CREATE DATABASE nacos_config CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;
3.4.2 Run the Nacos schema initialization script: mysql -u root -p'nacos_password' nacos_config < nacos-db.sql
3.4.3 In the MySQL shell, allow remote root login: USE mysql; UPDATE user SET host = '%' WHERE host = 'localhost' AND user = 'root'; FLUSH PRIVILEGES;
3.4.4 Create the Nacos configuration directories: sudo mkdir -p /opt/nacos/conf /opt/nacos/init.d
3.4.5 Edit cluster.conf under /opt/nacos/conf:
sudo vi cluster.conf
172.83.63.27:8848
172.83.63.28:8848
172.83.63.29:8848
3.4.6 Edit custom.properties under /opt/nacos/init.d:
sudo vi custom.properties
spring.datasource.platform=mysql
db.num=2
db.url.0=jdbc:mysql://172.83.63.27:3306/nacos_config?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true
db.url.1=jdbc:mysql://172.83.63.28:3306/nacos_config?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true
db.user=root
db.password=T4NECz3&#C8fef
3.4.7 Start the Nacos cluster with docker run:
docker run -d --name nacos1 --network host -e PREFER_HOST_MODE=ip -e MODE=cluster -e NACOS_SERVERS="172.83.63.27:8848 172.83.63.28:8848 172.83.63.29:8848" -e SPRING_DATASOURCE_PLATFORM=mysql -e MYSQL_SERVICE_HOST="172.83.63.27,172.83.63.28" -e MYSQL_SERVICE_PORT=3306 -e MYSQL_SERVICE_DB_NAME=nacos_config -e MYSQL_SERVICE_USER=root -e MYSQL_SERVICE_PASSWORD="T4NECz3&#C8fef" -v /opt/nacos/conf/cluster.conf:/home/nacos/conf/cluster.conf -v /opt/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties nacos:2.1.1
docker run -d --name nacos2 --network host -e PREFER_HOST_MODE=ip -e MODE=cluster -e NACOS_SERVERS="172.83.63.27:8848 172.83.63.28:8848 172.83.63.29:8848" -e SPRING_DATASOURCE_PLATFORM=mysql -e MYSQL_SERVICE_HOST="172.83.63.27,172.83.63.28" -e MYSQL_SERVICE_PORT=3306 -e MYSQL_SERVICE_DB_NAME=nacos_config -e MYSQL_SERVICE_USER=root -e MYSQL_SERVICE_PASSWORD="T4NECz3&#C8fef" -v /opt/nacos/conf/cluster.conf:/home/nacos/conf/cluster.conf -v /opt/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties nacos:2.1.1
docker run -d --name nacos3 --network host -e PREFER_HOST_MODE=ip -e MODE=cluster -e NACOS_SERVERS="172.83.63.27:8848 172.83.63.28:8848 172.83.63.29:8848" -e SPRING_DATASOURCE_PLATFORM=mysql -e MYSQL_SERVICE_HOST="172.83.63.27,172.83.63.28" -e MYSQL_SERVICE_PORT=3306 -e MYSQL_SERVICE_DB_NAME=nacos_config -e MYSQL_SERVICE_USER=root -e MYSQL_SERVICE_PASSWORD="T4NECz3&#C8fef" -v /opt/nacos/conf/cluster.conf:/home/nacos/conf/cluster.conf -v /opt/nacos/init.d/custom.properties:/home/nacos/init.d/custom.properties nacos:2.1.1
Note: the PREFER_HOST_MODE setting must be consistent with the address format used in NACOS_SERVERS (ip mode with IP addresses here); otherwise the nodes fail with errors.
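To confirm the cluster formed, query the cluster-nodes endpoint of the Nacos 1.x/2.x console API on any member (all three nodes should report state UP):
curl 'http://172.83.63.27:8848/nacos/v1/core/cluster/nodes'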
3.5 Redis sentinel-mode cluster deployment
3.5.1 On each of the three CentOS servers, create the directories and grant permissions (the paths must match the -v mounts in 3.5.3):
mkdir -p /opt/redis
# master node
mkdir -p /opt/redis/master/conf /opt/redis/master/data
# slave node 1
mkdir -p /opt/redis/slave1/conf /opt/redis/slave1/data
# slave node 2
mkdir -p /opt/redis/slave2/conf /opt/redis/slave2/data
chmod -R 777 /opt/redis
3.5.2 Create redis.conf in each node's conf directory (the sample below keeps the source's example master IP 192.168.3.115; substitute this deployment's master):
# bind 127.0.0.1  # comment this line out; it restricts Redis to local access only
protected-mode no  # default yes; protected mode limits access to localhost
daemonize no  # default no; yes would daemonize the process and break starting Redis from a config file
databases 16  # number of databases (optional)
dir ./  # directory for Redis data files (optional)
appendonly yes  # Redis persistence (optional)
logfile "access.log"
# Redis password
# requirepass master_123
requirepass salve_123
# port, one per node
port 7001
# port 7002
# port 7003
############ The settings below are for slaves only; omit them on the master ############
# IP and port of the Redis master
replicaof 192.168.3.115 7001
# If the master has a password, add the following (123456 is the master's password)
masterauth 123456
3.5.3 Run the docker run commands
docker run --net host --name redis1 \
-v /opt/redis/master/conf/redis.conf:/usr/local/etc/redis/redis.conf \
-v /opt/redis/master/data:/data \
--privileged=true -d redis:latest redis-server /usr/local/etc/redis/redis.conf
docker run --net host --name redis2 \
-v /opt/redis/slave1/conf/redis.conf:/usr/local/etc/redis/redis.conf \
-v /opt/redis/slave1/data:/data \
--privileged=true -d redis:latest redis-server /usr/local/etc/redis/redis.conf
docker run --net host --name redis3 \
-v /opt/redis/slave2/conf/redis.conf:/usr/local/etc/redis/redis.conf \
-v /opt/redis/slave2/data:/data \
--privileged=true -d redis:latest redis-server /usr/local/etc/redis/redis.conf
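To confirm replication, ask the master for its replication view (port and password as configured in 3.5.2):
docker exec redis1 redis-cli -p 7001 -a master_123 info replication   # expect role:master and connected_slaves:2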
3.5.4 Create the redis-sentinel directories and set permissions, following the same steps as for the Redis directories
3.5.5 Create a sentinel.conf file for each sentinel node:
# Sentinel port; the default is 26379 and can be changed
port 8001
# port 8002
# port 8003
# working directory
dir "/"
# log file path (the directory must exist)
logfile "/sentinel.log"
# Monitor the Redis master at 192.168.3.115:7001 (the master configured in 3.5.2) under the name mymaster;
# the trailing number is the quorum: with 3 sentinels, 2 must agree the master is down before it is treated as objectively down
sentinel monitor mymaster 192.168.3.115 7001 2
# If the master does not answer PING within this many milliseconds, this sentinel marks it subjectively down (default 30s)
sentinel down-after-milliseconds mymaster 5000
# If the master has a password, the sentinels must have it too, or they cannot monitor the master and slaves
sentinel auth-pass mymaster 123456
# sentinel password
requirepass 123456
3.5.6 Run docker run to create the redis-sentinel cluster
sudo docker run --net host --name redis-sentinel1 -v /opt/redis-sentinel/node1/sentinel.conf:/usr/local/etc/redis/sentinel.conf -d redis:latest redis-sentinel /usr/local/etc/redis/sentinel.conf
sudo docker run --net host --name redis-sentinel2 -v /opt/redis-sentinel/node2/sentinel.conf:/usr/local/etc/redis/sentinel.conf -d redis:latest redis-sentinel /usr/local/etc/redis/sentinel.conf
sudo docker run --net host --name redis-sentinel3 -v /opt/redis-sentinel/node3/sentinel.conf:/usr/local/etc/redis/sentinel.conf -d redis:latest redis-sentinel /usr/local/etc/redis/sentinel.conf
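To confirm the sentinels see the master (password from 3.5.5):
docker exec redis-sentinel1 redis-cli -p 8001 -a 123456 sentinel get-master-addr-by-name mymaster   # prints the current master's IP and port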
3.6 Elasticsearch cluster deployment
3.6.1 Create the ES conf, data, and logs directories and grant permissions:
sudo mkdir -p /opt/es/conf /opt/es/data /opt/es/logs
sudo chmod -R 777 /opt/es
3.6.2 Create conf/es-master.yml, conf/es-slave1.yml, and conf/es-slave2.yml (one per node):
cluster.name: es-cluster
node.name: es-master # es-slave1 es-slave2
node.master: true
node.data: true
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
network.host: 172.83.63.27 #172.83.63.28 172.83.63.29
http.port: 9200
transport.tcp.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"
discovery.seed_hosts: ["172.83.63.27","172.83.63.28","172.83.63.29"]
cluster.initial_master_nodes: ["172.83.63.27"]
discovery.zen.minimum_master_nodes: 2 # ignored by ES 7.x; harmless to keep
3.6.3 Run the docker run commands (the data and logs mounts point at the /opt/es/data and /opt/es/logs directories created in 3.6.1):
docker run -d --net host \
-v /opt/es/conf/es-master.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
-v /opt/es/data:/usr/share/elasticsearch/data \
-v /opt/es/logs:/usr/share/elasticsearch/logs \
--name es-master es:7.3.2
docker run -d --net host \
-v /opt/es/conf/es-slave1.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
-v /opt/es/data:/usr/share/elasticsearch/data \
-v /opt/es/logs:/usr/share/elasticsearch/logs \
--name es-slave1 es:7.3.2
docker run -d --net host \
-v /opt/es/conf/es-slave2.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
-v /opt/es/data:/usr/share/elasticsearch/data \
-v /opt/es/logs:/usr/share/elasticsearch/logs \
--name es-slave2 es:7.3.2
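Once all three containers are up, check cluster health from any node:
curl 'http://172.83.63.27:9200/_cluster/health?pretty'   # expect "status" : "green" and "number_of_nodes" : 3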
3.7 Kibana single-instance deployment
docker run -d --net host \
-v /opt/kibana/conf/kibana.yml:/usr/share/kibana/config/kibana.yml \
-v /opt/kibana/data:/usr/share/kibana/data \
-v /opt/kibana/logs:/usr/share/kibana/logs \
--name kibana kibana:7.3.2
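The command above mounts /opt/kibana/conf/kibana.yml, which this guide does not show; a minimal sketch for Kibana 7.3, leaving everything else at defaults:
server.host: "0.0.0.0"                              # listen on all interfaces
elasticsearch.hosts: ["http://172.83.63.27:9200"]   # any ES node works as the entry point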
3.8 SFTP deployment (SFTP is served by sshd's built-in subsystem, so enabling sshd is sufficient)
sudo systemctl start sshd
sudo systemctl enable sshd
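A quick connectivity check from another host, reusing the non-standard SSH port from section 1 (the IP below is one of this deployment's servers):
sftp -P 58022 hj_nlp_gd@172.83.63.27   # reaching the sftp> prompt confirms the subsystem is enabled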
3.9 es-job single-instance deployment
docker run -d --name es-job es-job:v1.0