中间件部署
1、下载
文件地址
链接:http://pan.dhcc.cloud/share/detail/941186b9-173a-4369-48a4-2dd0955df4fb 提取码:gdvn
2、安装docker
2.1、安装docker依赖
选择合适版本的docker安装包将安装文件(docker-install.tar.gz)上传至上文提到的docker安装包存放路径…/aml/tar目录下,以下以/home/aml/tar路径做演示
[root@localhost ~]# cd /home/aml/tar
[root@localhost tar]# tar zxf docker-install.tar.gz
[root@localhost tar]# cd 01
[root@localhost 01]# sudo rpm -Uvh *.rpm --nodeps --force
[root@localhost 01]# cd ../02/
[root@localhost 02]# sudo rpm -Uvh *.rpm --nodeps --force
2.2、启动docker
启动docker并将docker加入开机启动
[root@localhost 02]# sudo systemctl start docker
[root@localhost 02]# sudo systemctl enable docker
2.3 安装docker-compose
下载https://github.com/docker/compose/releases/download/1.28.5/docker-compose-Linux-x86_64
将下载好的docker-compose-Linux-x86_64文件上传至/bin/目录,并重命名为docker-compose
mv /bin/docker-compose-Linux-x86_64 /bin/docker-compose
赋予可执行权限
chmod +x /bin/docker-compose
3、 Docker构建redis
3.1 添加redis镜像
将redis的镜像tar包上传至…/aml/tar目录下,以下以/home/aml/tar路径做演示
[root@localhost ~]# cd /home/aml/tar
#导入redis镜像
[root@localhost tar]# docker load -i redis-5.0.9.tar
#查看当前已添加的镜像
[root@localhost tar]# docker images
3.2 编写docker-compose文件实现redis一主二从
创建redis目录
cd /home/aml/docker
mkdir redis && cd redis
vim docker-compose.yml
version: '3.7'
services:
  master:
    image: aml-redis:5.0.9
    container_name: redis-master
    restart: always
    # --requirepass sets the auth password; --appendonly yes enables AOF persistence
    command: redis-server --port 6379 --requirepass ZQxP#GXT --appendonly yes
    ports:
      - 6379:6379
    volumes:
      # Each node gets its own data dir: with a shared ./data all three
      # instances would write the same appendonly.aof and corrupt it.
      - ./data/master:/data
      - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
  slave1:
    image: aml-redis:5.0.9
    container_name: redis-slave-1
    restart: always
    # Replicates from the host IP (172.28.30.183); --masterauth is the master password
    command: redis-server --slaveof 172.28.30.183 6379 --port 6380 --requirepass ZQxP#GXT --masterauth ZQxP#GXT --appendonly yes
    ports:
      - 6380:6380
    volumes:
      - ./data/slave1:/data
      - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
  slave2:
    image: aml-redis:5.0.9
    container_name: redis-slave-2
    restart: always
    command: redis-server --slaveof 172.28.30.183 6379 --port 6381 --requirepass ZQxP#GXT --masterauth ZQxP#GXT --appendonly yes
    ports:
      - 6381:6381
    volumes:
      - ./data/slave2:/data
      - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
名词 | 描述 |
---|---|
version | docker文件的版本 |
image | 指定容器镜像就是之前拉取的redis镜像 |
container_name | 给这个镜像起一个别名 |
restart | always:表明开机自启动 |
command | 相当于执行一些命令 (--requirepass 指定redis密码 --appendonly yes 这个命令是用于开启redis数据持久化) |
ports | 端口映射,将容器的端口映射到对应宿主机的端口 |
volumes | 数据卷的映射.因为一旦容器停止了那么里面的数据也没有.所以我们需要把这个数据文件放在外面,然后映射到容器中 |
3.3 启动redis,使用如下命令(-d 表示在后台运行)
使用docker-compose up -d 启动
使用docker-compose ps 或 docker ps 查看运行的容器
注:使用docker-compose ps查看需要和docker-compose.yml文件同一目录,且只查看此文件启动的容器
3.4 测试redis主从
docker exec -it d8dd334354e6 bash
注:d8dd334354e6 此编号为容器ID
然后登陆从redis查看
docker exec -it 4e06c60bc5f1 bash
测试从redis没有写权限
4、 部署redis-sentinel
4.1 编写docker-compose.yml文件
创建sentinel目录
cd /home/aml/docker
mkdir sentinel && cd sentinel
vim docker-compose.yml
version: '3.7'
services:
  sentinel1:
    image: aml-redis:5.0.9
    container_name: redis-sentinel-1
    command: redis-sentinel /usr/local/etc/redis/sentinel.conf
    restart: always
    ports:
      - 26379:26379
    volumes:
      # Each sentinel gets its own writable copy of the config file,
      # because sentinel rewrites it at runtime.
      - ./sentinel1.conf:/usr/local/etc/redis/sentinel.conf
  sentinel2:
    image: aml-redis:5.0.9
    container_name: redis-sentinel-2
    command: redis-sentinel /usr/local/etc/redis/sentinel.conf
    restart: always
    ports:
      - 26380:26379
    volumes:
      - ./sentinel2.conf:/usr/local/etc/redis/sentinel.conf
  sentinel3:
    image: aml-redis:5.0.9
    container_name: redis-sentinel-3
    command: redis-sentinel /usr/local/etc/redis/sentinel.conf
    restart: always
    ports:
      - 26381:26379
    volumes:
      - ./sentinel3.conf:/usr/local/etc/redis/sentinel.conf
4.2 编写sentinel.conf
vim sentinel.conf
# Sentinel runtime config — docker-compose mounts a copy into each container.
port 26379
dir /tmp
# Monitored master "mymaster": 172.28.30.183 is the redis-master IP, 6379 its
# port, 2 the quorum (with 3 sentinels, 2 must agree the master is down).
sentinel monitor mymaster 172.28.30.183 6379 2
# Consider the master down after 30s without a valid reply.
sentinel down-after-milliseconds mymaster 30000
# Re-sync at most one replica at a time after a failover.
sentinel parallel-syncs mymaster 1
# Password used to authenticate with the master and its replicas.
sentinel auth-pass mymaster ZQxP#GXT
sentinel failover-timeout mymaster 180000
sentinel deny-scripts-reconfig yes
执行如下命令,复制3份redis-sentinel配置文件
cp sentinel.conf sentinel1.conf
cp sentinel.conf sentinel2.conf
cp sentinel.conf sentinel3.conf
4.3启动redis-sentinel,并查看其状态
docker-compose up -d
docker-compose ps
进入redis-sentinel容器中,查看redis连接状态
docker exec -it 4da5d20ffb6b bash
执行如下命令,查看redis主信息
root@4da5d20ffb6b:/data#redis-cli -p 26379
127.0.0.1:26379> sentinel master mymaster
#注:mymaster为定义的集群名称
执行如下命令,查看从redis信息是否正常
[root@localhost]#docker exec -it 5cbbde3abf97 bash
root@5cbbde3abf97:/data# redis-cli -p 26379
127.0.0.1:26379> sentinel slaves mymaster
2021年3月25日补充
基于docker安装Redis Sentinel 集群导致的NAT网络问题解决方法
对外声明的ip
在redis和sentinel配置文件中添加
slave-announce-ip 5.5.5.5
slave-announce-port 1234
5、 部署rabbitMQ
5.1 上传并导入rabbitMQ镜像
将rabbitMQ的镜像tar包上传至…/aml/tar目录下,以下以/home/aml/tar路径做演示
[root@localhost ~]# cd /home/aml/tar
#导入rabbitMQ镜像
[root@localhost tar]# docker load -i rabbitMQ-3.7.25.tar
#查看当前已添加的镜像
[root@localhost tar]# docker images
5.2 创建docker-compose.yml文件
创建rabbitMQ目录
cd /home/aml/docker
mkdir rabbitmq && cd rabbitmq
vim docker-compose.yml
version: '3'
services:
  rabbitmq1:
    image: aml-rabbitmq:3.7.25
    # NOTE(review): the deploy section is only honoured in swarm mode or with
    # `docker-compose --compatibility up` — confirm which invocation is intended
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4G
      restart_policy:
        condition: any
    ports:
      - "15672:15672"
      - "5672:5672"
    hostname: rabbitmq1
    container_name: rabbitmq1
    environment:
      # All nodes must share the same Erlang cookie to form a cluster.
      - RABBITMQ_ERLANG_COOKIE=rabbitcookie
    volumes:
      - /home/aml/docker/rabbitmq/rabbitmq1:/var/lib/rabbitmq
      - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
  rabbitmq2:
    image: aml-rabbitmq:3.7.25
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4G
      restart_policy:
        condition: any
    ports:
      - "5673:5672"
    hostname: rabbitmq2
    container_name: rabbitmq2
    environment:
      - RABBITMQ_ERLANG_COOKIE=rabbitcookie
    volumes:
      - /home/aml/docker/rabbitmq/rabbitmq2:/var/lib/rabbitmq
      - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
  rabbitmq3:
    image: aml-rabbitmq:3.7.25
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 8G
      restart_policy:
        condition: any
    ports:
      - "5674:5672"
    hostname: rabbitmq3
    container_name: rabbitmq3
    environment:
      - RABBITMQ_ERLANG_COOKIE=rabbitcookie
    volumes:
      - /home/aml/docker/rabbitmq/rabbitmq3:/var/lib/rabbitmq
      - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
5.3 启动rabbitMQ
docker-compose up -d
查看rabbitmq状态
docker-compose ps 或 docker ps |grep rabbitmq
5.4 创建rabbitMQ初始化脚本
vim rabbitmq.sh
#!/bin/bash
# Initialize the 3-node rabbitmq cluster started by docker-compose:
# reset rabbitmq1 as the disc node, join rabbitmq2/rabbitmq3 as ram
# nodes, print the cluster status, then create the admin user.
#
# Abort on the first failed command (a half-joined cluster is worse
# than an early, visible error) and on unset variables.
set -eu

# Reset the first (disc) node.
echo "Reset first rabbitmq node."
docker exec rabbitmq1 /bin/bash -c 'rabbitmqctl stop_app'
docker exec rabbitmq1 /bin/bash -c 'rabbitmqctl reset'
docker exec rabbitmq1 /bin/bash -c 'rabbitmqctl start_app'

# Join the remaining nodes to the cluster as ram nodes.
echo "Starting to build rabbitmq cluster with two ram nodes."
for node in rabbitmq2 rabbitmq3; do
  docker exec "$node" /bin/bash -c 'rabbitmqctl stop_app'
  docker exec "$node" /bin/bash -c 'rabbitmqctl reset'
  docker exec "$node" /bin/bash -c 'rabbitmqctl join_cluster --ram rabbit@rabbitmq1'
  docker exec "$node" /bin/bash -c 'rabbitmqctl start_app'
done

# Show the cluster status from every node.
echo "Check cluster status:"
for node in rabbitmq1 rabbitmq2 rabbitmq3; do
  docker exec "$node" /bin/bash -c 'rabbitmqctl cluster_status'
done

# Create the management user (these credentials are referenced later in the doc).
echo "Starting to create user."
docker exec rabbitmq1 /bin/bash -c 'rabbitmqctl add_user admin df80af9ca0c9'
echo "Set tags for new user."
docker exec rabbitmq1 /bin/bash -c 'rabbitmqctl set_user_tags admin administrator'
echo "Grant permissions to new user."
docker exec rabbitmq1 /bin/bash -c "rabbitmqctl set_permissions -p '/' admin '.*' '.*' '.*'"
5.5 执行初始化脚本:
chmod +x rabbitmq.sh
./rabbitmq.sh
或
sh rabbitmq.sh
使用 http://172.28.30.183:15672/#/登陆测试
用户名:admin
密码:df80af9ca0c9
注:用户名密码为脚本定义!
6、 部署nginx 负载均衡
6.1 导入nginx镜像
将nginx的镜像tar包上传至…/aml/tar目录下,以下以/home/aml/tar路径做演示
进入目录
[root@localhost ~]# cd /home/aml/tar
#导入nginx镜像
[root@localhost tar]# docker load -i aml-nginx-1.13.3.tar
#查看当前已添加的镜像
[root@localhost tar]# docker images
6.2创建docker-compose.yml文件
创建nginx目录
cd /home/aml/docker
mkdir nginx && cd nginx
vim docker-compose.yml
version: "3"
services:
  nginx-lsb:
    container_name: nginx-lsb
    image: "aml-nginx:1.13.3"
    ports:
      - "80:80"
    privileged: true
    volumes:
      - "$PWD/lsb/nginx.conf:/etc/nginx/nginx.conf"
    networks:
      default:
        # Fixed IPs come from the .env file created in section 6.3.
        ipv4_address: ${nginx_lsb}
  web1:
    container_name: web1
    image: "aml-nginx:1.13.3"
    ports:
      - "8080:80"
    privileged: true
    volumes:
      - $PWD/web1/html:/etc/nginx/html
      - $PWD/web1/log:/var/log/nginx:rw
      - $PWD/web1/config/nginx.conf:/etc/nginx/nginx.conf
      - $PWD/web1/config/conf.d:/etc/nginx/conf.d
      - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
    networks:
      default:
        ipv4_address: ${web1_addr}
  web2:
    container_name: web2
    image: "aml-nginx:1.13.3"
    ports:
      - "8081:80"
    privileged: true
    volumes:
      - $PWD/web2/html:/etc/nginx/html
      - $PWD/web2/log:/var/log/nginx:rw
      - $PWD/web2/config/nginx.conf:/etc/nginx/nginx.conf
      - $PWD/web2/config/conf.d:/etc/nginx/conf.d
      - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
    networks:
      default:
        ipv4_address: ${web2_addr}
networks:
  default:
    driver: bridge
    ipam:
      config:
        - subnet: 172.21.0.0/16
6.3 创建.env变量文件
vim .env
web1_addr=172.21.0.2
web2_addr=172.21.0.3
nginx_lsb=172.21.0.100
6.4 创建nginx挂载目录及配置文件
创建目录
cd /home/aml/docker/nginx
mkdir web1/config -p
mkdir web2/config -p
mkdir lsb
创建配置文件
vim web1/config/nginx.conf
worker_processes 1;

events {
    worker_connections 1024;
}

http {
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    client_max_body_size 50m;

    ################# amlgateway ################
    # NOTE(review): 127.0.0.1 here resolves to the nginx container itself,
    # not the docker host — confirm the gateway is reachable at this address.
    upstream amlgateway {
        server 127.0.0.1:8765 weight=10;
    }

    server {
        listen 80;
        server_name localhost;

        # Front-end SPA served from the mounted html directory.
        location / {
            root html/dist;
            index index.html index.htm;
        }

        location /admin {
            alias html/admin/dist;
            index index.html index.htm;
        }

        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }

        # API traffic is proxied to the gateway upstream.
        location /api {
            proxy_pass http://amlgateway;
        }
    }
}
复制web1/config下的nginx.conf到web2/config下
cp web1/config/nginx.conf web2/config/
创建负载均衡配置文件
vim lsb/nginx.conf
user nginx;
worker_processes 1;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    # Round-robin pool of the two web containers
    # (fixed IPs set via .env: web1_addr / web2_addr).
    upstream mydocker {
        server 172.21.0.2;
        server 172.21.0.3;
    }

    server {
        listen 80;
        server_name mydocker;

        location / {
            # Preserve original host and client address for the backends.
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_buffering off;
            proxy_pass http://mydocker;
        }
    }
}
6.5 启动nginx
docker-compose up -d
查看nginx状态
docker-compose ps
7、 部署MySQL数据库
7.1 导入MySQL镜像
进入目录
[root@localhost ~]# cd /opt/images/
#导入mysql镜像
[root@localhost images]# docker load -i aml-mysql-5.7.29.tar
#查看当前已添加的镜像
[root@localhost images]# docker images
7.2 新建配置文件
vim /opt/aml/docker/mysql/conf/docker.cnf
[mysqld]
# Skip the internal host cache and reverse-DNS lookups (faster connections).
skip-host-cache
skip-name-resolve
# Store table names lowercase on disk; name comparisons are case-insensitive.
lower_case_table_names=1
character-set-server=utf8
# Fixed UTC+8 offset (China Standard Time).
default-time-zone='+8:00'
# Directory for temporary files/tables — mounted from the host in section 7.3.
tmpdir=/tmp
[client]
default-character-set=utf8
[mysql]
default-character-set=utf8
# NOTE(review): under [mysql] this only affects the mysql CLI client;
# if it was meant for the server it belongs under [mysqld] — confirm.
max_allowed_packet=16M
• lower_case_table_names=1表示:表名存储在磁盘是小写的,但是比较的时候是不区分大小写
• default-time-zone='+8:00':更改时区
• tmpdir:用于存储临时文件或临时表,默认为/tmp
7.3 启动MySQL
创建临时文件夹,并赋予权限
[root@localhost ~]# mkdir -p /opt/aml/docker/mysql/tmp
[root@localhost ~]# chmod 1777 /opt/aml/docker/mysql/tmp
[root@localhost ~]# docker run -d -p 3306:3306 --name aml-mysql-5.7.29 --restart=always \
-v /opt/aml/docker/mysql/data:/var/lib/mysql \
-v /opt/aml/docker/mysql/conf:/etc/mysql/conf.d \
-v /opt/aml/docker/mysql/logs:/logs \
-v /opt/aml/docker/mysql/tmp:/tmp \
-v /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime \
-e MYSQL_ROOT_PASSWORD=root \
aml-mysql:5.7.29