Quick Docker installation of common middleware

docker-compose

A tool for defining and running multi-container Docker applications: once the container parameters are configured in a yaml file, a single command starts and stops all of the Docker containers.

cd /usr/local/bin
wget https://github.com/docker/compose/releases/download/v2.3.0/docker-compose-linux-x86_64
mv docker-compose-linux-x86_64  docker-compose
chmod +x docker-compose
docker-compose --version
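
The same install can also be done in one step with curl; a sketch using the same v2.3.0 release as above (any published release tag can be substituted in the URL):

# download straight to /usr/local/bin and make it executable
curl -L https://github.com/docker/compose/releases/download/v2.3.0/docker-compose-linux-x86_64 \
  -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose --version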

kafka

1. Define kafka, zk and kafka-eagle in a yaml file

ke is short for kafka-eagle, an open-source web UI for monitoring Kafka.

# start with: docker-compose up -d
version: '3'
services:
    zookeeper:
        image: zookeeper:3.4.13
    eagle:
        image: gui66497/kafka_eagle
        container_name: ke
        restart: always
        depends_on:
            - kafka-1
            - kafka-2
        ports:
            - "10907:8048"
        environment:
            ZKSERVER: "zookeeper:2181"
    kafka-1:
        container_name: kafka-1
        image: wurstmeister/kafka:2.12-2.2.2
        ports:
            - 10903:9092
        environment:
            KAFKA_BROKER_ID: 1 
            HOST_IP: 192.168.200.129
            KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
            # when deployed with Docker, the externally reachable IP and port must be set;
            # otherwise the address registered in zk is unreachable and external clients cannot connect
            KAFKA_ADVERTISED_HOST_NAME: 192.168.200.129
            KAFKA_ADVERTISED_PORT: 10903 
        volumes:
            - /etc/localtime:/etc/localtime
        depends_on:
            - zookeeper           
    kafka-2:
        container_name: kafka-2
        image: wurstmeister/kafka:2.12-2.2.2
        ports:
            - 10904:9092
        environment:
            KAFKA_BROKER_ID: 2 
            HOST_IP: 192.168.200.129
            KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
            KAFKA_ADVERTISED_HOST_NAME: 192.168.200.129
            KAFKA_ADVERTISED_PORT: 10904 
        volumes:
            - /etc/localtime:/etc/localtime
        depends_on:
            - zookeeper

2. Starting and stopping the kafka containers with docker-compose

# deploy and start in the background
docker-compose -f km.yml up -d

# list the containers of the Compose application, including their current state,
# the command each container runs, and the network ports
docker-compose ps

# stop and remove the running Compose application; this removes the containers
# and networks, but not volumes or images
docker-compose -f km.yml down
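
Once the brokers are up, a quick smoke test can be run from inside the containers. This is only a sketch: it assumes the compose file above is running on 192.168.200.129, that the advertised ports 10903/10904 are reachable from inside the containers (clients get redirected to the advertised address), and that the standard Kafka CLI scripts are on the PATH in the wurstmeister/kafka image (they live under /opt/kafka/bin); the topic name smoke-test is just an example.

# create a topic replicated across both brokers
docker exec -it kafka-1 kafka-topics.sh --create --zookeeper zookeeper:2181 \
  --replication-factor 2 --partitions 3 --topic smoke-test

# produce one message ...
docker exec -it kafka-1 bash -c \
  'echo hello | kafka-console-producer.sh --broker-list localhost:9092 --topic smoke-test'

# ... and read it back through the other broker
docker exec -it kafka-2 kafka-console-consumer.sh --bootstrap-server localhost:9092 \
  --topic smoke-test --from-beginning --max-messages 1

The kafka-eagle UI should then be reachable on port 10907 of the host.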

MySQL

Create the host directories to be mounted
mkdir -p /apps/mysql/{mydir,datadir,conf,source}

Write the docker-compose.yaml

version: '3'
services:
  mysql:
    restart: always
    image: mysql:5.7.38 # this version has relatively few known vulnerabilities
    container_name: mysql-lable
    volumes:
      - /apps/mysql/mydir:/mydir
      - /apps/mysql/datadir:/var/lib/mysql
      - /apps/mysql/conf/my.cnf:/etc/my.cnf
      # database restore directory: put any sql files to be restored here
      - /apps/mysql/source:/docker-entrypoint-initdb.d
    environment:
      - "MYSQL_ROOT_PASSWORD=123456"
      - "MYSQL_DATABASE=student"
      - "TZ=Asia/Shanghai"
    ports:
      # map host port 3306 to container port 3306
      # host:container
      - 3306:3306
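
Because /apps/mysql/source is mounted at /docker-entrypoint-initdb.d, any *.sql file placed there is executed automatically the first time the container initialises an empty data directory. A minimal sketch; init.sql and the demo table are made-up names, while the student database comes from MYSQL_DATABASE above:

# runs once, on first initialisation of an empty /var/lib/mysql
cat > /apps/mysql/source/init.sql <<'EOF'
-- placeholder schema for illustration
USE student;
CREATE TABLE IF NOT EXISTS demo (
  id   INT PRIMARY KEY,
  name VARCHAR(64)
);
EOF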

Write the database config file /apps/mysql/conf/my.cnf

[mysqld]
user=mysql
default-storage-engine=INNODB
character-set-server=utf8
character-set-client-handshake=FALSE
collation-server=utf8_unicode_ci
init_connect='SET NAMES utf8'
max_connections = 4096
max_connect_errors = 100000
lower_case_table_names=1
[client]
default-character-set=utf8
[mysql]
default-character-set=utf8

Start

First check that port 3306 is not already in use:
ss -tunlp | grep 3306
docker-compose up -d
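
Once the container reports "Up", a quick sanity check; this sketch reuses the container name and root password from the compose file above:

docker-compose ps

# confirm the student database exists and the utf8 settings from my.cnf took effect
docker exec -it mysql-lable mysql -uroot -p123456 \
  -e "show databases; show variables like 'character_set_server';"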

Combined setup

One compose file covering kafka, zookeeper, nacos, redis, mongo, mysql, rabbitmq and fastdfs (tracker, storage and nginx).

version: '3'
services:
  mysql:
    restart: always
    image: mysql:5.7.38 # this version has relatively few known vulnerabilities
    container_name: mysql
    volumes:
      - /opt/data/mysql:/var/lib/mysql
    environment:
      - "MYSQL_ROOT_PASSWORD=root"
      - "MYSQL_DATABASE=hitch"
      - "TZ=Asia/Shanghai"
    ports:
      # map host port 3306 to container port 3306
      # host:container
      - 3306:3306
  nacos:
    image: nacos/nacos-server
    hostname: nacos
    container_name: nacos
    restart: always
    ports:
      - 9105:8848  
    environment:
      - MODE=standalone
      - JVM_XMS=128m
      - JVM_XMX=128m
  zookeeper:
    image: zookeeper:3.4.10
    restart: always
    hostname: zookeeper
    container_name: zookeeper
    volumes:
      - /opt/data/zksingle:/data
    ports:
      - 2181:2181
    environment:
      - ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
  kafka:
    image: wurstmeister/kafka:2.12-2.2.2
    container_name: kafka
    restart: always
    depends_on:
      - zookeeper
    volumes:
      - /etc/localtime:/etc/localtime
    ports:
      - 9103:9092
    environment:
      KAFKA_BROKER_ID: 100
      HOST_IP: 192.168.200.136
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_ADVERTISED_HOST_NAME: 192.168.200.136
      KAFKA_ADVERTISED_PORT: 9103
  redis:
    image: daocloud.io/library/redis:6.0.6
    container_name: redis-prize
    hostname: redis
    ports:
      - 9010:6379
    restart: always
  mongo:
    image: docker.io/mongo:4.4
    container_name: mongo4
    ports:
      - 8090:27017
    volumes:
      - /opt/data/mongo4:/data/db
    restart: always
    ulimits:
     nofile:
       soft: 300000
       hard: 300000
  rabbitmq:
    image: daocloud.io/library/rabbitmq:3.6.10-management
    container_name: rabbit
    hostname: my-rabbit
    ports:
      - 15672:15672
      - 5672:5672      
  fastdfs-tracker:
    hostname: fastdfs-tracker
    container_name: fastdfs-tracker
    image: season/fastdfs:1.2
    network_mode: "host"
    command: tracker
    volumes:
      - ./tracker_data:/fastdfs/tracker/data
  fastdfs-storage:
    hostname: fastdfs-storage
    container_name: fastdfs-storage
    image: season/fastdfs:1.2
    network_mode: "host"
    volumes:
      - ./storage_data:/fastdfs/storage/data
      - ./store_path:/fastdfs/store_path
    environment:
      - TRACKER_SERVER=192.168.200.136:22122
    command: storage
    depends_on:
      - fastdfs-tracker
  fastdfs-nginx:
    hostname: fastdfs-nginx
    container_name: fastdfs-nginx
    image: season/fastdfs:1.2
    network_mode: "host"
    volumes:
      - ./nginx.conf:/etc/nginx/conf/nginx.conf
      - ./store_path:/fastdfs/store_path
    environment:
      - TRACKER_SERVER=192.168.200.136:22122
    command: nginx
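
The combined file can be started as a whole or service by service; a sketch assuming it is saved as docker-compose.yml in the current directory (otherwise pass the file with -f, as in the kafka section above):

# start everything in the background
docker-compose up -d

# or bring up only the services you need, e.g. mysql and redis
docker-compose up -d mysql redis

# check container status and follow the logs of one service
docker-compose ps
docker-compose logs -f kafka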