Installing Kafka with Docker

Installing Kafka, MySQL, Hadoop, Redis, SQL Server, and Oracle 21c with docker-compose



Preface


Note: the following is the main content of this article; the examples below are provided for reference.

I. Commands

docker network list # list existing Docker networks
docker network create dataPlatform # create the dataPlatform network
docker-compose --file "/Users/xxx/Desktop/docker/mysql-compose.yml" --project-name "mysql-docker" up -d

Note: if the dataPlatform network does not exist, create it first. Change --file to the path of the corresponding yml file; --project-name sets the external (project) name of the containers. Keep the yml files in a fixed location, because starting and stopping the containers later also requires reading them.
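
To stop or restart the containers later, run docker-compose against the same yml file and project name. A minimal sketch, reusing the example path above (adjust it to your own path):

docker-compose --file "/Users/xxx/Desktop/docker/mysql-compose.yml" --project-name "mysql-docker" stop   # stop the containers
docker-compose --file "/Users/xxx/Desktop/docker/mysql-compose.yml" --project-name "mysql-docker" start  # start them again
docker-compose --file "/Users/xxx/Desktop/docker/mysql-compose.yml" --project-name "mysql-docker" down   # stop and remove them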

1.kafka-compose.yml

version: "3.9" # Docker Compose 版本,目前最新的是3.9

services:
  zookeeper:
    image: docker.io/bitnami/zookeeper:3.7 # image to pull
    container_name: zookeeper # custom container name
    hostname: zookeeper # container hostname
    ports:
      - "2181:2181" # host:container port mapping
    volumes:
      - "zookeeper_data:/bitnami" # mounted data volume
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes # environment variable
  kafka:
    image: docker.io/bitnami/kafka:3
    container_name: kafka
    hostname: kafka
    ports:
      - "9092:9092"
      - "9093:9093"
    volumes:
      - "kafka_data:/bitnami"
    environment:
      - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
      - ALLOW_PLAINTEXT_LISTENER=yes
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CLIENT:PLAINTEXT,EXTERNAL:PLAINTEXT
      - KAFKA_CFG_LISTENERS=CLIENT://:9092,EXTERNAL://:9093
      - KAFKA_CFG_ADVERTISED_LISTENERS=CLIENT://kafka:9092,EXTERNAL://localhost:9093
      - KAFKA_INTER_BROKER_LISTENER_NAME=CLIENT
    depends_on:
      - zookeeper # start after the zookeeper container

volumes:
  zookeeper_data:
    driver: local
  kafka_data:
    driver: local

networks:
  default:
    external:
      name: dataPlatform # use the pre-created network
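
Once the stack is up, a quick smoke test can be run inside the container. This is only a sketch; it assumes the Kafka scripts (kafka-topics.sh) are on the PATH of the bitnami image, and it creates a throwaway topic named smoke-test:

docker exec -it kafka kafka-topics.sh --bootstrap-server kafka:9092 --create --topic smoke-test
docker exec -it kafka kafka-topics.sh --bootstrap-server kafka:9092 --list

Clients on the host connect through localhost:9093, matching the EXTERNAL advertised listener above.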

2.mysql-compose.yml

version: '3.9'
services:
  db:
    image: mysql:5.7
    restart: always
    hostname: mysql57
    container_name: mysql57
    environment:
      MYSQL_DATABASE: 'db'
      # So you don't have to use root, but you can if you like
      MYSQL_USER: 'dataPlatform'
      # You can use whatever password you like
      MYSQL_PASSWORD: '123456'
      # Password for root access
      MYSQL_ROOT_PASSWORD: '123456'
    ports:
      # <Port exposed> : < MySQL Port running inside container>
      - '3306:3306'
    expose:
      # Opens port 3306 on the container
      - '3306'
      # Where our data will be persisted
    volumes:
      - my-db:/var/lib/mysql
# Names our volume
volumes:
  my-db:

networks:
  default:
    external:
      name: dataPlatform
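
To confirm MySQL is reachable, the client bundled in the mysql:5.7 image can be used (a sketch, using the root password defined above):

docker exec -it mysql57 mysql -uroot -p123456 -e "SHOW DATABASES;"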

3.hadoop-compose.yml

version: "3"

services:

  namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
    container_name: namenode
    hostname: namenode
    restart: always
    ports:
      - 9870:9870
      - 9000:9000
    volumes:
      - hadoop_namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop.env

  datanode:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: datanode
    hostname: datanode
    restart: always
    volumes:
      - hadoop_datanode:/hadoop/dfs/data
    environment:
      SERVICE_PRECONDITION: "namenode:9870"
    env_file:
      - ./hadoop.env
  
  resourcemanager:
    image: bde2020/hadoop-resourcemanager:2.0.0-hadoop3.2.1-java8
    container_name: resourcemanager
    hostname: resourcemanager
    restart: always
    ports:
      - 8088:8088
    volumes:
      - ~/Downloads/flink-1.13.3:/opt/flink
      - ~/Downloads/executeJar:/opt/executeJar
    environment:
      HADOOP_CLASSPATH: "/etc/hadoop:/opt/hadoop-3.2.1/share/hadoop/common/lib/*:/opt/hadoop-3.2.1/share/hadoop/common/*:/opt/hadoop-3.2.1/share/hadoop/hdfs:/opt/hadoop-3.2.1/share/hadoop/hdfs/lib/*:/opt/hadoop-3.2.1/share/hadoop/hdfs/*:/opt/hadoop-3.2.1/share/hadoop/mapreduce/lib/*:/opt/hadoop-3.2.1/share/hadoop/mapreduce/*:/opt/hadoop-3.2.1/share/hadoop/yarn:/opt/hadoop-3.2.1/share/hadoop/yarn/lib/*:/opt/hadoop-3.2.1/share/hadoop/yarn/*"
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864"
    env_file:
      - ./hadoop.env

  nodemanager:
    image: bde2020/hadoop-nodemanager:2.0.0-hadoop3.2.1-java8
    container_name: nodemanager
    hostname: nodemanager
    restart: always
    ports:
      - 8042:8042
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 resourcemanager:8088"
    env_file:
      - ./hadoop.env
  
  historyserver:
    image: bde2020/hadoop-historyserver:2.0.0-hadoop3.2.1-java8
    container_name: historyserver
    hostname: historyserver
    restart: always
    ports:
      - 8188:8188
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode:9864 resourcemanager:8088"
    volumes:
      - hadoop_historyserver:/hadoop/yarn/timeline
    env_file:
      - ./hadoop.env

volumes:
  hadoop_namenode:
  hadoop_datanode:
  hadoop_historyserver:

networks:
  default:
    external:
      name: dataPlatform
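
This file also expects a hadoop.env next to it (env_file: ./hadoop.env); the bde2020 docker-hadoop project provides one that matches these images. After startup, the NameNode UI is at http://localhost:9870 and the YARN UI at http://localhost:8088. A basic HDFS check, assuming the hdfs CLI is on the image's PATH (a sketch):

docker exec -it namenode hdfs dfsadmin -report
docker exec -it namenode hdfs dfs -ls /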

4.redis-compose.yml

version: "3.9"

services:
  redis:
    image: redis:latest
    hostname: redis
    container_name: redis
    restart: always
    ports:
      - 6379:6379
    volumes:
      - ~/Desktop/docker/redis/redis.conf:/usr/local/etc/redis/redis.conf:rw
      - ~/Desktop/docker/redis/data:/data:rw
    command:
      /bin/bash -c "redis-server /usr/local/etc/redis/redis.conf "


networks:
  default:
    external:
      name: dataPlatform
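
A quick liveness check with the redis-cli shipped in the image; the expected reply is PONG:

docker exec -it redis redis-cli ping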

5.sqlServer-compose.yml

version: "3.9"
services:
    db:
        image: "mcr.microsoft.com/mssql/server"
        hostname: sqlServer
        container_name: sqlServer
        restart: always
        ports:
          - "1433:1433"
        environment:
            SA_PASSWORD: "Data123456#"
            ACCEPT_EULA: "Y"
networks:
  default:
    external:
      name: dataPlatform
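
To confirm SQL Server accepts connections, sqlcmd inside the container can be used. A sketch only: the tool path depends on the image version (newer images ship it as /opt/mssql-tools18/bin/sqlcmd and need -C to trust the self-signed certificate):

docker exec -it sqlServer /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P 'Data123456#' -Q "SELECT @@VERSION"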

6.oracle21c-compose.yml

version: "3.9"
services:
    oracle:
      image: xjxjin/oracle-21c:21.3.0-ee
      hostname: oracle21c
      container_name: oracle21c
      restart: always
      ports:
        - 1521:1521
        - 5500:5500
      volumes:
        - ~/Desktop/docker/oracle/21.3:/opt/oracle/oradata
        - ~/Desktop/docker/oracle/database:/opt/oracle/21.3.0-ee
      environment:
        ORACLE_SID: 'orcl'
        ORACLE_PWD: '123456'
        ORACLE_EDITION: 'standard'
        ENABLE_ARCHIVELOG: 'true'
networks:
  default:
    external:
      name: dataPlatform
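
Oracle needs several minutes to initialize on first start. A hedged check, assuming the image follows Oracle's official container conventions (sqlplus available inside the container, SYSTEM password set from ORACLE_PWD, CDB service named after ORACLE_SID):

docker logs -f oracle21c    # watch until the database reports it is ready
docker exec -it oracle21c sqlplus system/123456@localhost:1521/orcl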

Note: in the volumes entries, the path on the left-hand side of the colon must be changed to a local directory, ideally kept alongside the yml files; these directories hold the data the containers persist.
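
For example, the host directories used by the redis and oracle files above can be created up front, and an existing redis.conf must be placed at the mounted path (a sketch using the paths from the yml files; substitute your own locations):

mkdir -p ~/Desktop/docker/redis/data ~/Desktop/docker/oracle/21.3 ~/Desktop/docker/oracle/database
cp your-redis.conf ~/Desktop/docker/redis/redis.conf   # supply an existing redis.conf here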

II. Running the compose files

The commands are as follows (example):

docker-compose --project-name "kafka-docker" up -d

docker-compose --file "/Users/xxxx/Downloads/docker-compose.yml" --project-name "kafka-docker" up -d
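
After bringing a stack up, its status can be checked against the same file and project name (a sketch):

docker-compose --file "/Users/xxxx/Downloads/docker-compose.yml" --project-name "kafka-docker" ps
docker ps --filter network=dataPlatform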

Summary

With a shared dataPlatform network and one docker-compose yml per service, Kafka, MySQL, Hadoop, Redis, SQL Server, and Oracle 21c can each be started, stopped, and removed with a single docker-compose command.
