Dockerizing Components
Containerizing elasticsearch (5.1.2)
// Pull the base image
docker pull daocloud.io/library/elasticsearch:5.1.2
// Create the initial container
docker run -it -d --name elasticsearch5.1.2 -p 9200:9200 -p 9300:9300 daocloud.io/library/elasticsearch:5.1.2
// Copy the config files to be edited out to the host
docker cp elasticsearch5.1.2:/etc/security/limits.conf /data/limits.conf
docker cp elasticsearch5.1.2:/usr/share/elasticsearch/config/elasticsearch.yml /data/elasticsearch.yml
docker cp elasticsearch5.1.2:/etc/sysctl.conf /data/sysctl.conf
// Edit the config files as linked below (memory usage left unchanged; it defaults to the JVM heap size)
https://kiwi.yginsight.com/kiwi/e/1b785c19-c539-4a57-a013-563294fb9e37?mod=view
// Enter the container
docker exec -it elasticsearch5.1.2 /bin/bash
// Remove the config files being replaced
rm -rf /etc/security/limits.conf
rm -rf /etc/sysctl.conf
rm -rf /usr/share/elasticsearch/config/elasticsearch.yml
// Copy the edited config files back into the container
docker cp /data/limits.conf elasticsearch5.1.2:/etc/security/limits.conf
docker cp /data/sysctl.conf elasticsearch5.1.2:/etc/sysctl.conf
docker cp /data/elasticsearch.yml elasticsearch5.1.2:/usr/share/elasticsearch/config
// Commit the changes as a new image
docker commit elasticsearch5.1.2 elasticsearch-dev:5.1.2
// Run the new image
docker run -d -e "ES_JAVA_OPTS=-Xms512m -Xmx512m" --name elasticsearch -p 9200:9200 -p 9300:9300 -v /data/docker/volumes/elasticsearch/data/:/usr/share/elasticsearch/data/ -e "discovery.type=single-node" elasticsearch-dev:5.1.2
// Alternatively, start the service via docker-compose.yml
docker-compose up -d
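Once the container is running, a quick smoke test of the HTTP port (assuming the 9200 mapping above) should return the node's JSON banner:
// verify that elasticsearch responds
curl http://localhost:9200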
Editing limits.conf and sysctl.conf
vi /etc/security/limits.conf
# Append the following lines
* soft nofile 65536
* hard nofile 131072
* soft nproc 2048
* hard nproc 4096
vi /etc/sysctl.conf
# Append the following setting:
vm.max_map_count=655360
# Save and exit, then apply it with:
sysctl -p
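A quick way to confirm the settings took effect (note the nofile/nproc limits only apply to sessions opened after the change):
sysctl vm.max_map_count    # expect: vm.max_map_count = 655360
ulimit -n                  # open-file limit of the current shell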
elasticsearch.yml
cluster.name: mat-cluster
node.name: node1
network.bind_host: 0.0.0.0
network.publish_host: 10.51.103.78
#network.bind_host: 127.0.0.1
#network.host: 0.0.0.0
transport.tcp.port: 9300
#network.publish_host: 54.223.232.95
#transport.tcp.compress: true
# allow cross-origin requests
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true
node.data: true
discovery.zen.ping.unicast.hosts: ["10.51.103.78:9300"]
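With this config loaded, cluster health can be checked over HTTP (a quick sketch; substitute your publish host for the address below):
curl http://10.51.103.78:9200/_cluster/health?pretty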
docker-compose.yml for the elasticsearch-dev image
version: '3'
services:
  elasticsearch:
    image: elasticsearch-dev:5.1.2
    container_name: elasticsearch
    ports:
      - "9200:9200"
      - "9300:9300"
    volumes:
      - /data/docker/volumes/elasticsearch/data/:/usr/share/elasticsearch/data/
      - /data/docker/volumes/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /etc/security/limits.conf:/etc/security/limits.conf
      - /etc/sysctl.conf:/etc/sysctl.conf
    environment:
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
      - discovery.type=single-node
    restart: on-failure
Containerizing redis (5.0.9)
// Pull the base image
docker pull redis:5.0.9
// Create the initial container
docker run -d --name redis5.0.9 -p 6379:6379 redis:5.0.9
// Create local volume directories
mkdir -p /data/docker/volumes/redis/conf
mkdir -p /data/docker/volumes/redis/data
mkdir -p /data/docker/volumes/redis/logs/
// Create the config and log files
touch /data/docker/volumes/redis/conf/redis.conf
touch /data/docker/volumes/redis/logs/redis.log
// Enter the container
docker exec -it redis5.0.9 /bin/bash
// Create directories inside the container
mkdir -p /data/redis/pid
mkdir -p /data/redis/log
mkdir -p /data/redis/data
mkdir -p /etc/redis
// Copy the config file into the container (run from the host)
docker cp /data/docker/volumes/redis/conf/redis.conf redis5.0.9:/etc/redis
// Back in the container, grant permissions on redis.conf and the data directories
chmod 777 /etc/redis/redis.conf
chmod 777 /data/redis -R
chown root:root /data
// Commit the changes as a new image
docker commit redis5.0.9 redis-dev
// Run the new image
docker run -d --privileged --name redis -p 5379:5379 -v /data/docker/volumes/redis/conf/redis.conf:/etc/redis/redis.conf -v /data/docker/volumes/redis/data/:/data/redis/data -v /data/docker/volumes/redis/logs/redis.log:/data/redis/log/redis.log redis-dev:latest redis-server /etc/redis/redis.conf
// Alternatively, start the service via docker-compose.yml
docker-compose up -d
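A quick connectivity check, using the port (5379) and password (123456) configured in redis.conf below:
docker exec -it redis redis-cli -p 5379 -a 123456 ping    # expect: PONG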
redis.conf
daemonize no
pidfile /data/redis/pid/redis.pid
port 5379
requirepass 123456
tcp-backlog 511
timeout 0
tcp-keepalive 0
loglevel notice
logfile /data/redis/log/redis.log
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data/redis/data
slave-serve-stale-data yes
slave-read-only yes
repl-disable-tcp-nodelay no
slave-priority 100
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-entries 512
list-max-ziplist-value 64
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
docker-compose.yml for the redis-dev image
version: '3'
services:
  redis:
    image: redis-dev
    container_name: redis
    ports:
      - "5379:5379"
    volumes:
      - /data/docker/volumes/redis/data/:/data/redis/data
      - /data/docker/volumes/redis/logs/redis.log:/data/redis/log/redis.log
    restart: on-failure
    command: redis-server /etc/redis/redis.conf
Containerizing mysql (5.7.30)
// Pull the base image
docker pull mysql:5.7.30
// Create the initial container
docker run -d --privileged --name mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7.30
// Create local volume directories
mkdir -p /data/docker/volumes/mysql/conf
mkdir -p /data/docker/volumes/mysql/logs
mkdir -p /data/docker/volumes/mysql/data
// Create the config file, set permissions, and copy it into the container
touch my.cnf
chmod 755 my.cnf # Do not set permissions to 777, or mysql ignores the file with the warning: mysqld: [Warning] World-writable config file '/etc/mysql/my.cnf' is ignored.
docker cp /data/docker/volumes/mysql/conf/my.cnf mysql:/etc/mysql
// Commit the changes as a new image
docker commit mysql mysql-dev
// Run the new image
docker run -d --privileged --name mysql -p 3306:3306 -v /data/docker/volumes/mysql/conf/:/etc/mysql -v /data/docker/volumes/mysql/logs/:/var/log/mysql -v /data/docker/volumes/mysql/data/:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 mysql-dev
// Alternatively, start the service via docker-compose.yml
docker-compose up -d
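A quick check that the server is up and my.cnf was picked up (assuming the root password set above):
docker exec -it mysql mysql -uroot -p123456 -e "SHOW VARIABLES LIKE 'character_set_server';"    # expect: utf8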
my.cnf
[mysql]
# Default character set for the mysql client
default-character-set=utf8
[mysqld]
# Listening port
port = 3306
# MySQL installation directory
# basedir=/usr/local/mysql
# Data directory
datadir=/var/lib/mysql
# Maximum number of allowed connections
max_connections=500
# Server character set (the default is the 8-bit latin1)
character-set-server=utf8
# Default storage engine for newly created tables
default-storage-engine=INNODB
lower_case_table_names=1
max_allowed_packet=16M
docker-compose.yml for the mysql-dev image
version: '3'
services:
  mysql:
    image: mysql-dev
    container_name: mysql
    ports:
      - "3306:3306"
    volumes:
      - /data/docker/volumes/mysql/conf/:/etc/mysql
      - /data/docker/volumes/mysql/logs/:/var/log/mysql
      - /data/docker/volumes/mysql/data/:/var/lib/mysql
    environment:
      - MYSQL_ROOT_PASSWORD=123456
    restart: on-failure
Containerizing zookeeper, kafka, and kafka-manager
Create the communication network
Since zookeeper and kafka need to communicate with each other, first create a dedicated network using Docker's inter-container networking.
[root@sz-ben-dev-01 ~]# docker network create kafka_zookeeper (create the network)
dcb683a23044e902b251e01f493c814f940bd5bb592025c9eb4b78902f45091f
[root@sz-ben-dev-01 ~]# docker network ls (list networks)
NETWORK ID          NAME                DRIVER              SCOPE
50218292be46        bridge              bridge              local
8dc74fc4e063        host                host                local
80a2879ab000        kafka_zookeeper     bridge              local
10fb11e15eae        none                null                local
[root@sz-ben-dev-01 ~]# docker network inspect kafka_zookeeper (inspect the network)
[
    {
        "Name": "kafka_zookeeper",
        "Id": "dcb683a23044e902b251e01f493c814f940bd5bb592025c9eb4b78902f45091f",
        "Created": "2019-11-07T11:30:01.007966557+08:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": {},
            "Config": [
                {
                    "Subnet": "172.20.0.0/16",
                    "Gateway": "172.20.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Containers": {},    (no containers attached yet)
        "Options": {},
        "Labels": {}
    }
]
[root@ZH02-TSER-78 config]# docker network inspect kafka_zookeeper (after containers have been started on the network)
[
    {
        "Name": "kafka_zookeeper",
        "Id": "415ddaac51ce40e24b35a214e92160e373699d631659859a8ea81707874e0235",
        "Created": "2020-05-28T12:14:06.569223604+08:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": {},
            "Config": [
                {
                    "Subnet": "172.20.0.0/16",
                    "Gateway": "172.20.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "58c9ecc808d8c1ad308d2b56d69c950884b5a7de30f29c6757fc1adedad2ed08": {
                "Name": "zookeeper",
                "EndpointID": "7f8fc76bb6972ee0c86dfc190d26780e2621fde356f12e7800b1032766277a8c",
                "MacAddress": "02:42:ac:14:00:02",
                "IPv4Address": "172.20.0.2/16",
                "IPv6Address": ""
            }
        },
        "Options": {},
        "Labels": {}
    }
]
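A side benefit of a user-defined bridge network is built-in DNS: attached containers resolve each other by container name, so configs can reference zookeeper or kafka instead of hard-coding the 172.20.0.x addresses. A quick check, assuming both containers are attached:
docker exec -it kafka ping -c 1 zookeeper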
Containerizing zookeeper (3.4.6)
// Pull the base image
docker pull wurstmeister/zookeeper:3.4.6
// Create the initial container, mounting the host clock
docker run -d --name zookeeper -p 2181:2181 -v /etc/localtime:/etc/localtime wurstmeister/zookeeper:3.4.6
// Create local volume directories
mkdir -p /data/docker/volumes/zookeeper/conf
mkdir -p /data/docker/volumes/zookeeper/logs
mkdir -p /data/docker/volumes/zookeeper/data
// Copy the config file out, edit it, then copy it back
docker cp zookeeper:/opt/zookeeper-3.4.6/conf/zoo.cfg /data/docker/volumes/zookeeper/conf/
docker cp /data/docker/volumes/zookeeper/conf/zoo.cfg zookeeper:/opt/zookeeper-3.4.6/conf
// Commit the changes as a new image
docker commit zookeeper zookeeper-dev
// Run the new image
docker run -d --privileged --net=kafka_zookeeper --name zookeeper -p 2181:2181 -p 2888:2888 -p 3888:3888 -v /etc/localtime:/etc/localtime -v /data/docker/volumes/zookeeper/conf/zoo.cfg:/opt/zookeeper-3.4.6/conf/zoo.cfg -v /data/docker/volumes/zookeeper/data:/opt/zookeeper-3.4.6/data -v /data/docker/volumes/zookeeper/logs:/opt/zookeeper-3.4.6/logs zookeeper-dev
// Alternatively, start the service via docker-compose.yml
docker-compose up -d
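To confirm the node came up, query its status with the bundled script (path as used by this image):
docker exec -it zookeeper /opt/zookeeper-3.4.6/bin/zkServer.sh status    # expect: Mode: standalone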
zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/opt/zookeeper-3.4.6/data
dataLogDir=/opt/zookeeper-3.4.6/logs
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
docker-compose.yml for the zookeeper-dev image
version: '3'
services:
  zookeeper:
    image: zookeeper-dev
    container_name: zookeeper
    networks:
      - kafka_zookeeper
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/docker/volumes/zookeeper/conf/zoo.cfg:/opt/zookeeper-3.4.6/conf/zoo.cfg
      - /data/docker/volumes/zookeeper/data:/opt/zookeeper-3.4.6/data
      - /data/docker/volumes/zookeeper/logs:/opt/zookeeper-3.4.6/logs
    restart: on-failure
networks:
  kafka_zookeeper:
    external: true
The Docker engine looks up the network declared as external and connects the service to it; if the network does not exist, startup fails with an error like:
ERROR: Network bennet declared as external, but could not be found. Please create the network manually using `docker network create bennet` and try again.
When external is false, a network named testProject_bennet is created automatically; if there is no networks key at all, a testProject_default network is created.
Containerizing zookeeper (3.6)
// Pull the base image
docker pull zookeeper:3.6
// Create the initial container, mounting the host clock
docker run -d --name zookeeper-test -p 2181:2181 -v /etc/localtime:/etc/localtime zookeeper:3.6
// Create local volume directories
mkdir -p /data/docker/volumes/zookeeper/conf
mkdir -p /data/docker/volumes/zookeeper/logs
mkdir -p /data/docker/volumes/zookeeper/data
// Copy the config file out, edit it, then copy it back
docker cp zookeeper-test:/conf/zoo.cfg /data/docker/volumes/zookeeper/conf/
docker cp /data/docker/volumes/zookeeper/conf/zoo.cfg zookeeper-test:/conf
// Commit the changes as a new image
docker commit zookeeper-test zookeeper-dev
// Run the new image
docker run -d --privileged --net=kafka_zookeeper --name zookeeper -p 2181:2181 -p 2888:2888 -p 3888:3888 -v /etc/localtime:/etc/localtime -v /data/docker/volumes/zookeeper/conf/zoo.cfg:/conf/zoo.cfg -v /data/docker/volumes/zookeeper/data:/data -v /data/docker/volumes/zookeeper/logs:/datalog zookeeper-dev
// Alternatively, start the service via docker-compose.yml
docker-compose up -d
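To confirm the node came up (in the official 3.6 image the zookeeper bin directory is already on PATH):
docker exec -it zookeeper zkServer.sh status    # expect: Mode: standalone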
zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/data
dataLogDir=/datalog
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
docker-compose.yml for the zookeeper-dev image
version: '3'
services:
  zookeeper:
    image: zookeeper-dev
    container_name: zookeeper
    networks:
      - kafka_zookeeper
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/docker/volumes/zookeeper/conf/zoo.cfg:/conf/zoo.cfg
      - /data/docker/volumes/zookeeper/data:/data
      - /data/docker/volumes/zookeeper/logs:/datalog
    restart: on-failure
networks:
  kafka_zookeeper:
    external: true
Containerizing kafka (2.12-2.4.1)
// Pull the base image
docker pull wurstmeister/kafka:2.12-2.4.1
// Create the initial container
docker run -d --net=kafka_zookeeper --name kafka -p 9092:9092 --link zookeeper -e KAFKA_ZOOKEEPER_CONNECT=172.20.0.2:2181 -e KAFKA_LISTENERS=PLAINTEXT://172.20.0.3:9092 -v /etc/localtime:/etc/localtime wurstmeister/kafka:2.12-2.4.1
// Create local volume directories
mkdir -p /data/docker/volumes/kafka/config
mkdir -p /data/docker/volumes/kafka/logs
// Copy the config file out, edit it, then copy it back
docker cp kafka:/opt/kafka/config/server.properties /data/docker/volumes/kafka/config/
docker cp /data/docker/volumes/kafka/config/server.properties kafka:/opt/kafka/config
// Commit the changes as a new image
docker commit kafka kafka-dev
// Run the new image
docker run -d --net=kafka_zookeeper --name kafka -p 9092:9092 --link zookeeper -e KAFKA_ZOOKEEPER_CONNECT=172.20.0.2:2181 -e KAFKA_LISTENERS=PLAINTEXT://172.20.0.3:9092 -e JMX_PORT=9999 -v /etc/localtime:/etc/localtime -v /data/docker/volumes/kafka/config/server.properties:/opt/kafka/config/server.properties -v /data/docker/volumes/kafka/logs:/opt/kafka/logs kafka-dev
// Alternatively, start the service via docker-compose.yml
docker-compose up -d
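A smoke test that creates and lists a topic from inside the container. JMX_PORT=9999 is inherited by docker exec and makes the CLI tools fail with "Port already in use", so it is cleared here; the topic name smoke-test is arbitrary:
docker exec -e JMX_PORT= -it kafka /opt/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper:2181 --replication-factor 1 --partitions 1 --topic smoke-test
docker exec -e JMX_PORT= -it kafka /opt/kafka/bin/kafka-topics.sh --list --zookeeper zookeeper:2181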
server.properties
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
############################# Socket Server Settings #############################
# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
# listeners=PLAINTEXT://10.51.103.78:9092
# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
# advertised.listeners=PLAINTEXT://10.51.103.78:9092
# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3
# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/opt/kafka_2.12-2.4.1/logs
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=localhost:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
docker-compose.yml for the kafka-dev image
version: '3'
services:
  kafka:
    image: kafka-dev
    container_name: kafka
    external_links:
      - zookeeper
    networks:
      - kafka_zookeeper
    mac_address: 00:FF:0A:4E:5E:FA
    ports:
      - "9092:9092"
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/docker/volumes/kafka/logs:/opt/kafka/logs
    environment:
      - KAFKA_BROKER_ID=1
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_LISTENERS=PLAINTEXT://kafka:9092
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://10.51.103.78:9092
      - KAFKA_NUM_PARTITIONS=1
      - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS=3000
      - JMX_PORT=9999
    restart: on-failure
networks:
  kafka_zookeeper:
    external: true
Do not manage the same file through both volumes and environment: the image's entrypoint rewrites server.properties in place to apply the KAFKA_* variables, and sed cannot rename its temporary file over a bind-mounted single file, so startup fails with "Resource busy":
[Configuring] 'port' in '/opt/kafka/config/server.properties'
sed: can't move '/opt/kafka/config/server.propertiesgoHJeE' to '/opt/kafka/config/server.properties': Resource busy
Containerizing kafka-manager (3.0.0.4)
// Pull the base image
docker pull kafkamanager/kafka-manager:3.0.0.4
// Create the initial container
docker run -d -it -p 9000:9000 --net=kafka_zookeeper --name cmak --link zookeeper -e ZK_HOSTS=172.20.0.2 kafkamanager/kafka-manager:3.0.0.4
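Once started, the web UI listens on port 9000; a quick liveness check from the host:
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:9000    # expect: 200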
docker-compose.yml for the kafka-manager image
version: '3'
services:
  kafka-manager:
    image: kafkamanager/kafka-manager
    container_name: kafka-manager
    external_links:
      - zookeeper
    networks:
      - kafka_zookeeper
    ports:
      - "9000:9000"
    volumes:
      - /etc/localtime:/etc/localtime
      # - /data/docker/volumes/cmak/conf/application.conf:/opt/cmak-3.0.0.4/conf/application.conf
    environment:
      - ZK_HOSTS=zookeeper
    restart: on-failure
networks:
  kafka_zookeeper:
    external: true
In use, kafka-manager reports: Yikes! KeeperErrorCode = Unimplemented for /kafka-manager/mutex Try again.
This is caused by too old a zookeeper version: kafka-manager requires at least zookeeper 3.5.x. Pull a newer zookeeper image and restart the cluster.
Containerizing emqx (v4.0.5-alpine3.10-amd64)
// Pull the base image
docker pull emqx/emqx:v4.0.5-alpine3.10-amd64
// Create the initial container
docker run -d --name emqx-test -p 1883:1883 -p 8083:8083 -p 8883:8883 -p 8084:8084 -p 18083:18083 emqx/emqx:v4.0.5-alpine3.10-amd64
// Create the local volume directory; copy the config file out, edit it, then copy it back
mkdir -p /data/docker/volumes/emqx/etc
docker cp emqx-test:/opt/emqx/etc/emqx.conf /data/docker/volumes/emqx/etc
docker cp /data/docker/volumes/emqx/etc/emqx.conf emqx-test:/opt/emqx/etc
// Enter the container and grant permissions
docker exec -it emqx-test /bin/sh
chmod 777 /opt/emqx -R
// Commit the changes as a new image
docker commit emqx-test emqx-dev
// Run the new image
docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8883:8883 -p 8084:8084 -p 18083:18083 emqx-dev
// Alternatively, start the service via docker-compose.yml
docker-compose up -d
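A quick health check with the CLI bundled in the image:
docker exec -it emqx emqx_ctl status    # expect: Node ... is started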
If the following error appears, turn off the firewall, restart docker, then restart the service:
2020-06-08 02:33:20,435 - [ERROR] o.a.c.ConnectionState - Connection timed out for connection string (10.51.103.77:2181) and timeout (15000) / elapsed (30184)
org.apache.curator.CuratorConnectionLossException: KeeperErrorCode = ConnectionLoss
at org.apache.curator.ConnectionState.checkTimeouts(ConnectionState.java:225)
at org.apache.curator.ConnectionState.getZooKeeper(ConnectionState.java:94)
at org.apache.curator.CuratorZookeeperClient.getZooKeeper(CuratorZookeeperClient.java:117)
at org.apache.curator.framework.imps.CuratorFrameworkImpl.getZooKeeper(CuratorFrameworkImpl.java:489)
at org.apache.curator.framework.imps.ExistsBuilderImpl$2.call(ExistsBuilderImpl.java:199)
at org.apache.curator.framework.imps.ExistsBuilderImpl$2.call(ExistsBuilderImpl.java:193)
at org.apache.curator.RetryLoop.callWithRetry(RetryLoop.java:109)
at org.apache.curator.framework.imps.ExistsBuilderImpl.pathInForeground(ExistsBuilderImpl.java:190)
at org.apache.curator.framework.imps.ExistsBuilderImpl.forPath(ExistsBuilderImpl.java:175)
at org.apache.curator.framework.imps.ExistsBuilderImpl.forPath(ExistsBuilderImpl.java:32)
2020-06-08 02:33:20,436 - [INFO] k.m.a.c.ClusterManagerActor - retryCount=1 maxRetries=10 zkConnect=10.51.103.77:2181
docker-compose.yml for the emqx-dev image
version: '3'
services:
  emqx:
    image: emqx-dev
    container_name: emqx
    ports:
      - "1883:1883"
      - "8083:8083"
      - "8883:8883"
      - "8084:8084"
      - "18083:18083"
    restart: on-failure
Containerizing influxdb (1.7.0)
// Pull the base image
docker pull influxdb:1.7.0
// Create the initial container
docker run -d --name influxdb-test -p 8086:8086 influxdb:1.7.0
// Create local volume directories
mkdir -p /data/docker/volumes/influxdb/data
mkdir -p /data/docker/volumes/influxdb/conf
mkdir -p /data/docker/volumes/influxdb/meta
mkdir -p /data/docker/volumes/influxdb/wal
// Copy the config file out, edit it, then copy it back
docker cp influxdb-test:/etc/influxdb/influxdb.conf /data/docker/volumes/influxdb/conf
docker cp /data/docker/volumes/influxdb/conf/influxdb.conf influxdb-test:/etc/influxdb/
// Commit the changes as a new image
docker commit influxdb-test influxdb-dev
// Run the new image
docker run -d -p 8085:8083 -p 8086:8086 --name influxdb -v /data/docker/volumes/influxdb/conf/influxdb.conf:/etc/influxdb/influxdb.conf -v /data/docker/volumes/influxdb/data/:/var/lib/influxdb/data -v /data/docker/volumes/influxdb/meta:/var/lib/influxdb/meta -v /data/docker/volumes/influxdb/wal:/var/lib/influxdb/wal influxdb-dev
// Alternatively, start the service via docker-compose.yml
docker-compose up -d
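A quick liveness check over the HTTP API; /ping returns 204 when the server is healthy:
curl -i http://localhost:8086/ping    # expect: HTTP/1.1 204 No Content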
influxdb.conf
[meta]
dir = "/var/lib/influxdb/meta"
[data]
dir = "/var/lib/influxdb/data"
engine = "tsm1"
wal-dir = "/var/lib/influxdb/wal"
max-series-per-database = 0
max-values-per-tag = 0
series-id-set-cache-size = 100
docker-compose.yml for the influxdb-dev image
version: '3'
services:
  influxdb:
    image: influxdb-dev
    container_name: influxdb
    ports:
      - "8085:8083"
      - "8086:8086"
    volumes:
      - /data/docker/volumes/influxdb/conf/influxdb.conf:/etc/influxdb/influxdb.conf
      - /data/docker/volumes/influxdb/data/:/var/lib/influxdb/data
      - /data/docker/volumes/influxdb/meta:/var/lib/influxdb/meta
      - /data/docker/volumes/influxdb/wal:/var/lib/influxdb/wal
    restart: on-failure