- -d 后台运行
- --name 给容器命名
- -p 宿主机端口:容器内部端口 端口映射
- -v 数据卷
- -e 配置环境
基础命令
命令 | 解释 |
---|---|
docker images | 查看镜像 |
docker run -d --name nginx01 -p 3355:80 nginx | 启动nginx镜像并且开放3355端口号 |
curl localhost:3355 | 测试 |
docker pull tomcat:9.0 | 下载镜像,可以指定版本号 |
docker run -d -p 3366:8080 --name tomcat1 tomcat | 启动tomcat镜像开放3366端口号 |
docker rmi 08efef7ca980 | 删除镜像 |
docker rmi -f $(docker images -aq) | 删除所有镜像 |
docker exec -it containerId /bin/bash | 以交互模式进入容器,redis用/bin/sh |
docker ps -a | 查看所有的容器 |
docker start containerId | 启动容器 |
docker stop containerId | 停止容器 |
docker rm containerId | 删除容器 |
docker rm -f $(docker ps -aq) | 删除所有容器 |
docker inspect containerId | 查看容器内部信息 |
curl localhost:8080 | 测试容器 |
docker attach containerId | 进入已经运行的容器中 |
docker stats | 查看内存使用情况 |
docker save -o centos_image.docker centos | 保存镜像到当前文件 |
docker load -i centos_image.docker | 加载镜像 |
es指令
命令 | 解释 |
---|---|
docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx512m" elasticsearch:7.6.2 | 启动es,并且指定内存分配空间 |
commit指令
命令 | 解释 |
---|---|
docker commit -a="name" -m="test add webapps" 容器id tomcat02:1.0 | 发布新的镜像 |
-v 挂载
命令 | 解释 |
---|---|
docker run -it --name ceshi -v /home/ceshi3:/home centos /bin/bash | 挂载 |
docker run -it --name docker02 --volumes-from docker01 镜像ID | docker02 继承docker01挂载启动 |
mysql
命令 | 解释 |
---|---|
docker run -d -p 3310:3306 -v /home/mysql/conf:/etc/mysql/conf.d -v /home/mysql/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 --name mysql01 mysql:5.7 | 启动mysql 挂载出配置文件、数据文件、修改密码、修改名字 |
挂载tomcat webapps和日志
docker run -d -p 8085:8080 -v /usr/local/tomcat/webapps/:/usr/local/apache-tomcat-8.5.66/webapps/ -v /usr/local/tomcat/logs/:/usr/local/apache-tomcat-8.5.66/logs --name tomcat tomcat02
如何确定是具名挂载还是匿名挂载,还是指定路径挂载!
-v 容器内路径 #匿名挂载
-v 卷名:容器内路径 #具名挂载
-v /宿主机路径:容器内路径 #指定路径挂载!
用build构建自己的镜像
编写文件dockerfile,可以实现构建及挂载
# Minimal image that demonstrates anonymous volume mounts at build time.
FROM centos
# Both paths are container-side; the host directories are auto-generated
# (anonymous mounts) — locate them via `docker inspect` -> Mounts -> Source.
VOLUME ["volume01","volume02"]
CMD echo "------end------"
# NOTE: only the LAST CMD in a Dockerfile takes effect, so the echo above
# is overridden by /bin/bash when the container starts.
CMD /bin/bash
-f 指定文件地址
-t 是tag的意思
docker build -f /home/docker-test-volume/dockerfile1 -t name/centos .
生成镜像后,启动容器,查看挂载的文件目录的位置,用inspect查看容器详情,在Mounts中source可以看到宿主机文件位置,容器中volume01,volume02所创建的文件在宿主机的这个路径会被同步
dockerFile发布自己的tomcat镜像
# Build a Tomcat 9 image on top of CentOS with a local JDK 8 tarball.
FROM centos
MAINTAINER name<123456789@qq.com>
COPY readme.txt /usr/local/readme.txt
# ADD auto-extracts local tar.gz archives into the target directory.
ADD jdk-8u11-linux-x64.tar.gz /usr/local/
ADD apache-tomcat-9.0.22.tar.gz /usr/local/
RUN yum -y install vim
# Fixed: the original fused these two instructions into one line
# ("ENV MYPATH /usr/localWORKDIR $MYPATH"), which is invalid Dockerfile syntax.
ENV MYPATH /usr/local
WORKDIR $MYPATH
ENV JAVA_HOME /usr/local/jdk1.8.0_11
# Fixed: "1ib" (digit one) -> "lib" in the classpath entries.
ENV CLASSPATH $JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
ENV CATALINA_HOME /usr/local/apache-tomcat-9.0.22
# Fixed: CATALINA_BASH -> CATALINA_BASE (standard Tomcat variable name).
ENV CATALINA_BASE /usr/local/apache-tomcat-9.0.22
ENV PATH $PATH:$JAVA_HOME/bin:$CATALINA_HOME/lib:$CATALINA_HOME/bin
EXPOSE 8080
# Fixed: "/ur1/local" typo, and catalina.out lives under $CATALINA_HOME/logs/,
# not under bin/logs/. tail -F keeps PID 1 alive so the container stays up.
CMD /usr/local/apache-tomcat-9.0.22/bin/startup.sh && tail -F /usr/local/apache-tomcat-9.0.22/logs/catalina.out
自定义网络
此方法可以不用docker的--link指令,就可以实现容器之间通讯
这里-P代表随机映射端口号,49000~49900 之间,有意思的是-P需要关闭防火墙才可以访问,-p不需要,可能-p有添加白名单操作
在我们用docker run -d -P --name tomcat01 tomcat启动的时候
实际上是docker run -d -P --name tomcat01 --net bridge tomcat,添加了--net bridge默认参数
--driver bridge 桥接
--subnet 192.168.0.0/16 子网地址
--gateway 192.168.0.1 网关,家里路由器地址(mynet 是自定义网络的名字)
docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 mynet
命令 | 解释 |
---|---|
docker network ls | 查看网络 |
docker network inspect mynet | 查看某个网络详情 |
docker run -d -P --name tomcat-net-01 --net mynet tomcat | 启动一个在我们自定义网络的容器 |
docker exec -it tomcat-net-01 ping tomcat-net-02 | 用自定义容器ping另外一个自定义容器 |
网络连通
尝试ping不同网关的容器
docker exec -it tomcat1 ping tomcat-net-01
这时候是ping不通的,需要打通才可以
#网关之间是不能相互打通的,只能通过容器打通网关
其实就是将tomcat1放到了mynet 网络下
docker network connect mynet tomcat1
redis集群
1.自定义redis网卡及查看
docker network create redis --subnet 172.38.0.0/16
docker network ls
docker network inspect redis
2.redis用脚本创建节点及设置
# Generate redis.conf for the six cluster nodes (node-1 .. node-6).
# Each node announces itself on 172.38.0.1<port> inside the "redis" network.
for port in {1..6}; do
  conf_dir="/mydata/redis/node-${port}/conf"
  mkdir -p "${conf_dir}"
  touch "${conf_dir}/redis.conf"
  cat > "${conf_dir}/redis.conf" <<EOF
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done
3.开启redis服务
//节点1
docker run -p 6371:6379 -p 16371:16379 --name redis-1 \
-v /mydata/redis/node-1/data:/data \
-v /mydata/redis/node-1/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.11 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
//节点2
docker run -p 6372:6379 -p 16372:16379 --name redis-2 \
-v /mydata/redis/node-2/data:/data \
-v /mydata/redis/node-2/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.12 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
//节点3
docker run -p 6373:6379 -p 16373:16379 --name redis-3 \
-v /mydata/redis/node-3/data:/data \
-v /mydata/redis/node-3/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.13 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
//节点4
# Fixed: host path was "node-4data" (missing "/"), so the data volume
# mounted the wrong directory; now matches the node-1..6 layout.
docker run -p 6374:6379 -p 16374:16379 --name redis-4 \
-v /mydata/redis/node-4/data:/data \
-v /mydata/redis/node-4/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.14 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
//节点5
docker run -p 6375:6379 -p 16375:16379 --name redis-5 \
-v /mydata/redis/node-5/data:/data \
-v /mydata/redis/node-5/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.15 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
//节点6
docker run -p 6376:6379 -p 16376:16379 --name redis-6 \
-v /mydata/redis/node-6/data:/data \
-v /mydata/redis/node-6/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.16 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
4.进入到redis节点中
docker exec -it redis-1 /bin/sh
5.创建集群
redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 \
172.38.0.13:6379 172.38.0.14:6379 172.38.0.15:6379 \
172.38.0.16:6379 --cluster-replicas 1
redis会打印集群的信息,然后问你是否按照这样创建集群,输入yes创建
[root@localhost redis]# docker exec -it redis-1 /bin/sh
/data # redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 \
> 172.38.0.13:6379 172.38.0.14:6379 172.38.0.15:6379 \
> 172.38.0.16:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 172.38.0.15:6379 to 172.38.0.11:6379
Adding replica 172.38.0.16:6379 to 172.38.0.12:6379
Adding replica 172.38.0.14:6379 to 172.38.0.13:6379
M: 21271498f1a004a1e4f5eaeb71f8f8619b8f6d2a 172.38.0.11:6379
slots:[0-5460] (5461 slots) master
M: c5741bb94117140b6dd8f095654f715744c0050e 172.38.0.12:6379
slots:[5461-10922] (5462 slots) master
M: 25200c6f345cfa6c983dd80f0e57f92664b37021 172.38.0.13:6379
slots:[10923-16383] (5461 slots) master
S: 8bb43d84170a744064885c442f269782fc2ecb75 172.38.0.14:6379
replicates 25200c6f345cfa6c983dd80f0e57f92664b37021
S: f116f35f4d195a09d8afeebdbc7e5c745c94b1e7 172.38.0.15:6379
replicates 21271498f1a004a1e4f5eaeb71f8f8619b8f6d2a
S: 3b7cf88e091c0d156d878dfab906c9ad4bed0545 172.38.0.16:6379
replicates c5741bb94117140b6dd8f095654f715744c0050e
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
....
>>> Performing Cluster Check (using node 172.38.0.11:6379)
M: 21271498f1a004a1e4f5eaeb71f8f8619b8f6d2a 172.38.0.11:6379
slots:[0-5460] (5461 slots) master
1 additional replica(s)
M: 25200c6f345cfa6c983dd80f0e57f92664b37021 172.38.0.13:6379
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
M: c5741bb94117140b6dd8f095654f715744c0050e 172.38.0.12:6379
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: 3b7cf88e091c0d156d878dfab906c9ad4bed0545 172.38.0.16:6379
slots: (0 slots) slave
replicates c5741bb94117140b6dd8f095654f715744c0050e
S: 8bb43d84170a744064885c442f269782fc2ecb75 172.38.0.14:6379
slots: (0 slots) slave
replicates 25200c6f345cfa6c983dd80f0e57f92664b37021
S: f116f35f4d195a09d8afeebdbc7e5c745c94b1e7 172.38.0.15:6379
slots: (0 slots) slave
replicates 21271498f1a004a1e4f5eaeb71f8f8619b8f6d2a
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
6.进入redis集群中
# -c 表示集群
redis-cli -c
cluster info //查看集群配置
cluster nodes //查看集群节点信息
7.测试
127.0.0.1:6379> set a b
-> Redirected to slot [15495] located at 172.38.0.13:6379
OK
#然后再开一个窗口关闭 172.38.0.13这台机器
#这里需要退出当前,重新连接redis集群
172.38.0.13:6379> get a
^Z[1]+ Stopped redis-cli -c
/data # redis-cli -c
127.0.0.1:6379> get a
#可以看到172.38.0.14这台机器顶上来了
-> Redirected to slot [15495] located at 172.38.0.14:6379
"b"
172.38.0.14:6379>
#这里可以看到13这台已经是fail状态
172.38.0.14:6379> cluster nodes
21271498f1a004a1e4f5eaeb71f8f8619b8f6d2a 172.38.0.11:6379@16379 master - 0 1616659740455 1 connected 0-5460
8bb43d84170a744064885c442f269782fc2ecb75 172.38.0.14:6379@16379 myself,master - 0 1616659740000 7 connected 10923-16383
f116f35f4d195a09d8afeebdbc7e5c745c94b1e7 172.38.0.15:6379@16379 slave 21271498f1a004a1e4f5eaeb71f8f8619b8f6d2a 0 1616659740000 5 connected
25200c6f345cfa6c983dd80f0e57f92664b37021 172.38.0.13:6379@16379 master,fail - 1616659450665 1616659448144 3 connected
3b7cf88e091c0d156d878dfab906c9ad4bed0545 172.38.0.16:6379@16379 slave c5741bb94117140b6dd8f095654f715744c0050e 0 1616659740000 6 connected
c5741bb94117140b6dd8f095654f715744c0050e 172.38.0.12:6379@16379 master - 0 1616659741470 2 connected 5461-10922