docker 的部分软件安装
安装docker
docker官网有教程。再将源改成阿里的源(自行百度)
安装MySQL
先去docker hub中搜索自己需要的版本。
再执行如下命令
# xxx对应docker hub对应的版本,不加默认最新
sudo docker pull mysql:xxx
下载后我们可以通过如下命令查看我们下载的镜像
# List the locally downloaded images.
# (The original note repeated "pull" by mistake; "docker images" is the
# command that lists local images, "docker pull" downloads one.)
sudo docker images
mkdir log conf data mysql-files
如果没有显示镜像信息,则下载失败。下载没成功的话可以自行百度原因。
我们可以安装MySQL:
# Run a MySQL 8 container, host port 18801 -> container port 3306.
# The host paths under /data/dockerData/mysql8 (log, data, conf/my.cnf,
# mysql-files) must be created before running, otherwise the single-file
# my.cnf mount is created as a directory by Docker.
docker run --restart=always -p 18801:3306 --name mysql8 \
-v /data/dockerData/mysql8/log:/var/log/mysql \
-v /data/dockerData/mysql8/data:/var/lib/mysql \
-v /data/dockerData/mysql8/conf/my.cnf:/etc/mysql/my.cnf \
-v /data/dockerData/mysql8/mysql-files:/var/lib/mysql-files \
-e MYSQL_ROOT_PASSWORD=root@root*** \
-d mysql
# Run a MySQL 5.7 container, host port 18800 -> container port 3306.
# Same layout as the MySQL 8 container but under /data/dockerData/mysql5.7;
# create those host directories/files first.
docker run --restart=always -p 18800:3306 --name mysql \
-v /data/dockerData/mysql5.7/log:/var/log/mysql \
-v /data/dockerData/mysql5.7/data:/var/lib/mysql \
-v /data/dockerData/mysql5.7/conf/my.cnf:/etc/mysql/my.cnf \
-v /data/dockerData/mysql5.7/mysql-files:/var/lib/mysql-files \
-e MYSQL_ROOT_PASSWORD=root@root*** \
-d mysql:5.7
my.cnf可以简单写:
[client]
default_character_set=utf8
[mysqld]
# NOTE(review): MySQL's "utf8" is the 3-byte subset (utf8mb3) and cannot
# store emoji / 4-byte characters; consider utf8mb4 + utf8mb4_general_ci.
collation_server = utf8_general_ci
character_set_server = utf8
sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'
[mysql]
default_character_set = utf8
- –restart=always 表示失败后自动重启。如果一直重启我们需要看看日志文件,查看哪里出错。
可使用下面的命令:sudo docker logs mysql;
- -p 表示端口映射，将宿主机的18801/18800端口映射到容器内MySQL的3306端口
- –name 表示将其命名为mysql
- -v 表示将本地文件和docker内MySQL的内部文件相映射,本地文件夹和文件需要提前创建
- -e 用于传递环境变量，这里通过 MYSQL_ROOT_PASSWORD 指定 root 用户的初始密码
- -d 表示以后台(守护进程)方式运行容器；命令末尾的参数才是指定使用哪个镜像
运行完成会出现一串字符,我们可以使用下面命令查看是否运行成功。
# "sudo" was misspelled as "sodo" in the original; -a also lists
# containers that have exited.
sudo docker ps -a
查看所有容器(包括已停止的)。
安装redis
类似于MySQL，先拉取镜像，然后执行下面的安装命令。
# Run Redis with a custom config file; host port 18802 -> container 6379.
# The container command is overridden to start redis-server with that config.
# NOTE(review): no data volume is mounted, so RDB/AOF files live inside the
# container and are lost when it is removed — consider also mounting /data.
docker run --restart=always -p 18802:6379 --name redis \
-v /data/dockerData/redis/conf/redis.conf:/etc/redis/redis.conf \
-d redis redis-server /etc/redis/redis.conf
redis.conf的原始配置
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
requirepass root@root***
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events Ex
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
安装zookeeper
# Run a single ZooKeeper node; data/conf/datalog are persisted on the host.
# /etc/localtime is mounted so the container clock zone matches the host.
sudo docker run --restart=always -p 2181:2181 --name zookeeper \
-v /data/zookeeper/data:/data \
-v /data/zookeeper/conf:/conf \
-v /data/zookeeper/datalog:/datalog \
-v /etc/localtime:/etc/localtime \
-d zookeeper:latest
如果已经映射了上述目录，则 zoo.cfg 文件中的路径应使用容器内的路径：
clientPort=2181
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
autopurge.snapRetainCount=3
autopurge.purgeInterval=0
maxClientCnxns=60
standaloneEnabled=true
admin.enableServer=true
quorumListenOnAllIPs=true
server.1=1.117.89.91:2888:3888
server.2=121.4.104.4:2888:3888
server.3=120.48.110.64:2888:3888
安装kafka
# Run Kafka (wurstmeister image) against an external ZooKeeper at
# 192.168.1.113:2181 under the /kafka chroot.
# KAFKA_ADVERTISED_LISTENERS must be an address clients can actually reach;
# KAFKA_LISTENERS binds on all interfaces inside the container.
sudo docker run -p 9092:9092 --name kafka \
-e KAFKA_BROKER_ID=1 \
-e KAFKA_ZOOKEEPER_CONNECT=192.168.1.113:2181/kafka \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.1.113:9092 \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
-v /etc/localtime:/etc/localtime \
-v /usr/docker/data/kafka:/kafka \
-d wurstmeister/kafka
# create a topic
bin/kafka-topics.sh --create --zookeeper 192.168.1.113:2181/kafka --replication-factor 1 --partitions 1 --topic test-topic
# consume messages from the beginning
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test-topic --from-beginning
# produce messages interactively
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test-topic
mongodb
# Pull and run MongoDB 4.2.6; the trailing --auth is passed to mongod
# (not to docker) and enables authentication.
docker pull mongo:4.2.6
docker run -d --name mongod -p 27017:27017 mongo:4.2.6 --auth
进入容器
# open an interactive bash shell inside the MySQL container
sudo docker exec -it mysql bash
# other containers work the same way
sudo docker exec -it kafka /bin/bash
frps(配置具有公网的服务器)
创建 frps.toml
# NOTE(review): this is the legacy INI syntax; frp >= 0.52 expects real TOML
# keys (bindPort = 7000, ...) in a .toml file — confirm the frps version in
# the snowdreamtech/frps image still accepts the [common] INI format.
[common]
# listen port for client connections
bind_port = 7000
# web dashboard port
dashboard_port = 7500
# dashboard login credentials
dashboard_user = admin
dashboard_pwd = spoto1234
# proxy ports for http/https vhosts (optional)
vhost_http_port = 7080
vhost_https_port = 7081
# authentication token; must match the clients
token = 12345678
docker run --restart=always --network host -v /etc/frp/frps.toml:/etc/frp/frps.toml --name frps -d snowdreamtech/frps
frpc
[common]
# server_addr is the public IP of the frps server
server_addr = x.x.x.x
# server_port must match bind_port on the server side
server_port = 7000
# authentication token; must match the server
token = 12345678
[ssh]
type = tcp
local_ip = 127.0.0.1
local_port = 22
remote_port = 2288
# [ssh] is the proxy name; with this entry, connecting to port 2288 on the
# frps server is forwarded to 127.0.0.1:22 on the client machine.
# type        - connection type (tcp here)
# local_ip    - address the frp client actually connects to
# local_port  - target port on that address
# remote_port - port exposed on the frps server
# Renamed from a second "[ssh]" section: frpc proxy names must be unique,
# and a duplicate section name prevents the client config from loading.
[web_18022]
type = tcp
local_ip = 192.168.1.229
local_port = 80
remote_port = 18022
[unRAID web]
type = tcp
local_ip = 192.168.1.229
local_port = 80
remote_port = 18088
[Truenas web]
type = tcp
local_ip = 192.168.1.235
local_port = 80
remote_port = 18188
[speedtest]
type = tcp
local_ip = 192.168.1.229
local_port = 6580
remote_port = 18190
[webdav]
type = tcp
local_ip = 192.168.1.235
local_port = 18080
remote_port = 18189
[RDP PC1]
type = tcp
local_ip = 192.168.1.235
local_port = 3389
remote_port = 18389
docker run --restart=always --network host -v /etc/frp/frpc.toml:/etc/frp/frpc.toml --name frpc -d snowdreamtech/frpc
docker compose
kafka
# docker-compose stack: ZooKeeper + Kafka + Kafka UI.
# Fixes vs. the original notes:
#  - restored YAML indentation (the flat paste is not valid YAML);
#  - TZ is unquoted: in list-style `environment`, TZ="Asia/Shanghai" would
#    set the value to the literal string `"Asia/Shanghai"` including quotes;
#  - kafka-ui joins the `kafka` network, otherwise the hostname `kafka`
#    in BOOTSTRAPSERVERS cannot be resolved (kafka is only on that network).
services:
  zookeeper:
    image: zookeeper
    container_name: zookeeper
    ports:
      - 2181:2181
    restart: always
    networks:
      - kafka
    environment:
      - TZ=Asia/Shanghai
    volumes:
      - /data/dockerData/zookeeper/data:/data
      - /data/dockerData/zookeeper/conf:/conf
      - /etc/localtime:/etc/localtime
  kafka:
    image: wurstmeister/kafka
    container_name: kafka
    ports:
      - 9092:9092
    networks:
      - kafka
    environment:
      - TZ=Asia/Shanghai
      - KAFKA_BROKER_ID=1
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181/kafka
      # advertised address must be reachable by external clients
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.56.109:9092
      - KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/dockerData/kafka:/kafka
    depends_on:
      - zookeeper
  kafka-ui:
    image: provectuslabs/kafka-ui
    container_name: kafka-ui
    ports:
      - 8081:8080
    networks:
      - kafka
    depends_on:
      - kafka
    environment:
      - KAFKA_CLUSTERS_0_NAME=local
      - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
networks:
  kafka: