Basic service deployment
docker
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
/* Optional: the daemon.json customization below can be skipped
mkdir -p /etc/docker/
cat >/etc/docker/daemon.json <<EOF
{
"graph": "/data/docker",
"storage-driver": "overlay2",
"insecure-registries": ["registry.access.redhat.com","quay.io"],
"bip": "172.0.30.1/24",
"exec-opts": ["native.cgroupdriver=systemd"],
"live-restore": true
}
EOF
mkdir -p /data/docker
*/
systemctl start docker
systemctl status docker
systemctl enable docker
docker --version
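To sanity-check the install (assumes outbound access to Docker Hub):
# Pull and run a throwaway test container
docker run --rm hello-world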
A second way to install Docker, on an Aliyun server
sudo wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum -y install docker-ce
docker -v
sudo systemctl start docker
sudo systemctl enable docker
sudo systemctl status docker
Also, the latest Aliyun registry mirror (goes in /etc/docker/daemon.json)
{
  "registry-mirrors": [
    "https://2l1bnhmy.mirror.aliyuncs.com"
  ]
}
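After merging this into /etc/docker/daemon.json, restart the daemon and verify the mirror registered:
sudo systemctl restart docker
# The mirror URL should appear under "Registry Mirrors"
docker info | grep -A1 -i mirror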
es
docker run -p 9222:9200 -p 9300:9300 -itd -e "discovery.type=single-node" --name elastic \
-v /biz-code/es_db:/usr/share/elasticsearch/data \
-v /etc/localtime:/etc/localtime \
docker.elastic.co/elasticsearch/elasticsearch:7.9.2
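A quick health check against the remapped host port, once the node has started:
# Expect "status" : "green" or "yellow" for a single-node cluster
curl -s http://localhost:9222/_cluster/health?pretty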
kibana
docker run -p 5666:5601 -itd --link elastic -v /etc/localtime:/etc/localtime -e ELASTICSEARCH_HOSTS=http://elastic:9200 \
--name kibana kibana:7.9.2
(Kibana 7.x reads ELASTICSEARCH_HOSTS, and with --link the container resolves the hostname elastic; localhost inside the container would point at Kibana itself.)
If the env var doesn't take, edit the config in place (the address below is the host IP with the remapped ES port 9222):
docker exec -it kibana /bin/bash
vi /usr/share/kibana/config/kibana.yml
elasticsearch.hosts: [ "http://172.16.0.15:9222/" ]
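To confirm Kibana reached ES, its status API is a quick probe (5666 is the host port mapped above):
# An overall state of "green" means Kibana connected to Elasticsearch
curl -s http://localhost:5666/api/status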
redis
docker run -p 6333:6379 --name redis --restart=always -v /data:/data -v /etc/localtime:/etc/localtime -d redis \
redis-server --appendonly yes --requirepass zby123456
(--restart=always must come before the image name; anything after it is passed as an argument to redis-server.)
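Sanity check with the bundled redis-cli (the -a password matches --requirepass above):
# Expect PONG
docker exec -it redis redis-cli -a zby123456 ping
# Expect "appendonly" / "yes", confirming AOF persistence
docker exec -it redis redis-cli -a zby123456 config get appendonly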
mysql
docker run -itd --name mysql --restart=always -e MYSQL_ROOT_PASSWORD=zhubaoe@mysql -p 3333:3306 -v /data/mysql/data:/var/lib/mysql -v /etc/localtime:/etc/localtime mysql:5.7 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
Variant pinned to a config file (if it fails to start, remove the character-set flags)
docker run -itd -p 9876:3306 --privileged=true -v /backup/data/mysql/data/backup-my.cnf:/etc/my.cnf -v /backup/data/mysql/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=zby1q2w3e --name mysql-rds mysql:5.7 --character-set-server=utf8mb4 --collation-server=utf8mb4_general_ci
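To verify the container came up with the intended charset (first container's name and password shown):
# Should print utf8mb4 for character_set_server
docker exec -it mysql mysql -uroot -p'zhubaoe@mysql' -e "SHOW VARIABLES LIKE 'character_set_server';"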
nginx
docker run -dit -p 80:80 -v /biz-code/configs/nginx.conf:/etc/nginx/nginx.conf -v /etc/localtime:/etc/localtime --name=nginx nginx
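After editing the mounted nginx.conf, validate and reload without recreating the container:
# Check syntax, then reload workers in place
docker exec nginx nginx -t
docker exec nginx nginx -s reload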
zookeeper
docker run -itd --name zookeeper_back --publish 2182:2181 --volume /etc/localtime:/etc/localtime --restart=always wurstmeister/zookeeper
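Liveness probe via the four-letter-word interface (enabled by default on this older 3.4.x image; newer ZooKeepers must whitelist ruok):
# Expect "imok" back on the remapped port 2182
echo ruok | nc localhost 2182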
kafka
docker run -itd --name kafka --publish 9092:9092 --env KAFKA_BROKER_ID=100 --env HOST_IP=172.16.0.12 --env KAFKA_ZOOKEEPER_CONNECT=172.16.0.12:2182 --env KAFKA_ADVERTISED_HOST_NAME=172.16.0.12 --env KAFKA_ADVERTISED_PORT=9093 --restart=always --volume /etc/localtime:/etc/localtime wurstmeister/kafka
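Topics can be inspected with the scripts shipped in the wurstmeister image (assuming kafka lives under /opt/kafka there; this broker version still accepts --zookeeper):
# List all topics registered in the ZooKeeper started above
docker exec -it kafka /opt/kafka/bin/kafka-topics.sh --zookeeper 172.16.0.12:2182 --list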
kafka-ui (multiple clusters can be configured)
docker run -itd --name kafka-ui -p 8086:8080 \
-e KAFKA_CLUSTERS_0_NAME=prod \
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=172.16.0.12:9092 \
-e KAFKA_CLUSTERS_1_NAME=laomiao \
-e KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS=172.16.0.12:9093 \
provectuslabs/kafka-ui:latest
nginx+php
docker run --name lmws \
-v /biz-code/lmws/build-work/lmws.zhubaoe.cn.conf:/etc/nginx/sites-enabled/lmws.ze.cn.conf \
-v /biz-code/lmws:/var/www/html \
-p 8092:80 \
-d richarvey/nginx-php-fpm
Accompanying nginx config
server {
    listen 80;
    root /var/www/html;

    location / {
        index index.htm index.html index.php;
        if (!-e $request_filename) {
            rewrite ^/index.php(.*)$ /index.php?s=$1 last;
            rewrite ^(.*)$ /index.php?s=$1 last;
        }
    }

    location ~ \.php$ {
        try_files $uri =404;
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_pass unix:/var/run/php-fpm.sock;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        fastcgi_param SCRIPT_NAME $fastcgi_script_name;
        fastcgi_index index.php;
        include fastcgi_params;
    }
}
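To confirm PHP-FPM is wired through nginx, a throwaway probe in the mounted web root (info.php is a hypothetical test file; delete it afterwards):
echo '<?php phpinfo();' > /biz-code/lmws/info.php
# Expect the phpinfo() HTML page back
curl -s http://localhost:8092/info.php | head
rm /biz-code/lmws/info.php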
jenkins
docker run -d -u root --cap-add SYS_TIME -p 8080:8080 -p 50000:50000 -v /soft/data/jenkins_home_new:/var/jenkins_home -v /var/run/docker.sock:/var/run/docker.sock --name jenkins jenkins/jenkins:2.375.1
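The first-run unlock password can be read straight from the mounted home:
# Paste this into the "Unlock Jenkins" page at http://<host>:8080
docker exec jenkins cat /var/jenkins_home/secrets/initialAdminPassword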
prometheus
docker run -itd -p 9090:9090 --name prometheus --restart=always -v /data/prometheus/data/:/data -v /data/prometheus/prometheus.yml:/data/prometheus.yml prom/prometheus --config.file=/data/prometheus.yml --web.enable-lifecycle --storage.tsdb.retention=90d --enable-feature=remote-write-receiver
prometheus.yml
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      # addresses to scrape
      - targets: ['localhost:9090']
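Because the container was started with --web.enable-lifecycle, config edits can be hot-reloaded without a restart:
# Re-read the mounted prometheus.yml
curl -X POST http://localhost:9090/-/reload
# Expect "health":"up" for the local scrape target
curl -s http://localhost:9090/api/v1/targets | grep -o '"health":"[a-z]*"'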