| host | IP |
|---|---|
Docker1 | 192.168.10.11 |
Docker2 | 192.168.10.12 |
准备工作
1、清空防火墙(都要清空防火墙)
[root@docker2 ~]# iptables -F
[root@docker2 ~]# iptables-save
一、下载镜像
1、docker1
[root@Docker1 ~]# docker pull google/cadvisor
[root@Docker1 ~]# docker pull prom/prometheus
[root@Docker1 ~]# docker pull prom/node-exporter
[root@Docker1 ~]# docker pull grafana/grafana
注释:
node-exporter主要用来收集主机的硬件使用数据,这样就可以使用cadvisor和node-exporter将容器与主机的数据都进行了收集
grafana可视化(web的ui)
2、docker2
[root@docker2 ~]# docker pull google/cadvisor
[root@docker2 ~]# docker pull prom/node-exporter
二、运行cadvisor
docker1
[root@Docker1 ~]# docker run --volume /:/rootfs:ro --volume /var/run:/var/run:rw \
--volume /sys:/sys:ro --volume /var/lib/docker:/var/lib/docker:ro \
--network host -d --name cadvisor google/cadvisor
#访问本机IP的8080端口进行验证
docker2
[root@Docker2 ~]# docker run --volume /:/rootfs:ro --volume /var/run:/var/run:rw \
--volume /sys:/sys:ro --volume /var/lib/docker:/var/lib/docker:ro \
--network host -d --name cadvisor google/cadvisor
#访问本机IP的8080端口进行验证
三、运行exporter
1、docker1
[root@Docker1 ~]# docker run -d --name exporter \
--volume /proc/:/host/proc \
--volume /sys/:/host/sys --volume /:/rootfs --network host \
prom/node-exporter --path.procfs /host/proc --path.sysfs /host/sys \
--collector.filesystem.ignored-mount-points \
"^(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers\
|rootfs/var/lib/docker/overlay2\
|rootfs/run/docker/netns|rootfs/var/lib/docker/devicemapper\
|rootfs/var/lib/docker/aufs)($|/)"
#访问本机IP的9100端口
2、docker2
[root@Docker2 ~]# docker run -d --name exporter \
--volume /proc/:/host/proc \
--volume /sys/:/host/sys --volume /:/rootfs --network host \
prom/node-exporter --path.procfs /host/proc --path.sysfs /host/sys \
--collector.filesystem.ignored-mount-points \
"^(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers\
|rootfs/var/lib/docker/overlay2\
|rootfs/run/docker/netns|rootfs/var/lib/docker/devicemapper\
|rootfs/var/lib/docker/aufs)($|/)"
#访问本机IP的9100端口
四、运行prometheus
[root@Docker1 ~]# vim prometheus.yml
global:
  scrape_interval: 15s     # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # Evaluate rules every 15 seconds.
  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'codelab-monitor'
rule_files:
  # - 'prometheus.rules.yml'
scrape_configs:
  - job_name: 'prometheus'
    # Override the global default and scrape targets from this job every 5 seconds.
    # scrape_interval: 5s
    static_configs:
      - targets: ['192.168.10.11:9090','192.168.10.11:8080','192.168.10.11:9100','192.168.10.12:8080','192.168.10.12:9100']
[root@Docker1 ~]# docker run -d --network host \
--volume /root/prometheus.yml:/etc/prometheus/prometheus.yml \
--name prometheus prom/prometheus
#访问本机IP:9090端口
在Status → Targets页面查看5个监控目标(targets)是否都为UP状态
五、运行grafana
[root@Docker1 ~]# docker run -d -i --name grafana --network host \
-e "GF_SERVER_ROOT_URL=http://grafana.server.name" \
-e "GF_SECURITY_ADMIN_PASSWORD=123.com" grafana/grafana
#GF_SECURITY_ADMIN_PASSWORD后面写的是密码,用户名默认为admin
#访问本机IP的3000端口
下载所需要的json文件(Grafana仪表盘模板)
点击这里即可下载
最终会显示这个页面