基于static_configs进行服务发现
一、prometheus.yml
[root@k8s-harbor1 prometheus]# vim prometheus.yml
# Sample config for Prometheus.
global:
  scrape_interval: 15s      # Scrape targets every 15 seconds (default: 1 minute).
  evaluation_interval: 15s  # Evaluate rules every 15 seconds (default: 1 minute).
  # scrape_timeout is set to the global default (10s).
  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'example'
# Alertmanager configuration: where Prometheus sends fired alerts.
alerting:
  alertmanagers:
  - static_configs:
    - targets: ['192.168.150.159:9093']
# Rule configuration: recording/alerting rule files to load.
rule_files:
- "/etc/prometheus/rule.yml"  # rule file referenced by the alerting section above
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:  # metric collection targets
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
# Scrape the local Prometheus server's own metrics:
- job_name: 'prometheus'
  # Override the global default and scrape targets from this job every 5 seconds.
  scrape_interval: 5s
  scrape_timeout: 5s
  # metrics_path defaults to '/metrics'
  # scheme defaults to 'http'.
  static_configs:
  - targets: ['localhost:9090']
# Scrape the local machine's metrics via node-exporter:
- job_name: node
  # If prometheus-node-exporter is installed, grab stats about the local machine by default.
  static_configs:
  - targets: ['localhost:9100']
# Scrape cluster node metrics via node-exporter running on each node:
- job_name: prometheus-node
  static_configs:
  - targets: ['192.168.150.161:9100', '192.168.150.162:9100', '192.168.150.163:9100']
# Website/URL monitoring via the blackbox exporter:
# NOTE(review): indentation appears to have been stripped from this pasted
# snippet — the keys below must be nested under the `- job_name` list item
# to form a valid Prometheus scrape config. Left byte-identical here.
- job_name: http_status
metrics_path: /probe
params:
module: [http_2xx]  # must match a module name defined in blackbox.yml
static_configs:
- targets: ['http://www.xiaomi.com','http://www.magedu.com']
labels:
instance: http_status
group: web
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
# NOTE(review): snippet is truncated here — the `target_label:` for the rule
# below (typically __param_instance) and the usual rewrite of __address__ to
# the blackbox exporter's host:port are not visible in this chunk; confirm
# against the full file.
- source_labels: [__param_target]