# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['192.168.188.53:9093']
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "node_rules.yml"
  - "mysql_rules.yml"
  # - "first_rules.yml"
  # - "second_rules.yml"

# The scrape configurations. The first job scrapes Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ["192.168.188.53:9090"]
  - job_name: linux
    metrics_path: '/metrics'
    static_configs:
      - targets: ['192.168.188.40:9100', '192.168.188.41:9100', '192.168.188.42:9100', '192.168.188.43:9100',
                  '192.168.188.44:9100', '192.168.188.45:9100', '192.168.188.46:9100', '192.168.188.47:9100',
                  '192.168.188.48:9100', '192.168.188.49:9100', '192.168.188.50:9100', '192.168.188.51:9100',
                  '192.168.188.52:9100', '192.168.188.53:9100']
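
  # mysqld_exporter targets (listening on port 9105 here; the exporter's default port is 9104).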
  - job_name: mysql
    metrics_path: '/metrics'
    static_configs:
      - targets: ['192.168.188.50:9105', '192.168.188.51:9105', '192.168.188.52:9105']

  - job_name: 'consul-prometheus'
    consul_sd_configs:
      - server: '192.168.:8500'
        services: []
    relabel_configs:
      - source_labels: [__meta_consul_tags]
        regex: .*test.*
        action: keep
      - regex: __meta_consul_service_metadata_(.+)
        action: labelmap
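
# How the consul-prometheus job behaves:
#   - `services: []` discovers every service registered in the Consul catalog.
#   - The first relabel rule keeps only targets whose Consul tag list contains "test".
#   - The `labelmap` rule copies each Consul service metadata key onto the target as a
#     Prometheus label, e.g. metadata key `env` arrives as `__meta_consul_service_metadata_env`
#     and becomes the label `env`.
#
# A minimal (hypothetical) Consul service definition that this job would pick up,
# assuming an exporter on port 8080 and a metadata key `env`:
#
#   {
#     "service": {
#       "name": "web",
#       "port": 8080,
#       "tags": ["test"],
#       "meta": { "env": "prod" }
#     }
#   }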