A complete reference of k8s cluster monitoring metrics and alert thresholds (very detailed, all practical, ready to copy and paste)

Contents

kube-apiserver

coredns

etcd

kube-controller-manager

kubelet

kube-scheduler

kube-state-metrics

node

deployment

ingress-nginx

pod


kube-apiserver

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  annotations:
    meta.helm.sh/release-namespace: uk8s-monitor
    prometheus-operator-validated: "true"
  generation: 3
  labels:
    app: rancher-monitoring
    app.kubernetes.io/instance: rancher-monitoring
    app.kubernetes.io/part-of: rancher-monitoring
  name: kube-apiserver-rules
  namespace: uk8s-monitor
spec:
  groups:
  - name: kube-apiserver.rule
    rules:
    - alert: K8SApiserverDown
      expr: up{job="apiserver"} == 0
      for: 1m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: kube-apiserver {{ $labels.instance }} is down. {{ $labels.instance }} is unreachable or has disappeared from service
          discovery.
        summary: kube-apiserver is down
    - alert: K8SApiserverDown
      expr: absent(up{job="apiserver"} == 1)
      for: 10m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        description: No API servers are reachable or all have disappeared from service
          discovery
        summary: No API servers are reachable
    - alert: K8SApiserverUserCPU
      expr: sum(rate(container_cpu_user_seconds_total{pod=~"kube-apiserver.*",container_name!="POD"}[5m]))by(pod) > 1 
      for: 5m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kubernetes apserver {{ $labels.instance }} is user cpu time > 1s. {{ $labels.instance }} isn't reachable"
        summary: "kubernetes apserver 负载较高超过1s,当前值为{{$value}}"
    - alert: K8SApiserverUserCPU
      expr: sum(rate(container_cpu_user_seconds_total{pod=~"kube-apiserver.*",container_name!="POD"}[5m]))by(pod) > 5
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kubernetes apserver {{ $labels.instance }} is user cpu time > 5s. {{ $labels.instance }} isn't reachable"
        summary: "kubernetes apserver 负载较高超过5s,当前值为{{$value}}"
    - alert: K8SApiserverUserCPU
      expr: sum(rate(container_cpu_user_seconds_total{pod=~"kube-apiserver.*",container_name!="POD"}[5m]))by(pod) > 10
      for: 5m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kubernetes apserver {{ $labels.instance }} is user cpu time > 10s. {{ $labels.instance }} isn't reachable"
        summary: "kubernetes apserver 负载较高超过10s,当前值为{{$value}}"
    - alert: K8SApiserverUseMemory
      expr: sum(container_memory_usage_bytes{pod=~"kube-apiserver.*",container_name!="POD"}/1024/1024)by(pod) > 150
      for: 5m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kube-apiserver {{ $labels.instance }} is using more than 150MB of memory"
        summary: "kube-apiserver memory usage is above 150MB; current value: {{$value}}MB"
    - alert: K8SApiserverUseMemory
      expr: sum(container_memory_usage_bytes{pod=~"kube-apiserver.*",container_name!="POD"}/1024/1024)by(pod) > 300
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kube-apiserver {{ $labels.instance }} is using more than 300MB of memory"
        summary: "kube-apiserver memory usage is above 300MB; current value: {{$value}}MB"
    - alert: K8SApiserverUseMemory
      expr: sum(container_memory_usage_bytes{pod=~"kube-apiserver.*",container_name!="POD"}/1024/1024)by(pod) > 600
      for: 5m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kube-apiserver {{ $labels.instance }} is using more than 600MB of memory"
        summary: "kube-apiserver memory usage is above 600MB; current value: {{$value}}MB"
    #- alert: K8SApiserverApiError
    #  expr: sum(rate(apiserver_request_total{job="apiserver",code=~"[45].."}[5m]))by (resource,subresource,verb) /sum(rate(apiserver_request_total{job="apiserver"}[5m])) by (resource,subresource,verb) > 0.5
    #  for: 10m
    #  labels:
    #    severity: warning
    #    cluster: manage-prod
    #  annotations:
    #    current_value: '{{$value}}'
    #    description: "kube-apiserver {{ $labels.instance }} is returning too many 4xx/5xx API errors"
    #    summary: "kube-apiserver is returning many 4xx/5xx errors, please check"
    - alert: K8SApiserverWorkerQueue
      expr: sum(apiserver_current_inflight_requests{job="apiserver"}) > 200
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kubernetess apserver {{ $labels.instance }} worker queue accumulation"
        summary: "kubernetes apserver 待处理的请求数量 > {{$value}} "
    - alert: K8SApiserverWorkerQueue
      expr: sum(apiserver_current_inflight_requests{job="apiserver"}) > 400
      for: 5m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kubernetess apserver {{ $labels.instance }} worker queue accumulation"
        summary: "kubernetes apserver 待处理的请求数量 > {{$value}} "
    - alert: K8SApiserverQueueWait
      expr: histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket{ job="apiserver"}[5m])) by (le)) > 1
      for: 5m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kubernetess apserver {{ $labels.instance }} worker queue wite"
        summary: "kubernetes apserver 工作队列中停留的时间延时较大 > {{$value}} "
    - alert: K8SApiserverWorkerAddCount
      expr: sum(rate(workqueue_adds_total{job="apiserver"}[5m])) > 100
      for: 5m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kubernetess apserver {{ $labels.instance }}"
        summary: "kubernetes apserver 工作队列处理的添加总数,可能有堆积请检查 > {{$value}} "

coredns

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  annotations:
    meta.helm.sh/release-namespace: uk8s-monitor
    prometheus-operator-validated: "true"
  labels:
    app: rancher-monitoring
    app.kubernetes.io/instance: rancher-monitoring
    app.kubernetes.io/part-of: rancher-monitoring
  name: coredns-rules
  namespace: uk8s-monitor
spec:
  groups:
  - name: coredns.rule
    rules:
    - alert: coredns_responses_error
      expr: histogram_quantile(0.99, sum(rate(coredns_dns_request_duration_seconds_bucket{job="kube-dns"}[5m])) by(server, zone, le)) > 3
      for: 1m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        current_value: "{{$value}}"
        description: "{{ $labels.instance }} CoreDNS response time is high, more than 3 seconds, please check!"
        summary: "{{ $labels.instance }} Critical! CoreDNS response delay is above 3 seconds; current value: {{$value}}"
    - alert: coredns_responses_warning
      expr: histogram_quantile(0.99, sum(rate(coredns_dns_request_duration_seconds_bucket{job="kube-dns"}[5m])) by(server, zone, le)) > 2.5
      for: 1m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: "{{$value}}"
        description: "{{ $labels.instance }} CoreDNS response time is high, more than 2.5 seconds, please check!"
        summary: "{{ $labels.instance }} Warning! CoreDNS response delay is above 2.5 seconds; current value: {{$value}}"
    - alert: coredns_responses_info
      expr: histogram_quantile(0.99, sum(rate(coredns_dns_request_duration_seconds_bucket{job="kube-dns"}[5m])) by(server, zone, le)) > 1.5
      for: 1m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: "{{$value}}"
        description: "{{ $labels.instance }} CoreDNS response time is high, more than 1.5 seconds, please check!"
        summary: "{{ $labels.instance }} Notice! CoreDNS response delay is above 1.5 seconds; current value: {{$value}}"
    - alert: coredns_abort
      expr: coredns_panic_count_total > 0
      for: 1m
      labels:
        severity: critical
        cluster: manage-prod
      annotations:
        current_value: "{{$value}}"
        description: "{{ $labels.instance }} CoreDNS process aborted abnormally (panic)!"
        summary: "Fatal error! {{ $labels.instance }} CoreDNS process aborted abnormally; please check!"
    - alert: CorednsQueryPerMinuteInfo
      expr: sum(irate(coredns_dns_request_count_total{zone!="dropped"}[1m])) > 30000 # per-second query rate computed from the two most recent samples
      for: 1m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: Counter of DNS requests made per zone, protocol and family.
        summary: "Notice! {{ $labels.instance }} CoreDNS query rate is above the threshold of 30000; current value: {{$value}}"
    - alert: CorednsQueryPerMinuteWarning
      expr: sum(irate(coredns_dns_request_count_total{zone!="dropped"}[1m])) > 40000
      for: 1m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: Counter of DNS requests made per zone, protocol and family.
        summary: "Warning! {{ $labels.instance }} CoreDNS query rate is above the threshold of 40000; current value: {{$value}}"
    - alert: CorednsQueryPerMinuteError
      expr: sum(irate(coredns_dns_request_count_total{zone!="dropped"}[1m])) > 50000
      for: 1m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: Counter of DNS requests made per zone, protocol and family.
        summary: "Critical! {{ $labels.instance }} CoreDNS query rate is above the threshold of 50000; current value: {{$value}}"
    - alert: CorednsServfailCount
      expr: irate(coredns_dns_response_rcode_count_total{rcode!~"NOERROR|NXDOMAIN"}[1m]) > 10
      for: 1m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: The server failed to process the request, or the authoritative server for the domain refused to respond (SERVFAIL/REFUSED).
        summary: "Warning! CoreDNS is failing to process more than 10 requests per second (server failure or refused responses); current value: {{$value}}"
    - alert: CorednsCacheMissesInfo
      expr: irate(coredns_cache_misses_total[1m]) > 10
      for: 1m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: DNS cache misses statistics
        summary: "Notice! {{ $labels.instance }} CoreDNS cache miss rate is above 10; current value: {{$value}}"
    - alert: CorednsCacheMissesWarning
      expr: irate(coredns_cache_misses_total[1m]) > 30
      for: 1m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: DNS cache misses statistics
        summary: "Warning! {{ $labels.instance }} CoreDNS cache miss rate is above 30; current value: {{$value}}"
    - alert: CorednsCacheMissesError
      expr: irate(coredns_cache_misses_total[1m]) > 80
      for: 1m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: DNS cache misses statistics
        summary: "Critical! {{ $labels.instance }} CoreDNS cache miss rate is above 80; current value: {{$value}}"
    - alert: CorednsRequestTimeInfo
      expr: histogram_quantile(0.99,sum(rate(coredns_health_request_duration_seconds_bucket[1m])) by(server, zone, le)) > 0.05 # time taken per request
      for: 1m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        summary: "Notice! Instance {{ $labels.instance }} DNS request latency is above the threshold of 0.05 seconds; current value: {{$value}}"
    - alert: CorednsRequestTimeWarning
      expr: histogram_quantile(0.99,sum(rate(coredns_health_request_duration_seconds_bucket[1m])) by(server, zone, le)) > 0.1
      for: 1m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        summary: "Warning! Instance {{ $labels.instance }} DNS request latency is above the threshold of 0.1 seconds; current value: {{$value}}"
    - alert: CorednsRequestTimeError
      expr: histogram_quantile(0.99,sum(rate(coredns_health_request_duration_seconds_bucket[1m])) by(server, zone, le)) > 1
      for: 1m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        summary: "Critical! Instance {{ $labels.instance }} DNS request latency is above the threshold of 1 second; current value: {{$value}}"
    - alert: CorednsMaxOpenFilesError
      expr: sum(process_open_fds{job="kube-dns"}) > 2000000
      for: 1m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        summary: "Critical! Instance {{ $labels.instance }} open file descriptors exceed the threshold of 2000000; current value: {{$value}}"
    - alert: CorednsMaxOpenFilesWarning
      expr: sum(process_open_fds{job="kube-dns"}) > 1500000
      for: 1m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        summary: "Warning! Instance {{ $labels.instance }} open file descriptors exceed the threshold of 1500000; current value: {{$value}}"
    - alert: CorednsMaxOpenFilesInfo
      expr: sum(process_open_fds{job="kube-dns"}) > 1000000
      for: 1m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        summary: "Notice! Instance {{ $labels.instance }} open file descriptors exceed the threshold of 1000000; current value: {{$value}}"

etcd

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  annotations:
    meta.helm.sh/release-namespace: uk8s-monitor
    prometheus-operator-validated: "true"
  labels:
    app: rancher-monitoring
    app.kubernetes.io/instance: rancher-monitoring
    app.kubernetes.io/part-of: rancher-monitoring
  name: etcd-rules
  namespace: uk8s-monitor
spec:
  groups:
  - name: etcd.rule
    rules:
    - alert: etcdDown
      expr: up{job="etcd"} == 0
      for: 1m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: etcd {{ $labels.instance }} is down. {{ $labels.instance }} has been unreachable for 1 minute.
        summary: etcd is down
    - alert: NoLeader
      expr: etcd_server_has_leader{job="etcd"} == 0
      labels:
        severity: critical
        cluster: manage-prod
      annotations:
        description: etcd member {{ $labels.instance }} has no leader
        summary: etcd member has no leader
    - alert: HighNumberOfLeaderChanges
      expr: increase(etcd_server_leader_changes_seen_total{job="etcd"}[10m]) > 3
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        description: etcd instance {{ $labels.instance }} has seen {{ $value }} leader
          changes within the last 10 minutes
    - record: instance:fd_utilization
      expr: process_open_fds / process_max_fds
    - alert: FdExhaustionClose
      expr: predict_linear(instance:fd_utilization[1h], 3600 * 4) > 1
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: '{{ $labels.job }} instance {{ $labels.instance }} will exhaust
          its file descriptors soon'
        summary: file descriptors are predicted to be exhausted within 4 hours
    - alert: FdExhaustionClose
      expr: predict_linear(instance:fd_utilization[10m], 3600) > 1
      for: 5m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        description: '{{ $labels.job }} instance {{ $labels.instance }} will exhaust
          its file descriptors soon'
        summary: file descriptors are predicted to be exhausted within 1 hour
    - alert: EtcdMemberCommunicationSlow
      expr: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))
        > 0.15
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: etcd instance {{ $labels.instance }} member communication with
          {{ $labels.To }} is slow
        summary: etcd member communication is slow
    - alert: HighNumberOfFailedProposals
      expr: increase(etcd_server_proposals_failed_total{job="etcd"}[1h]) > 5
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: etcd instance {{ $labels.instance }} has seen {{ $value }} proposal
          failures within the last hour
        summary: a large number of etcd cluster proposals are failing
    - alert: HighFsyncDurations
      expr: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m]))
        > 0.05
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: etcd instance {{ $labels.instance }} fsync durations are high
        summary: etcd WAL (write-ahead log) fsync commit latency is high
    - alert: HighCommitDurations
      expr: histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[5m]))
        > 0.25
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: etcd instance {{ $labels.instance }} commit durations are high
        summary: etcd disk backend commit durations are high
    - alert: EtcdDiskPressure
      expr: sum(etcd_mvcc_db_total_size_in_bytes{job="etcd"}/1024/1024/1024) by (instance) > 200
      for: 60m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "Etcd {{ $labels.instance }} Disk Pressuer"
        summary: "Etcd 数据占用系统盘200G,当前值:{{$value}} "

kube-controller-manager

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  annotations:
    meta.helm.sh/release-namespace: uk8s-monitor
    prometheus-operator-validated: "true"
  name: kube-controller-manager
  namespace: uk8s-monitor
spec:
  groups:
  - name: kube-controller-manage-monitor.rule
    rules:
    - alert: K8SControllerManagerDown
      expr: absent(up{job="kube-controller-manage-monitor"} == 1)
      for: 1m
      labels:
        severity: critical
        cluster: manage-prod
      annotations:
        description: There is no running K8S controller manager. Deployments and replication controllers are not making progress.
        summary: No kubernetes controller manager are reachable
    - alert: K8SControllerManagerDown
      expr: up{job="kube-controller-manage-monitor"} == 0
      for: 1m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: kubernetes controller manager {{ $labels.instance }} is down. {{ $labels.instance }} isn't reachable
        summary: kubernetes controller manager is down
    - alert: K8SControllerManagerUserCPU
      expr: sum(rate(container_cpu_user_seconds_total{pod=~"kube-controller-manager.*",container_name!="POD"}[5m]))by(pod) > 5
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: kubernetes controller manager {{ $labels.instance }} user CPU time is above 5s per second over the last 5 minutes
        summary: kubernetes controller-manager load is high (user CPU > 5s)
   
    - alert: K8SControllerManagerUseMemory
      expr: sum(container_memory_usage_bytes{pod=~"kube-controller-manager.*",container_name!="POD"}/1024/1024)by(pod) > 20
      for: 5m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        description: kubernetes controller manager {{ $labels.instance }} is using more than 20MB of memory
        summary: kubernetes controller-manager memory usage is above 20MB
   
    - alert: K8SControllerManagerQueueTimedelay
      expr: histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket{job="kubernetes-controller-manager"}[5m])) by(le)) > 10
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: kubernetes controller manager {{ $labels.instance }} work-queue latency is above 10s
        summary: kubernetes controller-manager work-queue latency is above 10 seconds; please check the ControllerManager

kubelet

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  annotations:
    meta.helm.sh/release-namespace: uk8s-monitor
    prometheus-operator-validated: "true"
  generation: 3
  labels:
    app: rancher-monitoring
    app.kubernetes.io/instance: rancher-monitoring
    app.kubernetes.io/part-of: rancher-monitoring
  name: kube-kubelet-rules
  namespace: uk8s-monitor
spec:
  groups:
  - name: kubelet.rule
    rules:
    - alert: K8SNodeNotReady
      expr: kube_node_status_condition{condition="Ready",status="true"} == 0
      for: 2m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        summary: "Node status is NotReady"
        description: "The Kubelet on {{ $labels.node }} has not checked in with the API, or has set itself to NotReady, for more than an hour"
    - alert: K8SManyNodesNotReady
      expr: count(kube_node_status_condition{condition="Ready",status="true"} == 0)
        > 1 and (count(kube_node_status_condition{condition="Ready",status="true"} ==
        0) / count(kube_node_status_condition{condition="Ready",status="true"})) > 0.2
      for: 5m
      labels:
        severity: error
        cluster: manage-prod
      annotations:
        summary: "大量的node节点没有Ready"
        description: '{{ $value }}% of Kubernetes nodes are not ready'
    - alert: K8SKubeletDown
      expr: count(up{job="kubelet"} == 0) / count(up{job="kubelet"}) * 100 > 10
      for: 2m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        description: Prometheus failed to scrape {{ $value }}% of kubelets.
        summary: kubelet cannot be scraped
        #- alert: K8SManyKubeletDown
        #  expr: (absent(up{job="kubelet"} == 1) or count(up{job="kubelet"} == 0) / count(up{job="kubelet"}))
        #    * 100 > 30
        #  for: 10m
        #  labels:
        #    severity: warning
        #    cluster: manage-prod
        #  annotations:
        #    description: Prometheus failed to scrape {{ $value }}% of kubelets, or all Kubelets have disappeared from service discovery.
        #    summary: Many Kubelets cannot be scraped
    - alert: K8SKubeletTooManyPods
      expr: kubelet_running_pod_count > 50
      for: 10m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: Kubelet {{$labels.instance}} is running {{$value}} pods, close to the limit of 50
        summary: more than 50 pods on a single node
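
The kubelet_running_pod_count metric used by K8SKubeletTooManyPods was renamed to kubelet_running_pods in newer Kubernetes releases (around 1.19+), so on recent clusters the rule above would return no data. A hedged variant, keeping the same threshold of 50, would be:

kubelet_running_pods > 50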

kube-scheduler

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  annotations:
    meta.helm.sh/release-namespace: uk8s-monitor
    prometheus-operator-validated: "true"
  generation: 3
  labels:
    app: rancher-monitoring
    app.kubernetes.io/instance: rancher-monitoring
    app.kubernetes.io/part-of: rancher-monitoring
  name: kube-scheduler
  namespace: uk8s-monitor
spec:
  groups:
  - name: kube-scheduler.rule
    rules:
    - alert: K8SSchedulerDown
      expr: absent(up{job="kube-scheduler"} == 1)
      for: 1m
      labels:
        severity: critical
        cluster: manage-prod
      annotations:
        description: "There is no running K8S scheduler. New pods are not being assigned to nodes."
        summary: "all k8s scheduler is down"
    - alert: K8SSchedulerDown
      expr: up{job="kube-scheduler"} == 0
      for: 1m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: "K8S scheduler {{ $labels.instance }} is no running. New pods are not being assigned to nodes."
        summary: "k8s scheduler {{ $labels.instance }} is down"
    - alert: K8SSchedulerUserCPU
      expr: sum(rate(container_cpu_user_seconds_total{pod=~"kube-scheduler.*",container_name!="POD"}[5m]))by(pod) > 1
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kubernetes scheduler {{ $labels.instance }} is user cpu time > 1s. {{ $labels.instance }} isn't reachable"
        summary: "kubernetes scheduler 负载较高超过1s,当前值为{{$value}}"
   
    - alert: K8SSchedulerUseMemory
      expr: sum(container_memory_usage_bytes{pod=~"kube-scheduler.*",container_name!="POD"}/1024/1024)by(pod) > 20
      for: 5m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kubernetes scheduler {{ $labels.instance }} is using more than 20MB of memory"
        summary: "kubernetes scheduler memory usage is above 20MB; current value: {{$value}}MB"
   
    - alert: K8SSchedulerPodPending
      expr: sum(scheduler_pending_pods{job="kubernetes-scheduler"})by(queue) > 5
      for: 5m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: "kubernetess scheduler {{ $labels.instance }} is Pending pod More than 5"
        summary: "kubernetes scheduler pod无法调度 > 5,当前值为{{$value}}"
   
    - alert: K8SSchedulerPodPending
      expr: sum(scheduler_pending_pods{job="kubernetes-scheduler"})by(queue) > 10
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: kubernetes scheduler {{ $labels.instance }} has more than 10 pending pods
        summary: "kubernetes scheduler has unschedulable (pending) pods > 10; current value: {{$value}}"
   
    - alert: K8SSchedulerPodBinding
      expr: sum(rate(scheduler_binding_duration_seconds_count{job="kubernetes-scheduler"}[5m])) > 1
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: kubernetes scheduler {{ $labels.instance }}
        summary: "kubernetes scheduler pods cannot be bound, there is a scheduling problem; current value: {{$value}}"
   
    - alert: K8SSchedulerVolumeSpeed
      expr: sum(rate(scheduler_volume_scheduling_duration_seconds_count{job="kubernetes-scheduler"}[5m])) > 1
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: kubernetes scheduler {{ $labels.instance }}
        summary: "kubernetes scheduler pod volume scheduling is slow; current value: {{$value}}"
   
    - alert: K8SSchedulerClientRequestSlow
      expr: histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job="kubernetes-scheduler"}[5m])) by (verb, url, le)) > 1
      for: 5m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        description: kubernetes scheduler {{ $labels.instance }}
        summary: "kubernetes scheduler client requests to the API server are slow; current value: {{$value}}"

kube-state-metrics

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  annotations:
    meta.helm.sh/release-namespace: uk8s-monitor
    prometheus-operator-validated: "true"
  generation: 3
  labels:
    app: rancher-monitoring
    app.kubernetes.io/instance: rancher-monitoring
    app.kubernetes.io/part-of: rancher-monitoring
  name: kube-state-metrics-rules
  namespace: uk8s-monitor
spec:
  groups:
  - name: kube-state-metrics.rules
    rules:
    - alert: DaemonSetNotReady
      expr: kube_daemonset_status_number_ready / kube_daemonset_status_desired_number_scheduled
        * 100 < 100
      for: 15m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: Only {{$value}}% of desired pods scheduled and ready for daemonset {{$labels.namespace}}/{{$labels.daemonset}}
        summary: DaemonSet pod status is not ready
    - alert: DaemonSetsNotScheduled
      expr: kube_daemonset_status_desired_number_scheduled - kube_daemonset_status_current_number_scheduled
        > 0
      for: 10m
      labels:
        severity: warning
        cluster: manage-prod
      annotations:
        description: daemonsets {{$labels.namespace}}/{{$labels.daemonset}} is not scheduled.
        summary: Daemonsets are not scheduled correctly

node

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  annotations:
    meta.helm.sh/release-namespace: uk8s-monitor
    prometheus-operator-validated: "true"
  labels:
    app: rancher-monitoring
    app.kubernetes.io/instance: rancher-monitoring
    app.kubernetes.io/part-of: rancher-monitoring
  name: node-rules
  namespace: uk8s-monitor
spec:
  groups:
  - name: Node.rules
    rules:
    - alert: NodeMemUseHigh
      expr: sum by (node)(node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / sum by (node)(node_memory_MemTotal_bytes) > 0.9
      for: 5m
      labels:
        severity: warning
        cluster: prodidc
      annotations:
        description: "Node memory usage 高于90%"
        summary: "Node memory usage 高于90%"
    - alert: NodeUnschedulable
      expr: sum(kube_node_spec_unschedulable) > 0
      for: 5m
      labels:
        severity: warning
        cluster: prodidc
      annotations:
        description: a node is unschedulable for 5 minutes
        summary: Node is unschedulable
    - alert: NodeExporterDown
      expr: absent(up{job="prometheus-node-exporter"} == 1)
      for: 2m
      labels:
        severity: warning
        cluster: prodidc
      annotations:
        description: Prometheus could not scrape a node-exporter/{{$labels.node}} for more than 2m, or node-exporters have disappeared from discovery
        summary: Prometheus could not scrape a node-exporter
  
    - alert: NodeCpuUseHigh
      expr: 1-avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by (node) > 0.9
      for: 5m
      labels:
        severity: critical
        cluster: prodidc
      annotations:
        description: "node/{{$labels.node}} CPU使用率高于90%"
        summary: "node/{{$labels.node}} CPU使用率高于90%"

deployment

groups:
- name: Deployment.rule
  rules:
  - alert: ReplicasUnavailable
    expr: kube_deployment_status_replicas_unavailable > 0
    for: 5m
    labels:
      severity: P2
      cluster: prodidc
    annotations:
      summary: "Deployment replicas status unavailable"
      description: "Deployment {{ $labels.namespace }}/{{ $labels.deployment }} status is unavailable, have {{ $value }} pod is unavailable more than 15 minutes."
  - alert: DeploymentReplicasNotUpdated
    expr: ((kube_deployment_status_replicas_updated != kube_deployment_spec_replicas)
      or (kube_deployment_status_replicas_available != kube_deployment_spec_replicas))
      unless (kube_deployment_spec_paused == 1)
    for: 15m
    labels:
      severity: P2
      cluster: prodidc
    annotations:
      description: Replicas are not updated and available for deployment {{$labels.namespace}}/{{$labels.deployment}}
      summary: pod count is below the expected replica count

ingress-nginx

groups:
- name: ingress-nginx.rule
  rules:
  - alert: ingressDown
    expr: kube_pod_status_ready{condition="true",pod=~"ingress.*-controller.*"} < 1
    for: 1m
    labels:
      severity: P0
      cluster: prodidc
    annotations:
      summary: "ingress nginx namespace:{{$labels.namespace}} podname:{{$labels.pod}} is Down"
      description: "ingress nginx namespace:{{$labels.namespace}} podname:{{$labels.pod}}, for more than an minute"

  - alert: ingressControllerConn
    expr: sum by (instance)(avg_over_time(nginx_ingress_controller_nginx_process_connections{}[2m])) > 100000 
    for: 5m
    labels:
      severity: P1
      cluster: prodidc
    annotations:
      summary: "ingress nginx {{$labels.instance}} connection more than 100000"
      description: "ingress nginx {{$labels.instance}} connection more than 100000, for more than five minute"

  - alert: ingressMemUsage
    expr: sum(container_memory_working_set_bytes{pod=~"ingress-.*",container!~"|filebeat|POD"} ) by (namespace,pod,service) / sum(container_spec_memory_limit_bytes{pod=~"ingress-.*",container!~"|filebeat|POD",namespace!=""}) by (namespace,pod,service) * 100 > 90  and (sum(container_memory_working_set_bytes) by (namespace,pod,service)/sum(container_spec_memory_limit_bytes) by (namespace,pod,service)) != Inf
    for: 2m
    labels:
      severity: P1
      cluster: prodidc
    annotations:
      summary: "ingress controller {{$labels.instance}} memory useage moth than 90%"
      description: "ingress controller {{$labels.instance}} memory useage moth than 90%, for more than five minute"

  - alert: ingressCpuUsage
    expr: sum(rate(container_cpu_usage_seconds_total{pod=~"ingress-.*",image!=""}[1m])) by (pod, namespace,service) / (sum(container_spec_cpu_quota{pod=~"ingress-.*",image!=""}/100000) by (pod, namespace,service)) * 100  > 90 and  sum by (pod,namespace,service)( rate(container_cpu_usage_seconds_total{image!="", namespace!=""}[1m] ) ) * 100  != Inf
    for: 2m
    labels:
      severity: P1
      cluster: prodidc
    annotations:
      summary: "ingress controller {{$labels.instance}} memory useage moth than 90%"
      description: "ingress controller {{$labels.instance}} memory useage moth than 90%, for more than five minute"

  - alert: controllerSSLtime
    expr: nginx_ingress_controller_ssl_expire_time_seconds < (time() + (20 * 24 * 3600))
    labels:
      severity: P2
      cluster: prodidc
    annotations:
      summary: "ingress ssl证书有效期小于20天"
      description: "ingress controller ssl time less than 20 day"

pod

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  annotations:
    meta.helm.sh/release-namespace: uk8s-monitor
    prometheus-operator-validated: "true"
  labels:
    app: rancher-monitoring
    app.kubernetes.io/instance: rancher-monitoring
    app.kubernetes.io/part-of: rancher-monitoring
  name: pod-rules
  namespace: uk8s-monitor
spec:
  groups:
  - name: Pods.rule
    rules:
    - alert: PodHighCPUUsage
      expr: sum(rate(container_cpu_usage_seconds_total{image!=""}[1m])) by (pod, namespace,service) / (sum(container_spec_cpu_quota{image!=""}/100000) by (pod, namespace,service)) * 100  > 95 and  sum by (pod,namespace,service)( rate(container_cpu_usage_seconds_total{image!="", namespace!=""}[1m] ) ) * 100  != Inf
      for: 5m
      labels:
        severity: info
        cluster: manage-prod
      annotations:
        current_value: '{{$value}}'
        summary:  "Pod cpu Usage 高于95%"
        description: "{{ $labels.namespace }}.{{ $labels.pod  }} cpu usage is high above 95%  for more than 5 minute."
   
    - alert: PodRssMemHigh
      expr: sum by(namespace, pod) (container_memory_rss{container!~"|filebeat|POD",namespace!="",pod!=""}) / sum by(namespace, pod) (container_spec_memory_limit_bytes{container!~"|filebeat|POD",namespace!="",pod!=""}) * 100 > 95 and (sum by(namespace, pod) (container_memory_rss) / sum by(namespace, pod) (container_spec_memory_limit_bytes)) != +Inf
      for: 10m
      labels:
        cluster: manage-prod
        severity: info
      annotations:
        description: "Pod ({{$labels.namespace}}/{{$labels.pod}}) Rss memory 高于95%,请检查合理性"
        summary: "Pod ({{$labels.namespace}}/{{$labels.pod}}) Rss memory 高于95%,请检查合理性"
   
    - alert: PodNotReady
      expr: sum by (namespace, pod) (kube_pod_status_ready{condition="true",pod!~".*-backup-.*"} != 1)
      for: 5m
      labels:
        severity: critical
        cluster: manage-prod
      annotations:
        summary: "Pod ({{$labels.namespace}}/{{$labels.pod}}) is NotReady"
        description: "服务({{$labels.namespace}}/{{$labels.pod}})异常,请尽快检查"

    - alert: PodDown
      annotations:
        container: '{{ $labels.container }}'
        k8scluster: '{{ $labels.k8scluster}}'
        namespace: '{{ $labels.namespace }}'
        pod: '{{ $labels.pod }}'
        summary: Pod Down
      expr: kube_pod_status_phase{phase="Unknown"} == 1 or kube_pod_status_phase{phase="Failed"}
        == 1
      for: 1m
      labels:
        cluster: manage-prod
        receiver_group: '{{ $labels.k8scluster}}_{{ $labels.namespace }}'
        service: prometheus_bot
        severity: critical
    - alert: PodRestart
      annotations:
        container: '{{ $labels.container }}'
        k8scluster: '{{ $labels.k8scluster}}'
        namespace: '{{ $labels.namespace }}'
        summary: Pod Restart
      expr: changes(kube_pod_container_status_restarts_total{pod !~ "analyzer.*"}[10m])
        > 0
      for: 1m
      labels:
        pod: '{{ $labels.pod }}'
        receiver_group: '{{ $labels.k8scluster}}_{{ $labels.namespace }}'
        service: prometheus_bot
        severity: critical
        cluster: manage-prod
    - alert: PodOOMKilled
      annotations:
        container: '{{ $labels.container }}'
        k8scluster: '{{ $labels.k8scluster}}'
        namespace: '{{ $labels.namespace }}'
        pod: '{{ $labels.pod }}'
        summary: Pod OOMKilled
      expr: changes(kube_pod_container_status_terminated_reason{reason="OOMKilled"}[1m])
        > 0
      for: 1m
      labels:
        receiver_group: '{{ $labels.k8scluster}}_{{ $labels.namespace }}'
        service: prometheus_bot
        severity: critical
        cluster: manage-prod
