Loki-based log collection solution

Background: our systems currently produce only a modest volume of logs, and the ELK stack is simply too heavy and resource-hungry for that, so we decided to use Loki + Promtail + Alertmanager + Grafana as the log collection stack. The Docker-based deployment is covered first, followed by a Kubernetes-based one.

Docker-based deployment

1. Loki deployment

1. Create the directories
mkdir -p /opt/loki/{chunks,index,rules}

2. Prepare the configuration file
vim /opt/loki/loki-config.yaml

loki-config.yaml

auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 3110
  grpc_server_max_recv_msg_size: 1073741824  # max gRPC message size the server accepts; default 4 MB
  grpc_server_max_send_msg_size: 1073741824  # max gRPC message size the server sends; default 4 MB

ingester:
  lifecycler:
    address: 127.0.0.1
    ring:
      kvstore:
        store: inmemory
      replication_factor: 1
    final_sleep: 0s
  chunk_idle_period: 5m
  chunk_retain_period: 30s
  max_transfer_retries: 0
  max_chunk_age: 20m  # maximum time a chunk may stay in memory; once a stream's chunk is older than this, it is flushed to storage and a new chunk is started

schema_config:
  configs:
    - from: 2021-01-01
      store: boltdb
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 168h

storage_config:
  boltdb:
    directory: /opt/loki/index  # where index files are stored
  filesystem:
    directory: /opt/loki/chunks

limits_config:
  enforce_metric_name: false
  reject_old_samples: true
  reject_old_samples_max_age: 168h
  ingestion_rate_mb: 30  # per-user ingestion rate limit in MB per second; default 4
  ingestion_burst_size_mb: 15  # per-user ingestion burst size in MB; default 6

chunk_store_config:
  # max_look_back_period: 168h  # maximum time to look back when querying logs; applies to instant queries only
  # max_look_back_period: 0s    # 0s means no limit

# table_manager:
#   retention_deletes_enabled: false  # switch for retention-based deletion; default false
#   retention_period: 0s              # log retention period

ruler:
  storage:
    type: local
    local:
      directory: /etc/rules
  rule_path: /tmp/scratch
  alertmanager_url: http://alertmanager-ip:9093
  ring:
    kvstore:
      store: inmemory
  enable_api: true 
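
The ruler block above points at a local rules directory (/etc/rules, mounted from /opt/loki/rules in the deployment step below) and an Alertmanager, but no rule file is shown. As a minimal sketch (rule name, filter and threshold are made up for illustration), an alerting rule can be dropped into that directory; with auth_enabled: false Loki usually expects a per-tenant subdirectory named fake, i.e. /opt/loki/rules/fake/rules.yaml:

groups:
  - name: system-log-alerts
    rules:
      - alert: HighErrorRate            # hypothetical rule name
        # LogQL metric query: error lines per second in the "system" job over 5 minutes
        expr: sum(rate({job="system"} |= "error" [5m])) > 1
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: "More than 1 error line per second in the system logs"

After adding or changing a rule file, restart the Loki container (or wait for the ruler to re-read the directory) and check http://loki-ip:3100/loki/api/v1/rules to confirm the rule was loaded.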

3. Deploy
docker run -d \
--name loki --privileged=true \
--restart=always \
-v /opt/loki/loki-config.yaml:/mnt/config/loki-config.yaml \
-v /opt/loki/rules:/etc/rules \
-v /opt/loki/index:/opt/loki/index \
-v /opt/loki/chunks:/opt/loki/chunks \
-p 3100:3100 \
grafana/loki:2.1.0 -config.file=/mnt/config/loki-config.yaml
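
Once the container is up, a quick sanity check against Loki's HTTP API (run on the host, or substitute the host's IP):

# returns "ready" once startup has finished (this can take ~15 seconds)
curl http://localhost:3100/ready

# lists the label names Loki has indexed; stays empty until Promtail starts pushing logs
curl http://localhost:3100/loki/api/v1/labels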

2. Promtail

1. Create the configuration file
vim /opt/promtail/promtail-config.yaml

server:
  http_listen_port: 9080
  grpc_listen_port: 0

positions:
  filename: /tmp/positions.yaml

# Loki server(s) that Promtail pushes logs to
clients:
  - url: http://loki-ip:3100/loki/api/v1/push

scrape_configs:
  - job_name: system
    pipeline_stages:
    static_configs:
      - targets:
          - 192.168.100.101
        labels:
          # labels attached to this log stream; used later when querying
          job: system
          host: 192.168.100.101-ntp
          __path__: /var/log/messages
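
The job and host entries above become the labels of the resulting log stream (__path__ tells Promtail which files to tail), so once Promtail is running these logs can be selected in Grafana Explore or via the Loki API with a LogQL expression such as:

{job="system", host="192.168.100.101-ntp"} |= "error"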

2. Deploy

docker run -d \
--name promtail \
--privileged=true \
--restart=always \
-v /opt/promtail/promtail-config.yaml:/mnt/config/promtail-config.yaml \
-v /var/log:/var/log:ro \
grafana/promtail:2.1.0 -config.file=/mnt/config/promtail-config.yaml
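
To confirm Promtail is actually shipping /var/log/messages, query Loki's HTTP API with the labels defined above (replace loki-ip with the address configured in the clients section):

curl -G -s "http://loki-ip:3100/loki/api/v1/query_range" \
  --data-urlencode 'query={job="system"}' \
  --data-urlencode 'limit=5'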

3. Alertmanager

vim /opt/alertmanager/alertmanager.yml

global:
  resolve_timeout: 5m
  smtp_from: 'ERPDB@XXXXXXX.net'
  smtp_smarthost: 'mail.XXXXXX.net:25'
  smtp_auth_username: 'root'
  smtp_auth_password: '123456'
  smtp_hello: 'XXXXXXXX.net'
route:
  repeat_interval: 1h
  receiver: 'default-receiver'

receivers:
- name: 'default-receiver'
  email_configs:
  - to: 'XXXXXX@qq.com'
    send_resolved: true

Deploy

docker run -d \
--name alertmanager \
--restart=always \
-p 9093:9093 \
-v /opt/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml \
prom/alertmanager
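
The SMTP settings are easiest to verify by pushing a test alert straight into Alertmanager's v2 API; if the mail configuration is correct, default-receiver should get an email after the default group_wait (the alert name and labels below are arbitrary test values):

curl -XPOST http://localhost:9093/api/v2/alerts \
  -H 'Content-Type: application/json' \
  -d '[{"labels":{"alertname":"TestAlert","severity":"warning"},"annotations":{"summary":"manual test alert"}}]'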

Kubernetes-based deployment

kubectl create namespace log

alertmanager.yml

apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-config
  namespace: log
data:
  alertmanager.yml: |
    global:
      resolve_timeout: 5m
      smtp_from: 'ERPDB@XXXXXXX.net'
      smtp_smarthost: 'mail.XXXXXX.net:25'
      smtp_auth_username: 'root'
      smtp_auth_password: '123456'
      smtp_hello: 'XXXXXXXX.net'
    route:
      repeat_interval: 1h
      receiver: 'default-receiver'
    receivers:
    - name: 'default-receiver'
      email_configs:
      - to: 'XXXXXX@qq.com'
        send_resolved: true

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: alertmanager
  namespace: log
spec:
  replicas: 1
  selector:
    matchLabels:
      app: alertmanager
  template:
    metadata:
      labels:
        app: alertmanager
    spec:
      containers:
      - name: alertmanager
        image: prom/alertmanager
        args:
        - "--config.file=/etc/alertmanager/alertmanager.yml"
        ports:
        - containerPort: 9093
        volumeMounts:
        - name: alertmanager-config
          mountPath: /etc/alertmanager
      volumes:
      - name: alertmanager-config
        configMap:
          name: alertmanager-config

---
apiVersion: v1
kind: Service
metadata:
  name: alertmanager
  namespace: log
spec:
  type: NodePort
  ports:
  - protocol: TCP
    port: 9093
    targetPort: 9093
    nodePort: 30000  # pick an unused NodePort
  selector:
    app: alertmanager
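
Assuming the manifest above is saved as alertmanager.yml, it can be applied and checked like this (30000 is the NodePort declared in the Service; <node-ip> is any cluster node):

kubectl apply -f alertmanager.yml
kubectl -n log get pods -l app=alertmanager
kubectl -n log get svc alertmanager
curl http://<node-ip>:30000/api/v2/status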

loki.yml

apiVersion: v1
kind: ConfigMap
metadata:
  name: loki-config
  namespace: log
data:
  loki-config.yaml: |
    auth_enabled: false
    server:
      http_listen_port: 3100
    ingester:
      lifecycler:
        address: 127.0.0.1
        ring:
          kvstore:
            store: inmemory
          replication_factor: 1
        final_sleep: 0s
      chunk_idle_period: 5m
      chunk_retain_period: 30s
      max_transfer_retries: 0
      max_chunk_age: 20m  
    schema_config:
      configs:
        - from: 2021-01-01
          store: boltdb
          object_store: filesystem
          schema: v11
          index:
            prefix: index_
            period: 168h
    storage_config:
      boltdb:
        directory: /opt/loki/index  # where index files are stored
      filesystem:
        directory: /opt/loki/chunks
    limits_config:
      enforce_metric_name: false
      reject_old_samples: true
      reject_old_samples_max_age: 168h
      ingestion_rate_mb: 30
      ingestion_burst_size_mb: 15
    table_manager:
      retention_period: 17472h
    ruler:
      storage:
        type: local
        local:
          directory: /opt/loki/rules
      rule_path: /tmp/scratch
      alertmanager_url: http://alertmanager.log.svc.cluster.local:9093
      ring:
        kvstore:
          store: inmemory
      enable_api: true 
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: loki
  namespace: log
spec:
  selector:
    matchLabels:
      app: loki
  template:
    metadata:
      labels:
        app: loki
    spec:
      initContainers:
        - name: change-group
          image: ubuntu
          command: ["chown", "10001:10001", "/opt/loki"]
          volumeMounts:
            - name: loki-data
              mountPath: /opt/loki
      containers:
        - name: loki
          image: grafana/loki:2.8.0
          args:
            - -config.file=/etc/loki/loki-config.yaml
          ports:
            - containerPort: 3100  # port Loki listens on
          securityContext:
            runAsGroup: 0
            runAsUser: 0
          volumeMounts:
            - name: loki-config
              mountPath: /etc/loki
            - name: loki-data              
              mountPath: /opt/loki
      volumes:
        - name: loki-config
          configMap:
            name: loki-config
        - name: loki-data
          hostPath:
            path: /opt/loki

---
apiVersion: v1
kind: Service
metadata:
  name: loki
  namespace: log
spec:
  selector:
    app: loki
  type: NodePort
  ports:
    - protocol: TCP
      port: 3100
      targetPort: 3100
      nodePort: 31000  # another free NodePort, e.g. 31000
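
With loki.yml applied, the ruler, ingester and HTTP API all run in the single loki pod. A quick check that it is ready and reachable through the NodePort (note that the hostPath /opt/loki must exist on the node the pod is scheduled to):

kubectl apply -f loki.yml
kubectl -n log get pods -l app=loki
curl http://<node-ip>:31000/ready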


promtail.yml

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: promtail-config
  namespace: log
data:
  promtail.yaml: |
    server:
      http_listen_port: 9080
      grpc_listen_port: 0
    clients:
    - url: http://loki:3100/loki/api/v1/push
    positions:
      filename: /run/promtail/positions.yaml
    target_config:
      sync_period: 10s
    scrape_configs:
    - job_name: pod-logs
      kubernetes_sd_configs:
        - role: pod
      pipeline_stages:
        - docker: {}
      relabel_configs:
        - source_labels:
            - __meta_kubernetes_pod_node_name
          target_label: __host__
        - action: labelmap
          regex: __meta_kubernetes_pod_label_(.+)
        - action: replace
          replacement: $1
          separator: /
          source_labels:
            - __meta_kubernetes_namespace
            - __meta_kubernetes_pod_name
          target_label: job
        - action: replace
          source_labels:
            - __meta_kubernetes_namespace
          target_label: namespace
        - action: replace
          source_labels:
            - __meta_kubernetes_pod_name
          target_label: pod
        - action: replace
          source_labels:
            - __meta_kubernetes_pod_container_name
          target_label: container
        - replacement: /var/log/pods/*$1/*.log
          separator: /
          source_labels:
            - __meta_kubernetes_pod_uid
            - __meta_kubernetes_pod_container_name
          target_label: __path__
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: promtail-clusterrole
rules:
  - apiGroups: [""]
    resources:
    - nodes
    - services
    - pods
    verbs:
    - get
    - watch
    - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: promtail-serviceaccount
  namespace: log
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: promtail-clusterrolebinding
subjects:
    - kind: ServiceAccount
      name: promtail-serviceaccount
      namespace: log
roleRef:
    kind: ClusterRole
    name: promtail-clusterrole
    apiGroup: rbac.authorization.k8s.io
--- 
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: promtail-daemonset
  namespace: log
spec:
  selector:
    matchLabels:
      app: promtail
  template:
    metadata:
      labels:
        app: promtail
    spec:
      serviceAccountName: promtail-serviceaccount
      securityContext:
        runAsGroup: 0
        runAsUser: 0
      #tolerations:
      #- effect: NoSchedule
      #  key: node-role.kubernetes.io/master
      #  operator: Exists
      containers:
      - name: promtail-container
        image: grafana/promtail
        args:
        - -config.file=/etc/promtail/promtail.yaml
        env: 
        - name: 'HOSTNAME' # needed when using kubernetes_sd_configs
          valueFrom:
            fieldRef:
              fieldPath: 'spec.nodeName'
        volumeMounts:
        - name: run
          mountPath: /run/promtail
        - name: logs
          mountPath: /var/log
        - name: promtail-config
          mountPath: /etc/promtail
        - mountPath: /var/lib/docker/containers
          name: varlibdockercontainers
          readOnly: true
      volumes:
      - name: logs
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: run
        hostPath:
          path: /run/promtail
      - name: promtail-config
        configMap:
          name: promtail-config
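
After applying promtail.yml, one promtail pod should run on every (non-tainted) node, and the relabel_configs above turn pod metadata into the namespace, pod and container labels used for querying:

kubectl apply -f promtail.yml
kubectl -n log get pods -l app=promtail -o wide
# in Grafana Explore (Loki data source) the pod logs can then be selected with e.g.
#   {namespace="log", container="loki"}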


grafana.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: grafana
  namespace: log
spec:
  replicas: 1
  selector:
    matchLabels:
      app: grafana
  template:
    metadata:
      labels:
        app: grafana
    spec:
      initContainers:
        - name: change-group
          image: ubuntu
          command: ["chown", "472:472", "/var/lib/grafana"]
          volumeMounts:
            - name: grafana-storage
              mountPath: /var/lib/grafana
      containers:
        - name: grafana
          image: grafana/grafana:9.0.0
          ports:
            - containerPort: 3000
          volumeMounts:
            - name: grafana-storage
              mountPath: /var/lib/grafana
      volumes:
        - name: grafana-storage
          hostPath:
            path: /opt/grafana

---

apiVersion: v1
kind: Service
metadata:
  name: grafana
  namespace: log
spec:
  selector:
    app: grafana
  type: NodePort
  ports:
    - protocol: TCP
      port: 3000
      targetPort: 3000
      nodePort: 30001
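
Grafana is then reachable on any node at port 30001 (default login admin/admin). The Loki data source can be added in the UI under Configuration → Data sources with the URL http://loki.log.svc.cluster.local:3100, or provisioned declaratively. A minimal provisioning sketch (it would have to be mounted into the pod under /etc/grafana/provisioning/datasources, which the Deployment above does not do yet):

apiVersion: 1
datasources:
  - name: Loki
    type: loki
    access: proxy
    url: http://loki.log.svc.cluster.local:3100
    isDefault: true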

