Setting up Grafana's open-source Loki log system for the first time

192.168.1.11 grafana + promtail + nginx
192.168.1.12 loki

Promtail collects the nginx logs and pushes them to Loki; Grafana reads the log data back out of Loki.
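For reference, the ports used in this setup are: nginx on 80, Promtail's HTTP server on 9080 and Grafana on 3000 (all on 192.168.1.11), and Loki on 3100 on 192.168.1.12. Promtail and Grafana both need to be able to reach 192.168.1.12:3100 over the network.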

nginx

yum -y install gcc make pcre-devel openssl-devel
tar xf nginx-1.17.6.tar.gz
cd nginx-1.17.6/
./configure  --prefix=/usr/local/nginx  --user=nginx  --with-http_ssl_module
make && make install
useradd nginx -s /sbin/nologin
/usr/local/nginx/sbin/nginx
curl localhost:80
tail /usr/local/nginx/logs/access.log
127.0.0.1 - - [09/Oct/2022:22:13:41 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0"
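nginx is started here straight from its binary. If you would rather manage it the same way as promtail and loki further below, a minimal systemd unit could look like this (a sketch; the paths and pid file location follow from the --prefix used above):

cat <<EOF >/etc/systemd/system/nginx.service
[Unit]
Description=nginx server
After=network-online.target

[Service]
Type=forking
PIDFile=/usr/local/nginx/logs/nginx.pid
ExecStart=/usr/local/nginx/sbin/nginx
ExecReload=/usr/local/nginx/sbin/nginx -s reload
ExecStop=/usr/local/nginx/sbin/nginx -s quit

[Install]
WantedBy=multi-user.target
EOF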

promtail

wget https://github.com/grafana/loki/releases/download/v2.2.1/promtail-linux-amd64.zip
mkdir -pv /opt/app/promtail
cat <<EOF> /opt/app/promtail/promtail.yaml
server:
  http_listen_port: 9080
  grpc_listen_port: 0

positions:
  filename: /var/log/positions.yaml # This location needs to be writeable by promtail.

clients:
  - url: http://192.168.1.12:3100/loki/api/v1/push   # address of Loki's push API

scrape_configs:
 - job_name: system
   pipeline_stages:
   static_configs:
   - targets:
      - localhost
     labels:
      job: varlogs     # this label can be searched on in Grafana
      host: yourhost
      __path__: /usr/local/nginx/logs/*.log   # path glob of the logs to collect
EOF
cat <<EOF >/etc/systemd/system/promtail.service
[Unit]
Description=promtail server
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=/opt/app/promtail/promtail -config.file=/opt/app/promtail/promtail.yaml
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=promtail
[Install]
WantedBy=default.target
EOF
unzip promtail-linux-amd64.zip
mv promtail-linux-amd64 /opt/app/promtail/promtail
chmod a+x /opt/app/promtail/promtail
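Optionally, before starting the service, you can sanity-check the config by running promtail once in the foreground; recent Promtail versions also accept a -dry-run flag that prints the scraped entries instead of pushing them to Loki:

/opt/app/promtail/promtail -config.file=/opt/app/promtail/promtail.yaml -dry-run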
systemctl daemon-reload && systemctl restart promtail && systemctl status promtail

http://192.168.1.11:9080/targets
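The same information is available from the command line via Promtail's HTTP server on port 9080, e.g.:

curl http://192.168.1.11:9080/targets
curl -s http://192.168.1.11:9080/metrics | grep promtail_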

grafana

cat >/etc/yum.repos.d/grafana.repo<<EOF
[grafana]
name=grafana
baseurl=https://mirrors.tuna.tsinghua.edu.cn/grafana/yum/rpm
repo_gpgcheck=0
gpgkey=https://packages.grafana.com/gpg.key
enabled=1
EOF
yum install grafana-8.1.2-1.x86_64
systemctl start grafana-server.service && systemctl status grafana-server.service

http://192.168.1.11:3000
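Log in with the default admin / admin account (Grafana will prompt you to set a new password on first login). A quick health check from the command line:

curl http://192.168.1.11:3000/api/health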

loki 192.168.1.12

wget https://github.com/grafana/loki/releases/download/v2.2.1/loki-linux-amd64.zip
mkdir -pv /opt/app/loki
cat <<EOF> /opt/app/loki/loki.yaml
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096

ingester:
  wal:
    enabled: true
    dir: /opt/app/loki/wal
  lifecycler:
    address: 127.0.0.1
    ring:
      kvstore:
        store: inmemory
      replication_factor: 1
    final_sleep: 0s
  chunk_idle_period: 1h       # Any chunk not receiving new logs in this time will be flushed
  max_chunk_age: 1h           # All chunks will be flushed when they hit this age, default is 1h
  chunk_target_size: 1048576  # Loki will attempt to build chunks up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
  chunk_retain_period: 30s    # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
  max_transfer_retries: 0     # Chunk transfers disabled

schema_config:
  configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h

storage_config:
  boltdb_shipper:
    active_index_directory: /opt/app/loki/boltdb-shipper-active
    cache_location: /opt/app/loki/boltdb-shipper-cache
    cache_ttl: 24h         # Can be increased for faster performance over longer query periods, uses more disk space
    shared_store: filesystem
  filesystem:
    directory: /opt/app/loki/chunks

compactor:
  working_directory: /opt/app/loki/boltdb-shipper-compactor
  shared_store: filesystem

limits_config:
  reject_old_samples: true
  reject_old_samples_max_age: 168h

chunk_store_config:
  max_look_back_period: 0s

table_manager:
  retention_deletes_enabled: false
  retention_period: 0s


ruler:
  storage:
    type: local
    local:
      directory: /opt/app/loki/rules
  rule_path: /opt/app/loki/rules-temp
  alertmanager_url: http://localhost:9093
  ring:
    kvstore:
      store: inmemory
  enable_api: true
EOF
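Loki normally creates these data directories itself on startup (the service below runs as root), but you can pre-create them to make the on-disk layout explicit:

mkdir -pv /opt/app/loki/{wal,chunks,rules,rules-temp,boltdb-shipper-active,boltdb-shipper-cache,boltdb-shipper-compactor}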
cat <<EOF >/etc/systemd/system/loki.service
[Unit]
Description=loki server
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=/opt/app/loki/loki -config.file=/opt/app/loki/loki.yaml
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=loki
[Install]
WantedBy=default.target
EOF
unzip loki-linux-amd64.zip
mv loki-linux-amd64 /opt/app/loki/loki
chmod a+x /opt/app/loki/loki
systemctl daemon-reload && systemctl restart loki && systemctl status loki
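Once the service is up, Loki's HTTP API on port 3100 can be used to check readiness and to confirm that Promtail's labels are arriving:

curl http://192.168.1.12:3100/ready
curl http://192.168.1.12:3100/loki/api/v1/labels
curl http://192.168.1.12:3100/loki/api/v1/label/job/values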

Configure the Loki data source in Grafana
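In Grafana 8 this is done under Configuration -> Data sources -> Add data source -> Loki: set the URL to http://192.168.1.12:3100, leave the other options at their defaults, and click Save & Test.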


Query

{job="varlogs"}


{job="varlogs",filename="/usr/local/nginx/logs/access.log"}


Filter on a single condition
{job="varlogs",filename="/usr/local/nginx/logs/access.log"} |="404"


Filter on multiple conditions
{job="varlogs",filename="/usr/local/nginx/logs/access.log"} |="404" |= "Windows"


Search over a time range (set with the time-range picker at the top of Grafana's Explore view)


Count log lines
count_over_time({job="varlogs"}[30m]) 
count_over_time({job="varlogs"}|="404"[12h]) 
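count_over_time is a metric query (it produces a range-vector style result that Grafana renders as a graph), and it can be combined with the usual aggregation functions, for example:

sum(count_over_time({job="varlogs"}[30m]))
rate({job="varlogs"} |= "404" [5m])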


LogQL syntax

Regular expressions follow the RE2 syntax described at https://github.com/google/re2/wiki/Syntax.

Log stream selector

The label part of a query expression goes inside {}; multiple label matchers are separated by commas:

  =   exactly equal.
  !=  not equal.
  =~  regex matches.
  !~  regex does not match.
{job="varlogs",filename="/usr/local/nginx/logs/access.log"}
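The regex matchers come in handy when a label can take several values; for example (the label values here are just illustrative):

{job=~"varlogs|syslog"}
{job="varlogs",filename=~".*access.*"}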

Line filter expressions

Currently supported operators:

  |=   the line contains the string.
  !=   the line does not contain the string.
  |~   the line matches the regular expression.
  !~   the line does not match the regular expression.

After writing the log stream selector, you can filter the results further with a search expression. A search expression can be plain text or a regular expression.

{job="mysql"} |= "error"
{name="kafka"} |~ "tsdb-ops.*io:2003"
{instance=~"kafka-[23]",name="kafka"} != "kafka.server:type=ReplicaManager"

Multiple filters can be chained:

{job="mysql"} |= "error" != "timeout"

Counting

count_over_time({job="varlogs"}[30m]) 
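Metric queries can also be aggregated by label, e.g. counting lines per log file using the labels defined in the Promtail config above:

sum by (filename) (count_over_time({job="varlogs"}[1h]))
topk(3, sum by (filename) (count_over_time({job="varlogs"}[1h])))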