java+log日志服务器_Logserver日志服务器结构

一个日志服务架构主要包括3个部分:日志采集agent,日志存储,及日志浏览

本日志服务方案采用logstash+elasticsearch+kibana的组合搭建,其中logstash负责日志的采集和入库,elasticsearch负责日志的存储和索引,kibana负责日志的搜索和前端展示

在实际部署过程中,为保证可用和节省资源,主要注意以下几点:

1.logstash支持很多socket远程input和output插件,但为不影响主干业务流,尽量采用日志文件异步读出写入es的方式,如log4j支持SocketAppender,但需要修改既有应用的配置,而且一旦server端出错,应用日志没有本地文件,容易丢失

2.logstash agent可以直接写入es,但为减小es的压力,采用redis和logstash indexer的角色来延长消息传递流程,降低各环节的压力

3.logstash基于java实现,为节约系统资源,不论agent还是indexer,单台物理机只配置一个文件,采用一个守护进程启动服务

a4c26d1e5885305701be709a3d33442f.png

cat /usr/local/logserver/logstash/conf/allinone_indexer.conf

# allinone_indexer.conf — input section.
# Fourteen Redis list inputs, one per application log stream, all pulled
# from the same Redis broker (10.241.223.112). Each input tags events with
# a `type` that the multiline filters below match on; the Redis list key
# mirrors the type name. `format => "json_event"` expects events already
# serialized by the upstream logstash agents.
input {
  redis {
    host      => "10.241.223.112"
    type      => "log4j-account"
    data_type => "list"
    key       => "log4j-account"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "log4j-activity"
    data_type => "list"
    key       => "log4j-activity"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "log4j-databag"
    data_type => "list"
    key       => "log4j-databag"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "log4j-document"
    data_type => "list"
    key       => "log4j-document"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "log4j-job"
    data_type => "list"
    key       => "log4j-job"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "log4j-monitor"
    data_type => "list"
    key       => "log4j-monitor"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "log4j-notification"
    data_type => "list"
    key       => "log4j-notification"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "log4j-qto"
    data_type => "list"
    key       => "log4j-qto"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "log4j-search"
    data_type => "list"
    key       => "log4j-search"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "log4j-social"
    data_type => "list"
    key       => "log4j-social"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "log4j-storage"
    data_type => "list"
    key       => "log4j-storage"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "tomcat-account"
    data_type => "list"
    key       => "tomcat-account"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "tomcat-databag"
    data_type => "list"
    key       => "tomcat-databag"
    format    => "json_event"
  }
  redis {
    host      => "10.241.223.112"
    type      => "tomcat-monitor"
    data_type => "list"
    key       => "tomcat-monitor"
    format    => "json_event"
  }
}

# allinone_indexer.conf — filter section.
# One multiline filter per event type: any line beginning with whitespace
# (pattern "^\s") is folded into the previous event, so stack traces and
# wrapped log lines stay attached to the log entry that produced them.
filter {
  multiline {
    type    => "log4j-account"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "log4j-activity"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "log4j-databag"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "log4j-document"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "log4j-job"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "log4j-monitor"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "log4j-notification"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "log4j-qto"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "log4j-search"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "log4j-social"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "log4j-storage"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "tomcat-account"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "tomcat-databag"
    pattern => "^\s"
    what    => "previous"
  }
  multiline {
    type    => "tomcat-monitor"
    pattern => "^\s"
    what    => "previous"
  }
}

# allinone_indexer.conf — output section.
# All indexed events are shipped to a single Elasticsearch node over the
# transport protocol (port 9300); the cluster name must match the ES
# cluster.name setting or the node join will fail.
output {
  elasticsearch {
    host    => "10.241.223.112"
    cluster => "logstashelasticsearch"
    port    => 9300
  }
}

cat /usr/local/logserver/logstash/conf/nginx_agent.conf

# nginx_agent.conf — tail every nginx log file and push each line onto the
# "nginx" Redis list on the central broker, where an indexer picks it up.
input {
  file {
    type => "nginx"
    path => ["/data/logs/nginx/*.log"]
  }
}

output {
  redis {
    host      => "10.241.223.112"
    data_type => "list"
    key       => "nginx"
    type      => "nginx"
  }
}

cat /usr/local/logserver/logstash/startup.sh

#!/bin/bash
# startup.sh — start logstash agent and/or indexer processes for the named apps.
#
# Usage: startup.sh [ agent | indexer | all ] app1 app2 app3 ...
#
# For each app name, launches `java -jar logstash.jar agent -f conf/<app>_<mode>.conf`
# in the background; mode "all" launches both the agent and the indexer config.
#
# Fixes over the previous version:
#   * the usage() heredoc was broken (`cat <` with no <<EOF1 redirection);
#   * misuse now exits with status 1 instead of 0 (`usage && exit` exited 0);
#   * all expansions are quoted so paths with spaces don't word-split.

workdir=$(cd "$(dirname "$0")" && pwd)
#echo "$workdir"

usage() {
  cat <<EOF1
This script starts, stops and restarts logstash agent and indexer of apps
Usage: $(basename "$0") [ agent | indexer | all ] app1 app2 app3 ...
EOF1
}

# Need at least a mode word and one app name.
if [ $# -lt 2 ]; then
  usage
  exit 1
fi

start_mode=$1

for appname in "$@"; do
  case "$appname" in
    agent | indexer | all)
      # The mode word itself is also in "$@"; skip it.
      continue
      ;;
    *)
      if [ "$start_mode" = "all" ]; then
        # "all": launch both the indexer and the agent for this app.
        java -jar "$workdir/logstash.jar" agent \
          -f "$workdir/conf/${appname}_indexer.conf" &
        java -jar "$workdir/logstash.jar" agent \
          -f "$workdir/conf/${appname}_agent.conf" &
      else
        java -jar "$workdir/logstash.jar" agent \
          -f "$workdir/conf/${appname}_${start_mode}.conf" &
      fi
      ;;
  esac
  echo
done

cat /usr/local/logserver/logstash/indexer_startup.sh

#!/bin/bash
# indexer_startup.sh — launch the logstash indexer using the combined
# (all-in-one) config, which reads every app's Redis list in one process.
#
# Alternative: start per-app indexers instead, e.g.:
#/usr/local/logserver/logstash/startup.sh indexer account activity databag

/usr/local/logserver/logstash/startup.sh indexer allinone

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值