In the previous post, the timestamp parsed out of my log string was assigned to @timestamp. Sometimes, however, you need to keep the real value of that field, so you can instead use the configuration below to store the converted time in a field of your choosing. The configuration is as follows:
input {
    file {
        path => "/home/hadoop1/bms/mylog/http.log"
        start_position => "beginning"
    }
}
filter {
    grok {
        patterns_dir => "./patterns"
        match => { "message" => ["%{IP:source_Ip},%{NUMBER:source_Port},%{IP:dest_Ip},%{NUMBER:dest_Port},%{MYPATTERN:create_Time}"] }
    }
    date {
        match => [ "create_Time", "yyyy-MM-dd HH:mm:ss:ssssss" ]
        target => "begin_Time"
        add_tag => [ "tmatch" ]
    }
    mutate {
        convert => { "dest_Port" => "integer" }
        convert => { "source_Port" => "integer" }
    }
}
output {
    elasticsearch {
        host => "localhost"
    }
}
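The grok block references a custom MYPATTERN defined under ./patterns (set up in the previous post). If you are following along without that file, a minimal sketch of such a pattern definition, assuming the timestamp format "2015-03-03 12:12:12:000000" from the sample log, could look like this:

# ./patterns/mypattern -- assumed content, not the author's original file
# Matches timestamps like "2015-03-03 12:12:12:000000" using built-in grok patterns
MYPATTERN %{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND}:%{INT}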
With the configuration above, the converted timestamp is stored in the begin_Time field. The test result is as follows:
[hadoop1@slave2 conf]$ curl 'localhost:9200/logstash-2015.11.03/_search?pretty'
{
  "took" : 5,
  "timed_out" : false,
  "_shards" : {
    "total" : 5,
    "successful" : 5,
    "failed" : 0
  },
  "hits" : {
    "total" : 1,
    "max_score" : 1.0,
    "hits" : [ {
      "_index" : "logstash-2015.11.03",
      "_type" : "logs",
      "_id" : "AVDNIYwTGyEV_57_F995",
      "_score" : 1.0,
      "_source":{"message":"1.1.1.1,23,2.2.2.2,223,2015-03-03 12:12:12:000000","@version":"1","@timestamp":"2015-11-03T11:35:38.945Z","host":"slave2","path":"/home/hadoop1/bms/mylog/http.log","source_Ip":"1.1.1.1","source_Port":23,"dest_Ip":"2.2.2.2","dest_Port":223,"create_Time":"2015-03-03 12:12:12:000000","begin_Time":"2015-03-03T04:12:00.000Z","tags":["tmatch"]}
    } ]
  }
}
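As an optional check (not part of the original test), you can also confirm that Elasticsearch dynamically mapped begin_Time as a date rather than a plain string by inspecting the index mapping:

[hadoop1@slave2 conf]$ curl 'localhost:9200/logstash-2015.11.03/_mapping?pretty'

In the returned mapping, begin_Time should show "type" : "date", which is what makes range queries and Kibana time filters on that field possible.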