Give the logstash user ownership of the install directory, switch to it, and start Logstash with the sample config:
[root@node-zwf ~]# chown -R logstash:elk /home/logstash
[root@node-zwf ~]# su logstash
[logstash@node-zwf root]$ cd /home/logstash/logstash-7.8.0
[logstash@node-zwf logstash-7.8.0]$ bin/logstash -f config/logstash-sample.conf
Sending Logstash logs to /home/logstash/logstash-7.8.0/logs which is now configured via log4j2.properties
[2021-04-11T10:23:08,660][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2021-04-11T10:23:09,554][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"7.8.0", "jruby.version"=>"jruby 9.2.11.1 (2.5.7) 2020-03-25 b1f55b1a40 Java HotSpot(TM) 64-Bit Server VM 25.281-b09 on 1.8.0_281-b09 +indy +jit [linux-x86_64]"}
[2021-04-11T10:23:15,384][INFO ][org.reflections.Reflections] Reflections took 199 ms to scan 1 urls, producing 21 keys and 41 values
[2021-04-11T10:23:20,777][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://192.168.10.233:9200/]}}
[2021-04-11T10:23:21,548][WARN ][logstash.outputs.elasticsearch][main] Attempted to resurrect connection to dead ES instance, but got an error. {:url=>"http://192.168.10.233:9200/", :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError, :error=>"Got response code '401' contacting Elasticsearch at URL 'http://192.168.10.233:9200/'"}
[2021-04-11T10:23:21,702][INFO ][logstash.outputs.elasticsearch][main] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//192.168.10.233:9200"]}
[2021-04-11T10:23:22,165][INFO ][logstash.javapipeline ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>500, "pipeline.sources"=>["/home/logstash/logstash-7.8.0/config/logstash-sample.conf"], :thread=>"#<Thread:0x553c1098 run>"}
[2021-04-11T10:23:26,635][INFO ][logstash.javapipeline ][main] Pipeline started {"pipeline.id"=>"main"}
[2021-04-11T10:23:26,795][WARN ][logstash.outputs.elasticsearch][main] Attempted to resurrect connection to dead ES instance, but got an error. {:url=>"http://192.168.10.233:9200/", :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError, :error=>"Got response code '401' contacting Elasticsearch at URL 'http://192.168.10.233:9200/'"}
[2021-04-11T10:23:27,149][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2021-04-11T10:23:27,642][INFO ][org.apache.kafka.clients.consumer.ConsumerConfig][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] ConsumerConfig values:
allow.auto.create.topics = true
auto.commit.interval.ms = 5000
auto.offset.reset = latest
bootstrap.servers = [192.168.10.233:9092]
check.crcs = true
client.dns.lookup = default
client.id = logstash-0
client.rack =
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = true
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = logstash
group.instance.id = null
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
isolation.level = read_uncommitted
key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 300000
max.poll.records = 500
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 50
reconnect.backoff.ms = 50
request.timeout.ms = 40000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
session.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = https
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
[2021-04-11T10:23:28,315][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] Kafka version: 2.4.1
[2021-04-11T10:23:28,320][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] Kafka commitId: c57222ae8cd7866b
[2021-04-11T10:23:28,329][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] Kafka startTimeMs: 1618161808302
[2021-04-11T10:23:28,529][INFO ][org.apache.kafka.clients.consumer.KafkaConsumer][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Subscribed to topic(s): fx-topic
[2021-04-11T10:23:30,209][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2021-04-11T10:23:31,413][INFO ][org.apache.kafka.clients.Metadata][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Cluster ID: F02_yzTMQXqDqFttd5m9rw
[2021-04-11T10:23:31,428][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Discovered group coordinator node-zwf:9092 (id: 2147483646 rack: null)
[2021-04-11T10:23:31,474][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] (Re-)joining group
[2021-04-11T10:23:31,607][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] (Re-)joining group
[2021-04-11T10:23:31,680][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Finished assignment for group at generation 13: {logstash-0-b99bad1b-82e4-4f00-af29-9fb60f5a34a6=Assignment(partitions=[fx-topic-0])}
[2021-04-11T10:23:31,932][WARN ][logstash.outputs.elasticsearch][main] Attempted to resurrect connection to dead ES instance, but got an error. {:url=>"http://192.168.10.233:9200/", :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError, :error=>"Got response code '401' contacting Elasticsearch at URL 'http://192.168.10.233:9200/'"}
[2021-04-11T10:23:31,960][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Successfully joined group with generation 13
[2021-04-11T10:23:31,997][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Adding newly assigned partitions: fx-topic-0
[2021-04-11T10:23:32,049][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Setting offset for partition fx-topic-0 to the committed offset FetchPosition{offset=19, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=node-zwf:9092 (id: 1 rack: null), epoch=0}}
[2021-04-11T10:23:34,732][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>""}
[2021-04-11T10:23:34,734][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>""}
[2021-04-11T10:23:34,746][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>""}
[2021-04-11T10:23:34,809][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>""}
[2021-04-11T10:23:34,830][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>""}
[2021-04-11T10:23:34,843][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>"1"}
[2021-04-11T10:23:34,853][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>""}
[2021-04-11T10:23:34,860][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>""}
[2021-04-11T10:23:34,880][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>"2"}
[2021-04-11T10:23:34,892][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>""}
[2021-04-11T10:23:34,925][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>""}
[2021-04-11T10:23:34,940][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>"3"}
[2021-04-11T10:23:34,980][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>""}
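Aside: the "Parsed JSON object/hash requires a target configuration option" warnings above are expected here. Several of the consumed Kafka messages are empty or bare scalars such as "1", which do not parse to a JSON object, and the json filter will not merge a non-object into the event root unless a target is set. A minimal sketch of a filter block that avoids the warning — the target field name payload is illustrative, not taken from the original config:

filter {
  json {
    source => "message"
    # Parse into an explicit field so non-object payloads
    # (bare numbers, empty strings) have somewhere to go.
    target => "payload"
  }
}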
[2021-04-11T10:23:36,967][WARN ][logstash.outputs.elasticsearch][main] Attempted to resurrect connection to dead ES instance, but got an error. {:url=>"http://192.168.10.233:9200/", :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError, :error=>"Got response code '401' contacting Elasticsearch at URL 'http://192.168.10.233:9200/'"}
        ... (the same 401 WARN from logstash.outputs.elasticsearch repeats every ~5 seconds) ...
^C[2021-04-11T10:24:05,553][WARN ][logstash.runner ] SIGINT received. Shutting down.
[2021-04-11T10:24:05,931][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Revoke previously assigned partitions fx-topic-0
[2021-04-11T10:24:05,935][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Member logstash-0-b99bad1b-82e4-4f00-af29-9fb60f5a34a6 sending LeaveGroup request to coordinator node-zwf:9092 (id: 2147483646 rack: null) due to the consumer is being closed
^C[2021-04-11T10:24:06,944][FATAL][logstash.runner ] SIGINT received. Terminating immediately..
[2021-04-11T10:24:07,165][ERROR][org.logstash.Logstash ] org.jruby.exceptions.ThreadKill
[logstash@node-zwf logstash-7.8.0]$ bin/logstash -f config/logstash-sample.conf
Sending Logstash logs to /home/logstash/logstash-7.8.0/logs which is now configured via log4j2.properties
[2021-04-11T10:26:08,743][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2021-04-11T10:26:08,900][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"7.8.0", "jruby.version"=>"jruby 9.2.11.1 (2.5.7) 2020-03-25 b1f55b1a40 Java HotSpot(TM) 64-Bit Server VM 25.281-b09 on 1.8.0_281-b09 +indy +jit [linux-x86_64]"}
[2021-04-11T10:26:11,704][INFO ][org.reflections.Reflections] Reflections took 62 ms to scan 1 urls, producing 21 keys and 41 values
[2021-04-11T10:26:14,405][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://192.168.10.233:9200/]}}
[2021-04-11T10:26:15,370][WARN ][logstash.outputs.elasticsearch][main] Attempted to resurrect connection to dead ES instance, but got an error. {:url=>"http://192.168.10.233:9200/", :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError, :error=>"Got response code '401' contacting Elasticsearch at URL 'http://192.168.10.233:9200/'"}
[2021-04-11T10:26:15,485][INFO ][logstash.outputs.elasticsearch][main] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//192.168.10.233:9200"]}
[2021-04-11T10:26:15,936][INFO ][logstash.javapipeline ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>500, "pipeline.sources"=>["/home/logstash/logstash-7.8.0/config/logstash-sample.conf"], :thread=>"#<Thread:0x15a976fc run>"}
[2021-04-11T10:26:19,480][INFO ][logstash.javapipeline ][main] Pipeline started {"pipeline.id"=>"main"}
[2021-04-11T10:26:19,904][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2021-04-11T10:26:20,020][INFO ][org.apache.kafka.clients.consumer.ConsumerConfig][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] ConsumerConfig values:
        ... (ConsumerConfig values identical to the first run; omitted) ...
[2021-04-11T10:26:20,300][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] Kafka version: 2.4.1
[2021-04-11T10:26:20,305][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] Kafka commitId: c57222ae8cd7866b
[2021-04-11T10:26:20,310][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] Kafka startTimeMs: 1618161980296
[2021-04-11T10:26:20,382][INFO ][org.apache.kafka.clients.consumer.KafkaConsumer][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Subscribed to topic(s): fx-topic
[2021-04-11T10:26:20,599][WARN ][logstash.outputs.elasticsearch][main] Attempted to resurrect connection to dead ES instance, but got an error. {:url=>"http://192.168.10.233:9200/", :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError, :error=>"Got response code '401' contacting Elasticsearch at URL 'http://192.168.10.233:9200/'"}
[2021-04-11T10:26:21,109][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2021-04-11T10:26:21,445][INFO ][org.apache.kafka.clients.Metadata][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Cluster ID: F02_yzTMQXqDqFttd5m9rw
[2021-04-11T10:26:21,457][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Discovered group coordinator node-zwf:9092 (id: 2147483646 rack: null)
[2021-04-11T10:26:21,488][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] (Re-)joining group
[2021-04-11T10:26:21,566][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] (Re-)joining group
[2021-04-11T10:26:21,607][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Finished assignment for group at generation 15: {logstash-0-200ab16e-b34c-4c59-8193-4c10106bcfbb=Assignment(partitions=[fx-topic-0])}
[2021-04-11T10:26:21,647][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Successfully joined group with generation 15
[2021-04-11T10:26:21,686][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Adding newly assigned partitions: fx-topic-0
[2021-04-11T10:26:21,737][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Setting offset for partition fx-topic-0 to the committed offset FetchPosition{offset=32, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=node-zwf:9092 (id: 1 rack: null), epoch=0}}
[2021-04-11T10:26:25,638][WARN ][logstash.outputs.elasticsearch][main] Attempted to resurrect connection to dead ES instance, but got an error. {:url=>"http://192.168.10.233:9200/", :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError, :error=>"Got response code '401' contacting Elasticsearch at URL 'http://192.168.10.233:9200/'"}
        ... (the same 401 WARN repeats every ~5 seconds) ...
[2021-04-11T10:26:51,266][WARN ][logstash.filters.json ][main][a9a9d92e47aa532e075e38f7ae3e35c1825d2812c3cba4aa2561b1700888664c] Parsed JSON object/hash requires a target configuration option {:source=>"message", :raw=>"4"}
        ... (the 401 WARNs continue until the process is interrupted) ...
^C[2021-04-11T10:27:08,489][WARN ][logstash.runner ] SIGINT received. Shutting down.
[2021-04-11T10:27:08,821][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Revoke previously assigned partitions fx-topic-0
[2021-04-11T10:27:08,826][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][b03c17c71bbf3c0d1d67a15c2dfce0335471ac46a66ac67d1de693717bd397b1] [Consumer clientId=logstash-0, groupId=logstash] Member logstash-0-200ab16e-b34c-4c59-8193-4c10106bcfbb sending LeaveGroup request to coordinator node-zwf:9092 (id: 2147483646 rack: null) due to the consumer is being closed
^C[2021-04-11T10:27:08,911][FATAL][logstash.runner ] SIGINT received. Terminating immediately..
[2021-04-11T10:27:09,168][ERROR][org.logstash.Logstash ] org.jruby.exceptions.ThreadKill
The 401 responses are caused by the Elasticsearch username and password not being configured in the Logstash output. Reference:
https://blog.csdn.net/spencer_tseng/article/details/115610919
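Before restarting Logstash, the credentials can be sanity-checked directly against the cluster (the password here is a placeholder):

curl -u elastic:xxxxxx http://192.168.10.233:9200/

A JSON cluster-info response confirms the account works; another 401 means the password itself is wrong.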
With the user and password configured, the pipeline finally starts correctly. For reference, here is a minimal sketch of the full pipeline implied by the logs — the password and index name are placeholders, since the original logstash-sample.conf is not shown in this post:
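input {
  kafka {
    # Matches bootstrap.servers, the topic, and group.id in the ConsumerConfig dump above
    bootstrap_servers => "192.168.10.233:9092"
    topics            => ["fx-topic"]
    group_id          => "logstash"
  }
}
filter {
  json {
    source => "message"
  }
}
output {
  elasticsearch {
    hosts    => ["http://192.168.10.233:9200"]
    user     => "elastic"
    password => "xxxxxx"                      # placeholder, masked in the log below
    index    => "logstash-%{+YYYY.MM.dd}"     # assumed index name, not from the original file
  }
}

The successful run: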
[logstash@node-zwf logstash-7.8.0]$ bin/logstash -f config/logstash-sample.conf
Sending Logstash logs to /home/logstash/logstash-7.8.0/logs which is now configured via log4j2.properties
[2021-04-11T10:57:54,728][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2021-04-11T10:57:54,976][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"7.8.0", "jruby.version"=>"jruby 9.2.11.1 (2.5.7) 2020-03-25 b1f55b1a40 Java HotSpot(TM) 64-Bit Server VM 25.281-b09 on 1.8.0_281-b09 +indy +jit [linux-x86_64]"}
[2021-04-11T10:58:01,923][INFO ][org.reflections.Reflections] Reflections took 941 ms to scan 1 urls, producing 21 keys and 41 values
[2021-04-11T10:58:05,828][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elastic:xxxxxx@192.168.10.233:9200/]}}
[2021-04-11T10:58:07,274][WARN ][logstash.outputs.elasticsearch][main] Restored connection to ES instance {:url=>"http://elastic:xxxxxx@192.168.10.233:9200/"}
[2021-04-11T10:58:07,742][INFO ][logstash.outputs.elasticsearch][main] ES Output version determined {:es_version=>7}
[2021-04-11T10:58:07,782][WARN ][logstash.outputs.elasticsearch][main] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2021-04-11T10:58:09,991][INFO ][logstash.outputs.elasticsearch][main] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//192.168.10.233:9200"]}
[2021-04-11T10:58:10,340][INFO ][logstash.outputs.elasticsearch][main] Using default mapping template
[2021-04-11T10:58:10,459][INFO ][logstash.javapipeline ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>500, "pipeline.sources"=>["/home/logstash/logstash-7.8.0/config/logstash-sample.conf"], :thread=>"#<Thread:0x5d505b3f run>"}
[2021-04-11T10:58:10,894][INFO ][logstash.outputs.elasticsearch][main] Attempting to install template {:manage_template=>{"index_patterns"=>"logstash-*", "version"=>60001, "settings"=>{"index.refresh_interval"=>"5s", "number_of_shards"=>1}, "mappings"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}
[2021-04-11T10:58:14,834][INFO ][logstash.javapipeline ][main] Pipeline started {"pipeline.id"=>"main"}
[2021-04-11T10:58:15,261][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2021-04-11T10:58:15,419][INFO ][org.apache.kafka.clients.consumer.ConsumerConfig][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] ConsumerConfig values:
        ... (ConsumerConfig values identical to the first run; omitted) ...
[2021-04-11T10:58:15,827][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] Kafka version: 2.4.1
[2021-04-11T10:58:15,834][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] Kafka commitId: c57222ae8cd7866b
[2021-04-11T10:58:15,843][INFO ][org.apache.kafka.common.utils.AppInfoParser][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] Kafka startTimeMs: 1618163895819
[2021-04-11T10:58:15,918][INFO ][org.apache.kafka.clients.consumer.KafkaConsumer][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] [Consumer clientId=logstash-0, groupId=logstash] Subscribed to topic(s): fx-topic
[2021-04-11T10:58:16,866][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2021-04-11T10:58:17,233][INFO ][org.apache.kafka.clients.Metadata][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] [Consumer clientId=logstash-0, groupId=logstash] Cluster ID: F02_yzTMQXqDqFttd5m9rw
[2021-04-11T10:58:17,240][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] [Consumer clientId=logstash-0, groupId=logstash] Discovered group coordinator node-zwf:9092 (id: 2147483646 rack: null)
[2021-04-11T10:58:17,258][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] [Consumer clientId=logstash-0, groupId=logstash] (Re-)joining group
[2021-04-11T10:58:17,286][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] [Consumer clientId=logstash-0, groupId=logstash] (Re-)joining group
[2021-04-11T10:58:17,306][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] [Consumer clientId=logstash-0, groupId=logstash] Finished assignment for group at generation 19: {logstash-0-3cb5a6c5-8a2b-45d8-8dbd-7d679de0539a=Assignment(partitions=[fx-topic-0])}
[2021-04-11T10:58:17,318][INFO ][org.apache.kafka.clients.consumer.internals.AbstractCoordinator][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] [Consumer clientId=logstash-0, groupId=logstash] Successfully joined group with generation 19
[2021-04-11T10:58:17,337][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] [Consumer clientId=logstash-0, groupId=logstash] Adding newly assigned partitions: fx-topic-0
[2021-04-11T10:58:17,374][INFO ][org.apache.kafka.clients.consumer.internals.ConsumerCoordinator][main][ca38ba36cd7bb73d2651e24d584e2023e76fa16ed8a9549928a960c32993e4a6] [Consumer clientId=logstash-0, groupId=logstash] Setting offset for partition fx-topic-0 to the committed offset FetchPosition{offset=35, offsetEpoch=Optional[0], currentLeader=LeaderAndEpoch{leader=node-zwf:9092 (id: 1 rack: null), epoch=0}}
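Once events flow, the result can be verified directly in Elasticsearch (same placeholder password as above):

curl -u elastic:xxxxxx "http://192.168.10.233:9200/_cat/indices/logstash-*?v"

A logstash-* index with a growing docs.count confirms the Kafka -> Logstash -> Elasticsearch path end to end.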