Logstash脚本无法将数据写入InfluxDB

时间:2018-01-30 17:07:57

标签: logstash influxdb

我想:  1.从索引中提取Logstash的监控指标。  2.使用Logstash脚本过滤/处理。  3.写入InfluxDB。

Logstash脚本如下所示:

input {
    #logstash Metrics
    http_poller {
        urls => {
            logstash_metrics => {
                method => post
                url => "http://elasticco-###############.com:80//%3C.monitoring-logstash-6-%7Bnow%2Fd%7D%3E//_search?filter_path=aggregations.source_node_ip.buckets"
                headers => { Accept => "application/json" }
                body => '{"query":{"bool":{"must":[{"range":{"timestamp":{"gte":"now-10m","format":"epoch_millis"}}}]}},"size":0,"aggregations":{"source_node_ip":{"terms":{"field":"source_node.ip","size":20},"aggregations":{"data":{"top_hits":{"size":1,"sort":[{"timestamp":{"order":"desc"}}]}}}}}}'
                auth => { user => "elastic" password => "changeme" }
            }
        }
        codec => json
        schedule => { every => "10m"}
        type => "logstash_metrics"
        add_field => {
            "region" => "west"
        }
    }
}

filter {
    if [type] == "logstash_metrics"{
        mutate {
            rename => {
                "[aggregations][source_node_ip][buckets]" => "root"
            }
        }
        split { field => "[root]" }
        mutate {
            rename => {
                "[root][data][hits][hits]" => "main_event"
            }
        }
        split { field => "[main_event]" }
        mutate {
            rename => {
                "[main_event][_source][cluster_uuid]" => "cluster_uuid"
                "[main_event][_source][source_node][ip]" => "source_node_ip"
                "[main_event][_source][source_node][host]" => "source_node_host"
                                "[main_event][_source][source_node][uuid]" => "source_node_uuid"
                "[main_event][_source][logstash_stats][jvm][mem][heap_used_percent]" => "logstash_stats_jvm_mem_heap_used_percent"
                "[main_event][_source][logstash_stats][jvm][mem][heap_used_in_bytes]" => "logstash_stats_jvm_mem_heap_used_in_bytes"
                "[main_event][_source][logstash_stats][jvm][mem][heap_max_in_bytes]" => "logstash_stats_jvm_mem_heap_max_in_bytes"
                "[main_event][_source][logstash_stats][jvm][uptime_in_millis]" => "logstash_stats_jvm_uptime_in_millis"
                "[main_event][_source][logstash_stats][jvm][gc][collectors][young][collection_time_in_millis]" => "logstash_stats_jvm_gc_collectors_young_collection_time_in_millis"
                "[main_event][_source][logstash_stats][jvm][gc][collectors][young][collection_count]" => "logstash_stats_jvm_gc_collectors_young_collection_count"
                "[main_event][_source][logstash_stats][jvm][gc][collectors][old][collection_time_in_millis]" => "logstash_stats_jvm_gc_collectors_old_collection_time_in_millis"
                "[main_event][_source][logstash_stats][jvm][gc][collectors][old][collection_count]" => "logstash_stats_jvm_gc_collectors_old_collection_count"
                "[main_event][_source][logstash_stats][logstash][pipeline][batch_size]" => "logstash_stats_logstash_pipeline_batch_size"
                "[main_event][_source][logstash_stats][logstash][pipeline][workers]" => "logstash_stats_logstash_pipeline_workers"
                "[main_event][_source][logstash_stats][logstash][status]" => "logstash_stats_logstash_status"
                "[main_event][_source][logstash_stats][logstash][host]" => "logstash_stats_logstash_host"
                "[main_event][_source][logstash_stats][process][max_file_descriptors]" => "logstash_stats_process_max_file_descriptors"
                "[main_event][_source][logstash_stats][process][cpu][percent]" => "logstash_stats_process_cpu_percent"
                "[main_event][_source][logstash_stats][process][open_file_descriptors]" => "logstash_stats_process_open_file_descriptors"
                "[main_event][_source][logstash_stats][os][cpu][load_average][5m]" => "logstash_stats_os_cpu_load_average_5m"
                "[main_event][_source][logstash_stats][os][cpu][load_average][15m]" => "logstash_stats_os_cpu_load_average_15m"
                "[main_event][_source][logstash_stats][os][cpu][load_average][1m]" => "logstash_stats_os_cpu_load_average_1m"
                "[main_event][_source][logstash_stats][events][filtered]" => "logstash_stats_events_filtered"
                "[main_event][_source][logstash_stats][events][in]" => "logstash_stats_events_in"
                "[main_event][_source][logstash_stats][events][duration_in_millis]" => "logstash_stats_events_duration_in_millis"
                "[main_event][_source][logstash_stats][events][out]" => "logstash_stats_events_out"
                "[main_event][_source][logstash_stats][queue][type]" => "logstash_stats_queue_type"
                "[main_event][_source][logstash_stats][queue][events_count]" => "logstash_stats_queue_events_count"
                "[main_event][_source][logstash_stats][reloads][failures]" => "logstash_stats_reloads_failures"
                "[main_event][_source][logstash_stats][reloads][successes]" => "logstash_stats_reloads_successes"
                "[main_event][_source][logstash_stats][timestamp]" => "timestamp"
            }
        }
        mutate {
            remove_field => [ "root", "aggregations", "@timestamp", "@version" , "main_event"]
        }
    }
}

output {
if [type] == "logstash_metrics" {
  stdout { codec => rubydebug }
  influxdb {
    host => "influx-qa-write.##########.com"
    port => "8086"
    user => "gt######00"
    password => "hg3########1"
    db => "logstash_statistics"
    measurement => "logstash_health_test1"
    data_points => {
    "logstash_stats_events_in" => "%{logstash_stats_events_in}"
    "logstash_stats_logstash_status" => "%{logstash_stats_logstash_status}"
    "logstash_stats_logstash_pipeline_workers" => "%{logstash_stats_logstash_pipeline_workers}"
    "logstash_stats_events_out" => "%{logstash_stats_events_out}"
    "logstash_stats_events_duration_in_millis" => "%{logstash_stats_events_duration_in_millis}"
    "logstash_stats_process_cpu_percent" => "%{logstash_stats_process_cpu_percent}"
    "logstash_stats_jvm_mem_heap_used_in_bytes" => "%{logstash_stats_jvm_mem_heap_used_in_bytes}"
    "logstash_stats_process_open_file_descriptors" => "%{logstash_stats_process_open_file_descriptors}"
    "logstash_stats_jvm_uptime_in_millis" => "%{logstash_stats_jvm_uptime_in_millis}"
    "logstash_stats_events_filtered" => "%{logstash_stats_events_filtered}"
    "logstash_stats_jvm_mem_heap_used_percent" => "%{logstash_stats_jvm_mem_heap_used_percent}"
    "logstash_stats_jvm_gc_collectors_young_collection_time_in_millis" => "%{logstash_stats_jvm_gc_collectors_young_collection_time_in_millis}"
    "source_node_ip" => "%{source_node_ip}"
    "logstash_stats_queue_events_count" => "%{logstash_stats_queue_events_count}"
    "logstash_stats_reloads_failures" => "%{logstash_stats_reloads_failures}"
    "logstash_stats_logstash_host" => "%{logstash_stats_logstash_host}"
    "logstash_stats_jvm_gc_collectors_young_collection_count" => "%{logstash_stats_jvm_gc_collectors_young_collection_count}"
    "logstash_stats_os_cpu_load_average_5m" => "%{logstash_stats_os_cpu_load_average_5m}"
    "logstash_stats_jvm_gc_collectors_old_collection_time_in_millis" => "%{logstash_stats_jvm_gc_collectors_old_collection_time_in_millis}"
    "source_node_uuid" => "%{source_node_uuid}"
    "logstash_stats_os_cpu_load_average_15m" => "%{logstash_stats_os_cpu_load_average_15m}"
    "logstash_stats_reloads_successes" => "%{logstash_stats_reloads_successes}"
    "logstash_stats_logstash_pipeline_batch_size" => "%{logstash_stats_logstash_pipeline_batch_size}"
    "source_node_host" => "%{source_node_host}"
    "logstash_stats_jvm_gc_collectors_old_collection_count" => "%{logstash_stats_jvm_gc_collectors_old_collection_count}"
    "logstash_stats_process_max_file_descriptors" => "%{logstash_stats_process_max_file_descriptors}"
    "logstash_stats_jvm_mem_heap_max_in_bytes" => "%{logstash_stats_jvm_mem_heap_max_in_bytes}"
    "cluster_uuid" => "%{cluster_uuid}"
    "logstash_stats_queue_type" => "%{logstash_stats_queue_type}"
    "logstash_stats_os_cpu_load_average_1m" => "%{logstash_stats_os_cpu_load_average_1m}"
    "region" => "%{region}"
            }
    coerce_values => {
    "logstash_stats_logstash_pipeline_workers" => "integer"
    "logstash_stats_events_in" => "integer"
    "logstash_stats_logstash_status" => "string"
    "logstash_stats_events_out" => "integer"
    "logstash_stats_events_duration_in_millis" => "integer"
    "logstash_stats_process_cpu_percent" => "float"
    "logstash_stats_jvm_mem_heap_used_in_bytes" => "integer"
    "logstash_stats_process_open_file_descriptors" => "integer"
    "logstash_stats_jvm_uptime_in_millis" => "integer"
    "logstash_stats_events_filtered" => "integer"
    "logstash_stats_jvm_mem_heap_used_percent" => "float"
    "logstash_stats_jvm_gc_collectors_young_collection_time_in_millis" => "integer"
    "source_node_ip" => "string"
    "logstash_stats_queue_events_count" => "integer"
    "logstash_stats_reloads_failures" => "integer"
    "logstash_stats_logstash_host" => "string"
    "logstash_stats_jvm_gc_collectors_young_collection_count" => "integer"
    "logstash_stats_os_cpu_load_average_5m" => "float"
    "logstash_stats_jvm_gc_collectors_old_collection_time_in_millis" => "integer"
    "source_node_uuid" => "string"
    "logstash_stats_os_cpu_load_average_15m" => "float"
    "logstash_stats_reloads_successes" => "integer"
    "logstash_stats_logstash_pipeline_batch_size" => "integer"
    "source_node_host" => "string"
    "logstash_stats_jvm_gc_collectors_old_collection_count" => "integer"
    "logstash_stats_process_max_file_descriptors" => "integer"
    "logstash_stats_jvm_mem_heap_max_in_bytes" => "integer"
    "cluster_uuid" => "string"
    "logstash_stats_queue_type" => "string"
    "region" => "string"
            }
    send_as_tags => ["region","source_node_uuid"]
    flush_size => 3000
    idle_flush_time => 1
    retention_policy => "rp_400d"
  }
    stdout {codec => rubydebug }
}

}

到控制台(stdout)的示例输出看起来很好并且符合预期:

{
                                            "logstash_stats_events_in" => 621,
                                      "logstash_stats_logstash_status" => "green",
                            "logstash_stats_logstash_pipeline_workers" => 16,
                                           "logstash_stats_events_out" => 621,
                            "logstash_stats_events_duration_in_millis" => 4539,
                                  "logstash_stats_process_cpu_percent" => 0,
                           "logstash_stats_jvm_mem_heap_used_in_bytes" => 170390792,
                        "logstash_stats_process_open_file_descriptors" => 259,
                                                                "type" => "logstash_metrics",
                                 "logstash_stats_jvm_uptime_in_millis" => 310770160,
                                      "logstash_stats_events_filtered" => 621,
                            "logstash_stats_jvm_mem_heap_used_percent" => 0,
    "logstash_stats_jvm_gc_collectors_young_collection_time_in_millis" => 21586,
                                                      "source_node_ip" => "10.187.8.207",
                                   "logstash_stats_queue_events_count" => 0,
                                     "logstash_stats_reloads_failures" => 0,
                                                           "timestamp" => "2018-01-30T15:56:18.270Z",
                                        "logstash_stats_logstash_host" => "ip-187-7-147.dqa.capitalone.com",
             "logstash_stats_jvm_gc_collectors_young_collection_count" => 487,
                               "logstash_stats_os_cpu_load_average_5m" => 0.19,
      "logstash_stats_jvm_gc_collectors_old_collection_time_in_millis" => 124,
                                                    "source_node_uuid" => "VmarsH2-RMO0HY2u2-A9EQ",
                              "logstash_stats_os_cpu_load_average_15m" => 0.13,
                                    "logstash_stats_reloads_successes" => 0,
                         "logstash_stats_logstash_pipeline_batch_size" => 125,
                                     "source_node_host" => "10.187.8.207",
               "logstash_stats_jvm_gc_collectors_old_collection_count" => 1,
                         "logstash_stats_process_max_file_descriptors" => 16384,
                            "logstash_stats_jvm_mem_heap_max_in_bytes" => 32098877440,
                                                        "cluster_uuid" => "LkLw_ASTR7CVQAaX1IzDgg",
                                           "logstash_stats_queue_type" => "memory",
                                                              "region" => "west",
                               "logstash_stats_os_cpu_load_average_1m" => 0.06

(已成功生成格式化输出)

但是上面的脚本无法将此内容写入Influx,错误日志显示:

09:56:25.658 [[main]>worker0] DEBUG logstash.outputs.influxdb - Influxdb output: Received event: %{host} %{message}
Exception in thread "[main]>worker0" java.io.IOException: fails
        at org.logstash.Event.getTimestamp(Event.java:140)
        at org.logstash.ext.JrubyEventExtLibrary$RubyEvent.ruby_timestamp(JrubyEventExtLibrary.java:289)
        at org.logstash.ext.JrubyEventExtLibrary$RubyEvent$INVOKER$i$0$0$ruby_timestamp.call(JrubyEventExtLibrary$RubyEvent$INVOKER$i$0$0$ruby_ti
mestamp.gen)
        at org.jruby.runtime.callsite.CachingCallSite.cacheAndCall(CachingCallSite.java:306)
        at org.jruby.runtime.callsite.CachingCallSite.call(CachingCallSite.java:136)
        at org.jruby.ast.CallNoArgNode.interpret(CallNoArgNode.java:60)
        at org.jruby.ast.FCallTwoArgNode.interpret(FCallTwoArgNode.java:38)
        at org.jruby.ast.LocalAsgnNode.interpret(LocalAsgnNode.java:123)
        at org.jruby.ast.NewlineNode.interpret(NewlineNode.java:105)

        at org.jruby.ast.BlockNode.interpret(BlockNode.java:71)
        at org.jruby.evaluator.ASTInterpreter.INTERPRET_METHOD(ASTInterpreter.java:74)
        at org.jruby.internal.runtime.methods.InterpretedMethod.call(InterpretedMethod.java:182)
        at org.jruby.internal.runtime.methods.DefaultMethod.call(DefaultMethod.java:203)
        at org.jruby.runtime.callsite.CachingCallSite.cacheAndCall(CachingCallSite.java:326)
        at org.jruby.runtime.callsite.CachingCallSite.call(CachingCallSite.java:170)
        at org.jruby.ast.FCallOneArgNode.interpret(FCallOneArgNode.java:36)
        at org.jruby.ast.NewlineNode.interpret(NewlineNode.java:105)
        at org.jruby.evaluator.ASTInterpreter.INTERPRET_BLOCK(ASTInterpreter.java:112)
        at org.jruby.runtime.Interpreted19Block.evalBlockBody(Interpreted19Block.java:206)
        at org.jruby.runtime.Interpreted19Block.yield(Interpreted19Block.java:157)
        at org.jruby.runtime.Block.yield(Block.java:142)
        at org.jruby.RubyArray.eachCommon(RubyArray.java:1606)
        at org.jruby.RubyArray.each(RubyArray.java:1613)

更新:我的logstash能够与InfluxDB通信(其他脚本工作正常),我正在使用的环境版本是:Logstash 5.4,InfluxDB 1.4.2,Java 8(64位) ,logstash-output-influxdb 5.0.3(输出插件),Windows 7企业版(64位)。

有人能说出这里出了什么问题吗?如果您需要任何进一步的信息,请告诉我。

谢谢!

0 个答案:

没有答案