
spring-boot - Spring Logback logger not reaching Elasticsearch


I have a Spring Boot application configured with the following logback.xml:

<configuration>
    <property resource="application.properties" />
    <property name="LOG_FILE" value="${LOG_FILE:-${LOG_PATH:-${LOG_TEMP:-${java.io.tmpdir:-/tmp}}}/spring.log}"/>
    <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter" />
    <conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter" />
    <conversionRule conversionWord="wEx" converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter" />
    <property name="CONSOLE_LOG_PATTERN" value="${CONSOLE_LOG_PATTERN:-%clr(%d{${LOG_DATEFORMAT_PATTERN:-yyyy-MM-dd HH:mm:ss.SSS}}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>
    <property name="FILE_LOG_PATTERN" value="${FILE_LOG_PATTERN:-%d{${LOG_DATEFORMAT_PATTERN:-yyyy-MM-dd HH:mm:ss.SSS}} ${LOG_LEVEL_PATTERN:-%5p} ${PID:- } --- [%t] %-40.40logger{39} : %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>

    <logger name="org.apache.catalina.startup.DigesterFactory" level="ERROR"/>
    <logger name="org.apache.catalina.util.LifecycleBase" level="ERROR"/>
    <logger name="org.apache.coyote.http11.Http11NioProtocol" level="WARN"/>
    <logger name="org.apache.sshd.common.util.SecurityUtils" level="WARN"/>
    <logger name="org.apache.tomcat.util.net.NioSelectorPool" level="WARN"/>
    <logger name="org.eclipse.jetty.util.component.AbstractLifeCycle" level="ERROR"/>
    <logger name="org.hibernate.validator.internal.util.Version" level="WARN"/>
    <logger name="org.springframework.data.elasticsearch.client.WIRE" level="trace"/>

    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
        </encoder>
    </appender>
    <appender name="loggingLogback" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>logstash.myDomain.com:80</destination>
        <!-- encoder is required -->
        <encoder class="net.logstash.logback.encoder.LogstashEncoder" />
    </appender>
    <appender name="accessLogback" class="net.logstash.logback.appender.LogstashAccessTcpSocketAppender">
        <destination>logstash.myDomain.com:80</destination>
        <!-- encoder is required -->
        <encoder class="net.logstash.logback.encoder.LogstashAccessEncoder" />
    </appender>

    <root level="INFO">
        <appender-ref ref="CONSOLE" />
        <appender-ref ref="loggingLogback" />
        <appender-ref ref="accessLogback" />
    </root>
</configuration>
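For context: LogstashTcpSocketAppender and the encoders above come from the logstash-logback-encoder library, which has to be on the application's classpath. A typical Maven declaration looks like this (the version shown is an assumption; pick a release compatible with your Logback):

<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <!-- assumed version; use a release compatible with your Logback -->
    <version>6.3</version>
</dependency>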

I then start my application from my IDE (and in other environments) alongside this docker-compose setup:
version: "3.0"

services:
  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    restart: always
    env_file:
      - 'env.grafana'
    user: "0"
    volumes:
      - ./volumes/grafana/data:/var/lib/grafana

  elasticsearch:
    container_name: elasticsearch
    restart: always
    image: elastic/elasticsearch:7.6.2
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - ELASTIC_PASSWORD=elasticPAss
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ports:
      - 9200:9200
    volumes:
      - ./volumes/elasticsearch/data:/usr/share/elasticsearch/data
      - ./volumes/elasticsearch/logs:/usr/share/elasticsearch/logs
    ulimits:
      memlock:
        soft: -1
        hard: -1

  logstash:
    container_name: logstash
    restart: always
    image: elastic/logstash:7.6.2
    volumes:
      - ./volumes/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - ./volumes/logstash/pipeline/jiraElastic.conf:/usr/share/logstash/pipeline/jiraElastic.conf
    depends_on:
      - elasticsearch

  nginx:
    image: nginx:latest
    container_name: nginx
    restart: always
    volumes:
      - ./volumes/nginx/nginx.conf:/etc/nginx/nginx.conf
      - ./volumes/nginx/proxy.conf:/etc/nginx/proxy.conf
      - ./volumes/nginx/sites-enabled-dev:/etc/nginx/sites-enabled
      - ./volumes/nginx/logs/:/var/log/nginx
    depends_on:
      - grafana
    ports:
      - 80:80
      - 443:443
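Note that the compose file mounts a logstash.yml that is not reproduced here. A minimal sketch of such a file (its actual contents in this setup are an assumption) would be:

http.host: "0.0.0.0"
# monitoring is optional for a local single-node setup
xpack.monitoring.enabled: false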

The configuration files of note:

nginx.conf
worker_processes auto;

events {
    worker_connections 20000;
}

http {

    ##
    # Basic Settings
    ##

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    server_names_hash_bucket_size 64;

    include /etc/nginx/mime.types;
    include /etc/nginx/proxy.conf;

    default_type application/octet-stream;

    ##
    # SSL Settings
    ##

    ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;

    ##
    # Logging Settings
    ##

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##

    gzip on;

    ##
    # Virtual Host Configs
    ##

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}

stream {
    include /etc/nginx/streams-enabled/*;
}
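As an aside, the stream block only includes streams-enabled/*, and no such directory is mounted in the compose file. If the raw TCP traffic from the Logstash appenders were proxied at the stream level rather than through the HTTP server block below, a hypothetical streams-enabled/logstash file would look like:

server {
    listen 5000;
    proxy_pass logstash:5000;
}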

proxy.conf
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_body_buffer_size 128k;
proxy_buffers 32 4k;

sites-enabled-dev/logstash
server {
    listen 80;
    server_name logstash.myDomain.com;
    keepalive_timeout 70;
    allow all;

    location / {
        proxy_pass http://logstash:5000;
    }
}

pipeline/jiraElastic.conf
input {
    tcp {
        port => 5000
        codec => "json_lines"
    }
}
output {
    elasticsearch {
        hosts => "elasticsearch:9200"
        index => "myIndexName"
    }

    stdout {
        codec => rubydebug
    }
}
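To exercise the tcp input in isolation (bypassing nginx and the application), a single JSON line can be pushed at it from inside the compose network; the network name below is an assumption based on docker-compose's default <project>_default naming:

echo '{"message":"manual test"}' | docker run --rm -i --network myproject_default busybox nc logstash 5000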

In my /etc/hosts file, I have logstash.myDomain.com pointing to 127.0.0.1.
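That is, an entry along these lines:

127.0.0.1    logstash.myDomain.com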

However, when I query Elasticsearch at /_cat/indices I get an empty result, and from /_search I get:
{
    "took": 31,
    "timed_out": false,
    "_shards": {
        "total": 0,
        "successful": 0,
        "skipped": 0,
        "failed": 0
    },
    "hits": {
        "total": {
            "value": 0,
            "relation": "eq"
        },
        "max_score": 0.0,
        "hits": []
    }
}
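For reference, those queries look like the following; add -u elastic:<password> if security is enabled on the cluster:

curl 'http://localhost:9200/_cat/indices?v'
curl 'http://localhost:9200/_search?pretty'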

In nginx's access.log I see entries that look like my application's log messages, so I assume they make it at least that far. For example:
192.168.176.1 - - [06/Apr/2020:14:29:29 +0000] "{\x22@timestamp\x22:\x222020-04-06T16:29:28.657+02:00\x22,\x22@version\x22:\x221\x22,\x22message\x22:\x22System Starting up!\x22,\x22logger_name\x22:\x22com.my.project.utils.SystemUptimeLogger\x22,\x22thread_name\x22:\x22main\x22,\x22level\x22:\x22INFO\x22,\x22level_value\x22:20000}" 400 157 "-" "-"

But I don't see any output in logstash's console, and my impression was that with the rubydebug codec in place, I should.
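A quick way to watch for that output is to follow the container's stdout, using the container name from the compose file above:

docker logs -f logstash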
In the logstash logs, I can see that it appears to read my configuration correctly:
Sending Logstash logs to /usr/share/logstash/logs which is now configured via log4j2.properties
[2020-04-06T14:28:05,468][INFO ][logstash.setting.writabledirectory] Creating directory {:setting=>"path.queue", :path=>"/usr/share/logstash/data/queue"}
[2020-04-06T14:28:05,686][INFO ][logstash.setting.writabledirectory] Creating directory {:setting=>"path.dead_letter_queue", :path=>"/usr/share/logstash/data/dead_letter_queue"}
[2020-04-06T14:28:09,587][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"7.6.2"}
[2020-04-06T14:28:09,874][INFO ][logstash.agent ] No persistent UUID file found. Generating new UUID {:uuid=>"44f03b15-fa7a-497b-81ab-9253333df805", :path=>"/usr/share/logstash/data/uuid"}
[2020-04-06T14:28:33,860][INFO ][org.reflections.Reflections] Reflections took 82 ms to scan 1 urls, producing 20 keys and 40 values
[2020-04-06T14:28:47,341][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elasticsearch:9200/]}}
[2020-04-06T14:28:48,901][WARN ][logstash.outputs.elasticsearch][main] Restored connection to ES instance {:url=>"http://elasticsearch:9200/"}
[2020-04-06T14:28:49,462][INFO ][logstash.outputs.elasticsearch][main] ES Output version determined {:es_version=>7}
[2020-04-06T14:28:49,466][WARN ][logstash.outputs.elasticsearch][main] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
[2020-04-06T14:28:49,862][INFO ][logstash.outputs.elasticsearch][main] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//elasticsearch:9200"]}
[2020-04-06T14:28:50,236][INFO ][logstash.outputs.elasticsearch][main] Using default mapping template
[2020-04-06T14:28:50,632][INFO ][logstash.outputs.elasticsearch][main] Attempting to install template {:manage_template=>{"index_patterns"=>"logstash-*", "version"=>60001, "settings"=>{"index.refresh_interval"=>"5s", "number_of_shards"=>1}, "mappings"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}
[2020-04-06T14:28:50,684][WARN ][org.logstash.instrument.metrics.gauge.LazyDelegatingGauge][main] A gauge metric of an unknown type (org.jruby.specialized.RubyArrayOneObject) has been created for key: cluster_uuids. This may result in invalid serialization. It is recommended to log an issue to the responsible developer/development team.
[2020-04-06T14:28:50,708][INFO ][logstash.javapipeline ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>2, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>250, "pipeline.sources"=>["/usr/share/logstash/pipeline/jiraElastic.conf", "/usr/share/logstash/pipeline/logstash.conf"], :thread=>"#<Thread:0x5f9bdc74 run>"}
[2020-04-06T14:28:56,466][INFO ][logstash.inputs.beats ][main] Beats inputs: Starting input listener {:address=>"0.0.0.0:5044"}
[2020-04-06T14:28:58,415][INFO ][logstash.javapipeline ][main] Pipeline started {"pipeline.id"=>"main"}
[2020-04-06T14:28:58,593][INFO ][logstash.inputs.tcp ][main] Starting tcp input listener {:address=>"0.0.0.0:5000", :ssl_enable=>"false"}
[2020-04-06T14:28:58,878][INFO ][org.logstash.beats.Server][main] Starting server on port: 5044
[2020-04-06T14:28:59,851][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2020-04-06T14:29:02,755][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}

Any idea what the missing piece is? (It feels like the main problem is between nginx and logstash, but it could also be the result of a misconfiguration in logback.xml.)

Best Answer

So the main problem turned out to be in my logstash configuration, where I had set the elasticsearch index to myIndexName.

Index names need to be entirely lowercase.

I also removed the port from <destination>logstash.myDomain.com:80</destination>, but I doubt that made a difference.
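With that change applied, the output section of the pipeline reads roughly as follows; the lowercase index name is the decisive fix, since Elasticsearch rejects index names containing uppercase letters:

output {
    elasticsearch {
        hosts => "elasticsearch:9200"
        index => "myindexname"
    }
    stdout {
        codec => rubydebug
    }
}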

Regarding spring-boot - Spring Logback logger not reaching Elasticsearch, the original question can be found on Stack Overflow: https://stackoverflow.com/questions/61062176/
