Preface
Recently my project needed to record system logs, and I found that integrating Spring Cloud with ELK (Elasticsearch + Logstash + Kibana) is a good solution, so I set up the environment and documented the process to share with everyone.
Prerequisites
First, we need Elasticsearch + Kibana as well as Logstash installed. Below are my related articles for reference.
1. Install Elasticsearch + Kibana
2. Helm3: install ElasticSearch with the ik analyzer
3. Install Logstash
4. Java: implementing add/update/delete operations for ElasticSearch
Logstash Configuration
1. Add the jar dependency
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>7.4</version>
</dependency>
2. Add the logback-spring.xml file, as shown below:
<?xml version="1.0" encoding="UTF-8"?> | |
<configuration debug="true" scan="true" scanPeriod="10 seconds"> | |
<springProperty scope="context" name="springAppName" source="spring.application.name"/> | |
<springProperty scope="context" name="active" source="spring.profiles.active"/> | |
<!-- 异步发送日志 --> | |
<appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender"> | |
<appender-ref ref="LOGSTASH"/> | |
</appender> | |
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> | |
<encoder> | |
<pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern> | |
</encoder> | |
</appender> | |
<!-- logstash 设置 --> | |
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender"> | |
<!-- <param name="Encoding" value="UTF-8"/>--> | |
<!-- logstash 服务器 --> | |
<destination>xxx.xxx.xxx.xxx:xxxx</destination> | |
<!-- encoder is required --> | |
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder"> | |
<providers> | |
<timestamp> | |
<timeZone>UTC</timeZone> | |
</timestamp> | |
<pattern> | |
<pattern> | |
{"active": "${active}", | |
"service": "${springAppName:-}", | |
"timestamp": "%date{ISO8601}", | |
"level": "%level", | |
"thread": "%thread", | |
"logger": "%logger", | |
"message": "%message", | |
"context": "%X" | |
} | |
</pattern> | |
</pattern> | |
</providers> | |
</encoder> | |
<!-- 临界值过滤器,过滤掉低于指定临界值的日志。当日志级别等于或高于临界值时,过滤器返回 NEUTRAL;当日志级别低于临界值时,日志会被回绝,OFF>ERROR>WARN>INFO>DEBUG>TRACE>ALL --> | |
<!-- <filter class="ch.qos.logback.classic.filter.ThresholdFilter">--> | |
<!-- <level>INFO</level>--> | |
<!-- </filter>--> | |
</appender> | |
<!-- 日志输入级别 --> | |
<root level="INFO"> | |
<!-- 增加 logstash 日志输入 --> | |
<appender-ref ref="STDOUT"/> | |
<appender-ref ref="ASYNC"/> | |
</root> | |
</configuration> |
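With this in place, any ordinary SLF4J log statement in the application is encoded as JSON and shipped to Logstash through the ASYNC appender. Here is a minimal sketch (the controller name and message are hypothetical, purely for illustration):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

// Hypothetical controller used only to show that normal SLF4J calls
// are picked up by logback-spring.xml and forwarded to Logstash.
@RestController
public class LogDemoController {

    private static final Logger log = LoggerFactory.getLogger(LogDemoController.class);

    @GetMapping("/log-demo")
    public String logDemo() {
        // Goes to STDOUT and, via the ASYNC appender, to Logstash
        log.info("hello elk, this message should appear in Elasticsearch");
        return "ok";
    }
}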
3. Run the project and check the log output in Logstash and Elasticsearch
Check the Logstash logs with this command:
➜ ~ kubectl logs pod/logstash-0 -nlogstash
Output:
{
         "level" => "INFO",
       "context" => "",
       "message" => "[9958260f-3313-4e5a-9506-24c140c7d6c1] Receive server push request, request = NotifySubscriberRequest, requestId = 35420",
        "thread" => "nacos-grpc-client-executor-47.97.208.153-30",
       "service" => "gatewayserver",
        "logger" => "com.alibaba.nacos.common.remote.client",
        "active" => "dev",
    "@timestamp" => 2023-09-27T06:50:25.552Z,
     "timestamp" => "2023-09-27 14:50:25,552",
      "@version" => "1",
          "type" => "syslog"
}
{
         "level" => "INFO",
       "context" => "",
       "message" => "[9958260f-3313-4e5a-9506-24c140c7d6c1] Ack server push request, request = NotifySubscriberRequest, requestId = 35420",
        "thread" => "nacos-grpc-client-executor-47.97.208.153-30",
       "service" => "gatewayserver",
        "logger" => "com.alibaba.nacos.common.remote.client",
        "active" => "dev",
    "@timestamp" => 2023-09-27T06:50:25.555Z,
     "timestamp" => "2023-09-27 14:50:25,555",
      "@version" => "1",
          "type" => "syslog"
}
{
         "level" => "INFO",
       "context" => "",
       "message" => "new ips(1) service: DEFAULT_GROUP@@gatewayserver -> [{\"ip\":\"172.16.12.11\",\"port\":7777,\"weight\":1.0,\"healthy\":true,\"enabled\":true,\"ephemeral\":true,\"clusterName\":\"DEFAULT\",\"serviceName\":\"DEFAULT_GROUP@@gatewayserver\",\"metadata\":{\"preserved.register.source\":\"SPRING_CLOUD\"},\"instanceHeartBeatInterval\":5000,\"instanceHeartBeatTimeOut\":15000,\"ipDeleteTimeout\":30000}]",
        "thread" => "nacos-grpc-client-executor-47.97.208.153-30",
       "service" => "gatewayserver",
        "logger" => "com.alibaba.nacos.client.naming",
        "active" => "dev",
    "@timestamp" => 2023-09-27T06:50:25.554Z,
     "timestamp" => "2023-09-27 14:50:25,554",
      "@version" => "1",
          "type" => "syslog"
}
{
         "level" => "INFO",
       "context" => "",
       "message" => "current ips:(1) service: DEFAULT_GROUP@@gatewayserver -> [{\"ip\":\"172.16.12.11\",\"port\":7777,\"weight\":1.0,\"healthy\":true,\"enabled\":true,\"ephemeral\":true,\"clusterName\":\"DEFAULT\",\"serviceName\":\"DEFAULT_GROUP@@gatewayserver\",\"metadata\":{\"preserved.register.source\":\"SPRING_CLOUD\"},\"instanceHeartBeatInterval\":5000,\"instanceHeartBeatTimeOut\":15000,\"ipDeleteTimeout\":30000}]",
        "thread" => "nacos-grpc-client-executor-47.97.208.153-30",
       "service" => "gatewayserver",
        "logger" => "com.alibaba.nacos.client.naming",
        "active" => "dev",
    "@timestamp" => 2023-09-27T06:50:25.554Z,
     "timestamp" => "2023-09-27 14:50:25,554",
      "@version" => "1",
          "type" => "syslog"
}
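For completeness, the Logstash side needs a TCP input that matches the <destination> in logback-spring.xml and decodes the JSON lines, plus an Elasticsearch output. Below is a minimal pipeline sketch; the port number, Elasticsearch host, and index name are placeholders (see my Logstash installation article for the actual setup):

input {
  tcp {
    port  => 4560          # must match the port in <destination> of logback-spring.xml
    codec => json_lines    # logstash-logback-encoder emits one JSON object per line
    type  => "syslog"      # would account for the "type" => "syslog" field above
  }
}
output {
  stdout { codec => rubydebug }               # produces the console output shown above
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]    # placeholder address
    index => "springcloud-log-%{+YYYY.MM.dd}" # placeholder index name
  }
}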
Elasticsearch logs
We need to add the index pattern in Kibana; the steps are as follows:
Once it has been added, we can click through and view the logs.
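If you just want to confirm that the documents actually reached Elasticsearch without going through Kibana, a quick query also works (the host and index name below are guesses; substitute whatever your Logstash output writes to):

➜ ~ curl -s 'http://elasticsearch:9200/_cat/indices?v'
➜ ~ curl -s 'http://elasticsearch:9200/springcloud-log-*/_search?size=1&pretty'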
And with that, the ELK logging system is set up.
Summary
1. I ran into problems while setting up Logstash; if you are interested, see my article on what to do when Logstash is installed via Helm3 and Logback is configured, but the log records keep coming out wrong.
2. For elasticsearch + kibana and integrating them into springcloud, the prerequisites section above already lists the related articles and steps; feel free to check them out.