Java: SpringBoot + Kafka + ELK for Massive Log Collection (Detailed Guide)


The overall flow is roughly as follows: the SpringBoot application writes log files via log4j2, filebeat ships them to Kafka, Logstash consumes and parses them into Elasticsearch, and Kibana visualizes the result.

Server preparation

First, here is a list of the server nodes used throughout this article, so you can cross-reference them in the sections below:

  • 192.168.11.31: SpringBoot application + filebeat
  • 192.168.11.51: Kafka broker
  • 192.168.11.111 / 192.168.11.112 / 192.168.11.113: ZooKeeper cluster
  • 192.168.11.35: Elasticsearch + Kibana

SpringBoot project preparation

Bring in log4j2 to replace SpringBoot's default logging. The demo project structure is as follows:

pom.xml

<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
        <!-- exclude spring-boot-starter-logging -->
        <exclusions>
            <exclusion>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-starter-logging</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
    <!-- log4j2 -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-log4j2</artifactId>
    </dependency>
    <!-- disruptor is required by Log4j2's AsyncLogger -->
    <dependency>
        <groupId>com.lmax</groupId>
        <artifactId>disruptor</artifactId>
        <version>3.3.4</version>
    </dependency>
</dependencies>

log4j2.xml

<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="INFO" schema="Log4J-V2.0.xsd" monitorInterval="600" >
    <Properties>
        <Property name="LOG_HOME">logs</Property>
        <Property name="FILE_NAME">collector</Property>
        <Property name="patternLayout">[%d{yyyy-MM-dd'T'HH:mm:ss.SSSZZ}] [%level{length=5}] [%thread-%tid] [%logger] [%X{hostName}] [%X{ip}] [%X{applicationName}] [%F,%L,%C,%M] [%m] ## '%ex'%n</Property>
    </Properties>
    <Appenders>
        <Console name="CONSOLE" target="SYSTEM_OUT">
            <PatternLayout pattern="${patternLayout}"/>
        </Console>
        <RollingRandomAccessFile name="appAppender" fileName="${LOG_HOME}/app-${FILE_NAME}.log" filePattern="${LOG_HOME}/app-${FILE_NAME}-%d{yyyy-MM-dd}-%i.log" >
          <PatternLayout pattern="${patternLayout}" />
          <Policies>
              <TimeBasedTriggeringPolicy interval="1"/>
              <SizeBasedTriggeringPolicy size="500MB"/>
          </Policies>
          <DefaultRolloverStrategy max="20"/>
        </RollingRandomAccessFile>
        <RollingRandomAccessFile name="errorAppender" fileName="${LOG_HOME}/error-${FILE_NAME}.log" filePattern="${LOG_HOME}/error-${FILE_NAME}-%d{yyyy-MM-dd}-%i.log" >
          <PatternLayout pattern="${patternLayout}" />
          <Filters>
              <!-- only WARN and above reach the error log -->
              <ThresholdFilter level="warn" onMatch="ACCEPT" onMismatch="DENY"/>
          </Filters>
          <Policies>
              <TimeBasedTriggeringPolicy interval="1"/>
              <SizeBasedTriggeringPolicy size="500MB"/>
          </Policies>
          <DefaultRolloverStrategy max="20"/>
        </RollingRandomAccessFile>
    </Appenders>
    <Loggers>
        <!-- business-related async logger -->
        <AsyncLogger name="com.bfxy.*" level="info" includeLocation="true">
          <AppenderRef ref="appAppender"/>
          <AppenderRef ref="errorAppender"/>
        </AsyncLogger>
        <Root level="info">
            <AppenderRef ref="CONSOLE"/>
            <AppenderRef ref="appAppender"/>
            <AppenderRef ref="errorAppender"/>
        </Root>
    </Loggers>
</Configuration>
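For reference, a line produced by this layout has roughly the following shape (a hand-written illustrative example, not captured output; the class and package names are made up). The Logstash grok expression later in this article is written against exactly this structure:

[2021-06-01T10:15:30.123+08:00] [INFO] [http-nio-8001-exec-1-28] [com.bfxy.controller.IndexController] [localhost] [192.168.11.31] [collector] [IndexController.java,21,com.bfxy.controller.IndexController,index] [This is an info log] ## ''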

IndexController

A test controller used to print logs for debugging.

@Slf4j
@RestController
public class IndexController {

	@RequestMapping(value = "/index")
	public String index() {
		InputMDC.putMDC();

		log.info("This is an info log");
		log.warn("This is a warn log");
		log.error("This is an error log");

		return "idx";
	}

	@RequestMapping(value = "/err")
	public String err() {
		InputMDC.putMDC();
		try {
			int a = 1 / 0;
		} catch (Exception e) {
			log.error("Arithmetic exception", e);
		}
		return "err";
	}

}

InputMDC

Used to populate the [%X{hostName}], [%X{ip}] and [%X{applicationName}] fields of the log pattern.

@Component
public class InputMDC implements EnvironmentAware {

	private static Environment environment;

	@Override
	public void setEnvironment(Environment environment) {
		InputMDC.environment = environment;
	}

	public static void putMDC() {
		MDC.put("hostName", NetUtil.getLocalHostName());
		MDC.put("ip", NetUtil.getLocalIp());
		MDC.put("applicationName", environment.getProperty("spring.application.name"));
	}

}
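In the demo, putMDC() is called at the top of every handler, which is easy to forget. As a sketch (not part of the original project, assuming a standard Spring Boot 2.x servlet stack), the same can be done once per request with a filter:

import java.io.IOException;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.slf4j.MDC;
import org.springframework.stereotype.Component;
import org.springframework.web.filter.OncePerRequestFilter;

// Hypothetical helper, not in the original article: populates the MDC for every
// request so individual controllers no longer need to call InputMDC.putMDC().
@Component
public class MdcFilter extends OncePerRequestFilter {

	@Override
	protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response,
			FilterChain chain) throws ServletException, IOException {
		try {
			InputMDC.putMDC();  // hostName / ip / applicationName
			chain.doFilter(request, response);
		} finally {
			MDC.clear();        // avoid leaking MDC values across pooled threads
		}
	}
}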

NetUtil

import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketAddress;
import java.net.UnknownHostException;
import java.nio.channels.SocketChannel;
import java.util.Enumeration;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class NetUtil {

	public static String normalizeAddress(String address) {
		String[] blocks = address.split("[:]");
		if (blocks.length > 2) {
			throw new IllegalArgumentException(address + " is invalid");
		}
		String host = blocks[0];
		int port = 80;
		if (blocks.length > 1) {
			port = Integer.valueOf(blocks[1]);
		} else {
			address += ":" + port; // use default port 80
		}
		String serverAddr = String.format("%s:%d", host, port);
		return serverAddr;
	}

	public static String getLocalAddress(String address) {
		String[] blocks = address.split("[:]");
		if (blocks.length != 2) {
			throw new IllegalArgumentException(address + " is invalid address");
		}
		String host = blocks[0];
		int port = Integer.valueOf(blocks[1]);

		if ("0.0.0.0".equals(host)) {
			return String.format("%s:%d", NetUtil.getLocalIp(), port);
		}
		return address;
	}

	private static int matchedIndex(String ip, String[] prefix) {
		for (int i = 0; i < prefix.length; i++) {
			String p = prefix[i];
			if ("*".equals(p)) { // "*" means any public IP: skip loopback and private ranges
				if (ip.startsWith("127.") ||
					ip.startsWith("10.") ||
					ip.startsWith("172.") ||
					ip.startsWith("192.")) {
					continue;
				}
				return i;
			} else {
				if (ip.startsWith(p)) {
					return i;
				}
			}
		}
		return -1;
	}

	public static String getLocalIp(String ipPreference) {
		if (ipPreference == null) {
			ipPreference = "*>10>172>192>127";
		}
		String[] prefix = ipPreference.split("[>]+");
		try {
			Pattern pattern = Pattern.compile("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+");
			Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
			String matchedIp = null;
			int matchedIdx = -1;
			// scan all interfaces and keep the IP with the best (lowest) preference index
			while (interfaces.hasMoreElements()) {
				NetworkInterface ni = interfaces.nextElement();
				Enumeration<InetAddress> en = ni.getInetAddresses();
				while (en.hasMoreElements()) {
					InetAddress addr = en.nextElement();
					String ip = addr.getHostAddress();
					Matcher matcher = pattern.matcher(ip);
					if (matcher.matches()) {
						int idx = matchedIndex(ip, prefix);
						if (idx == -1) continue;
						if (matchedIdx == -1) {
							matchedIdx = idx;
							matchedIp = ip;
						} else {
							if (matchedIdx > idx) {
								matchedIdx = idx;
								matchedIp = ip;
							}
						}
					}
				}
			}
			if (matchedIp != null) return matchedIp;
			return "127.0.0.1";
		} catch (Exception e) {
			return "127.0.0.1";
		}
	}

	public static String getLocalIp() {
		return getLocalIp("*>10>172>192>127");
	}

	public static String remoteAddress(SocketChannel channel) {
		SocketAddress addr = channel.socket().getRemoteSocketAddress();
		return String.format("%s", addr);
	}

	public static String localAddress(SocketChannel channel) {
		SocketAddress addr = channel.socket().getLocalSocketAddress();
		String res = String.format("%s", addr);
		return addr == null ? res : res.substring(1); // strip the leading "/"
	}

	public static String getPid() {
		RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
		String name = runtime.getName(); // "pid@hostname"
		int index = name.indexOf("@");
		if (index != -1) {
			return name.substring(0, index);
		}
		return null;
	}

	public static String getLocalHostName() {
		try {
			return (InetAddress.getLocalHost()).getHostName();
		} catch (UnknownHostException uhe) {
			String host = uhe.getMessage();
			if (host != null) {
				int colon = host.indexOf(':');
				if (colon > 0) {
					return host.substring(0, colon);
				}
			}
			return "UnknownHost";
		}
	}
}
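A quick sanity check of the helper (a hypothetical demo class, not in the original article). With the default preference string "*>10>172>192>127", a public address wins, then 10.x/172.x/192.x, with 127.x as the last resort:

public class NetUtilDemo {
	public static void main(String[] args) {
		System.out.println(NetUtil.getLocalIp());        // e.g. 192.168.11.31
		System.out.println(NetUtil.getLocalHostName());  // machine host name
		System.out.println(NetUtil.getPid());            // current JVM pid
	}
}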

Start the project and hit the /index and /err endpoints; you can see that two log files, app-collector.log and error-collector.log, are generated in the project:

We deploy the SpringBoot service on the machine 192.168.11.31.

Kafka installation and startup

Kafka download address:

http://kafka.apache.org/downl…

Kafka installation steps: Kafka depends on ZooKeeper, so first get a ZooKeeper environment ready (three nodes are enough), and then we can build the Kafka broker.

## Extract:
tar -zxvf kafka_2.12-2.1.0.tgz -C /usr/local/
## Rename:
mv kafka_2.12-2.1.0/ kafka_2.12
## Enter the extracted directory and edit the server.properties file:
vim /usr/local/kafka_2.12/config/server.properties
## Change the following settings:
broker.id=0
port=9092
host.name=192.168.11.51
advertised.host.name=192.168.11.51
log.dirs=/usr/local/kafka_2.12/kafka-logs
num.partitions=2
zookeeper.connect=192.168.11.111:2181,192.168.11.112:2181,192.168.11.113:2181

## Create the log directory:
mkdir /usr/local/kafka_2.12/kafka-logs

## Start kafka:
/usr/local/kafka_2.12/bin/kafka-server-start.sh /usr/local/kafka_2.12/config/server.properties &

Create two topics

## create the topics
kafka-topics.sh --zookeeper 192.168.11.111:2181 --create --topic app-log-collector --partitions 1  --replication-factor 1
kafka-topics.sh --zookeeper 192.168.11.111:2181 --create --topic error-log-collector --partitions 1  --replication-factor 1 

We can inspect the topic status with:

kafka-topics.sh --zookeeper 192.168.11.111:2181 --topic app-log-collector --describe

You can see that the two topics, app-log-collector and error-log-collector, have been created successfully.
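If you prefer to verify from code rather than the shell, here is a minimal sketch using the Kafka AdminClient (an assumption on my part, not part of the original article; it requires a kafka-clients dependency matching the broker, e.g. 2.1.0, on the classpath):

import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class TopicCheck {
	public static void main(String[] args) throws Exception {
		Properties props = new Properties();
		props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.11.51:9092");
		try (AdminClient admin = AdminClient.create(props)) {
			// app-log-collector and error-log-collector should both be listed
			admin.listTopics().names().get().forEach(System.out::println);
		}
	}
}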

Filebeat installation and startup

Download filebeat:

cd /usr/local/software
tar -zxvf filebeat-6.6.0-linux-x86_64.tar.gz -C /usr/local/
cd /usr/local
mv filebeat-6.6.0-linux-x86_64/ filebeat-6.6.0

Configure filebeat; you can use the yml configuration below as a reference.

vim /usr/local/filebeat-6.6.0/filebeat.yml
###################### Filebeat Configuration Example #########################
filebeat.prospectors:

- input_type: log

  paths:
    ## app-<service name>.log; the path is hard-coded on purpose so that log
    ## rotation does not cause old, rotated files to be collected again
    - /usr/local/logs/app-collector.log
  ## value written to the ES _type field
  document_type: "app-log"
  multiline:
    #pattern: '^\s*(\d{4}|\d{2})\-(\d{2}|[a-zA-Z]{3})\-(\d{2}|\d{4})'   # alternative: match lines starting with a timestamp such as 2017-11-15 08:04:23:889
    pattern: '^\['                              # match lines starting with "["
    negate: true                                # lines NOT matching the pattern get merged
    match: after                                # append them to the end of the previous line
    max_lines: 2000                             # maximum number of lines to merge
    timeout: 2s                                 # flush if no new log event arrives in time
  fields:
    logbiz: collector
    logtopic: app-log-collector   ## per-service value, used as the kafka topic
    env: dev

- input_type: log

  paths:
    - /usr/local/logs/error-collector.log
  document_type: "error-log"
  multiline:
    #pattern: '^\s*(\d{4}|\d{2})\-(\d{2}|[a-zA-Z]{3})\-(\d{2}|\d{4})'   # alternative: match lines starting with a timestamp such as 2017-11-15 08:04:23:889
    pattern: '^\['                              # match lines starting with "["
    negate: true                                # lines NOT matching the pattern get merged
    match: after                                # append them to the end of the previous line
    max_lines: 2000                             # maximum number of lines to merge
    timeout: 2s                                 # flush if no new log event arrives in time
  fields:
    logbiz: collector
    logtopic: error-log-collector   ## per-service value, used as the kafka topic
    env: dev
    
output.kafka:
  enabled: true
  hosts: ["192.168.11.51:9092"]
  topic: '%{[fields.logtopic]}'
  partition.hash:
    reachable_only: true
  compression: gzip
  max_message_bytes: 1000000
  required_acks: 1
logging.to_files: true

Starting filebeat:

Verify the configuration is correct:

cd /usr/local/filebeat-6.6.0
./filebeat test config -c filebeat.yml
## Config OK

Start filebeat:

/usr/local/filebeat-6.6.0/filebeat &

Check whether it started successfully:

ps -ef | grep filebeat

You should see that filebeat is up and running.

Then hit 192.168.11.31:8001/index and 192.168.11.31:8001/err and look at Kafka's log directory: the app-log-collector-0 and error-log-collector-0 partition directories have been created, which shows that filebeat has collected the data and delivered it to Kafka.
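To double-check the message content rather than just the directory names, a minimal consumer sketch can print a few raw messages from the topic. Again, this assumes the kafka-clients dependency and is not part of the original setup:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class LogTopicPeek {
	public static void main(String[] args) {
		Properties props = new Properties();
		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.11.51:9092");
		props.put(ConsumerConfig.GROUP_ID_CONFIG, "peek-group");
		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
		try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
			consumer.subscribe(Collections.singletonList("app-log-collector"));
			ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
			for (ConsumerRecord<String, String> record : records) {
				System.out.println(record.value()); // JSON envelope produced by filebeat
			}
		}
	}
}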

Logstash installation

Create a new folder under the logstash installation directory:

mkdir script

Then cd into that folder and create a logstash-script.conf file:

cd script
vim logstash-script.conf
## The multiline mechanism can also be used for other stack-style output, e.g. the Linux kernel log.
input {
  kafka {
    ## app-log-<service name>
    topics_pattern => "app-log-.*"
    bootstrap_servers => "192.168.11.51:9092"
    codec => json
    consumer_threads => 1   ## increase to raise the number of parallel consumer threads
    decorate_events => true
    #auto_offset_reset => "latest"
    group_id => "app-log-group"
  }

  kafka {
    ## error-log-<service name>
    topics_pattern => "error-log-.*"
    bootstrap_servers => "192.168.11.51:9092"
    codec => json
    consumer_threads => 1
    decorate_events => true
    #auto_offset_reset => "latest"
    group_id => "error-log-group"
  }

}

filter {

  ## timezone conversion: derive an index date string from the event timestamp
  ruby {
    code => "event.set('index_time',event.timestamp.time.localtime.strftime('%Y.%m.%d'))"
  }

  if "app-log" in [fields][logtopic]{
    grok {
      ## expression matching the log4j2 pattern layout the SpringBoot app writes
      match => ["message", "\[%{NOTSPACE:currentDateTime}\] \[%{NOTSPACE:level}\] \[%{NOTSPACE:thread-id}\] \[%{NOTSPACE:class}\] \[%{DATA:hostName}\] \[%{DATA:ip}\] \[%{DATA:applicationName}\] \[%{DATA:location}\] \[%{DATA:messageInfo}\] ## (\'\'|%{QUOTEDSTRING:throwable})"]
    }
  }

  if "error-log" in [fields][logtopic]{
    grok {
      ## same expression, applied to the error log topic
      match => ["message", "\[%{NOTSPACE:currentDateTime}\] \[%{NOTSPACE:level}\] \[%{NOTSPACE:thread-id}\] \[%{NOTSPACE:class}\] \[%{DATA:hostName}\] \[%{DATA:ip}\] \[%{DATA:applicationName}\] \[%{DATA:location}\] \[%{DATA:messageInfo}\] ## (\'\'|%{QUOTEDSTRING:throwable})"]
    }
  }

}

## test output to the console:
output {
  stdout { codec => rubydebug }
}

## elasticsearch output:
output {

  if "app-log" in [fields][logtopic]{
    ## es plugin
    elasticsearch {
      # es server address
      hosts => ["192.168.11.35:9200"]
      # username and password
      user => "elastic"
      password => "123456"
      ## index name; anything after a "+" would automatically be treated as a date format, e.g.:
      ## javalog-app-service-2019.01.23
      index => "app-log-%{[fields][logbiz]}-%{index_time}"
      # sniff the cluster nodes (usually set to true); see http://192.168.11.35:9200/_nodes/http?pretty
      # sniffing load-balances log delivery across the es cluster
      sniffing => true
      # logstash ships with a default mapping template; overwrite it
      template_overwrite => true
    }
  }

  if "error-log" in [fields][logtopic]{
    elasticsearch {
      hosts => ["192.168.11.35:9200"]
      user => "elastic"
      password => "123456"
      index => "error-log-%{[fields][logbiz]}-%{index_time}"
      sniffing => true
      template_overwrite => true
    }
  }

}

Start logstash:

/usr/local/logstash-6.6.0/bin/logstash -f /usr/local/logstash-6.6.0/script/logstash-script.conf &

Once startup completes, hit 192.168.11.31:8001/err again.

You can see the console begin printing the parsed log events.

ElasticSearch and Kibana

I have not written a post on setting up ES and Kibana before, and there is plenty of material online, so please search for it yourself.

Once they are running, open the Kibana management page at 192.168.11.35:5601 and choose Management -> Kibana -> Index Patterns.

Then Create index pattern:

  • Enter app-log-* as the index pattern
  • Choose currentDateTime as the Time Filter field name

With that, the index is created successfully.

Hit 192.168.11.31:8001/err once more, and this time you can see that a log entry has been matched,

showing the full details of the log event.

At this point, our complete log collection and visualization pipeline is up and running!

Original article: https://blog.csdn.net/lt32603…

Copyright notice: this is an original article by CSDN blogger 「简略随风」, licensed under CC 4.0 BY-SA; please include the original source link and this notice when reposting.

