System optimization
#!/bin/bash
#############################################################
# File Name: centos7-optimization.sh
# Author: Sean_Li
# Created Time: 20210414
#==================================================================
echo "check centos7 or centos6"
VERSION=`cat /etc/redhat-release|awk -F " " '{print \$3}'|awk -F "." '{print \$1}'`
if [ "${VERSION}" == "6" ];then
    echo "centos6"
    exit 3
else
    echo "centos7"
fi

# history: record a timestamp and the user for every shell command
export HISTTIMEFORMAT="%F %T `whoami` " && echo 'export HISTTIMEFORMAT="%F %T `whoami` "' >> /etc/profile && source /etc/profile

# Add a public DNS server
cat >> /etc/resolv.conf << EOF
nameserver 114.114.114.114
EOF

# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0

# Disable the firewall
systemctl disable firewalld.service
systemctl stop firewalld.service

# Change the locale
sed -i 's/LANG="en_US.UTF-8"/LANG="zh_CN.UTF-8"/' /etc/locale.conf
#localectl set-locale LANG=zh_CN.UTF-8
source /etc/locale.conf

# Replace the yum repo with the Aliyun mirror
yum install wget telnet -y
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

# Add the Aliyun epel repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

# Rebuild the yum cache and install common packages
yum clean all
yum makecache
yum install -y ntpdate net-tools lrzsz tree cmake gcc gcc-c++ autoconf libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel libxml2 libxml2-devel zlib zlib-devel glibc glibc-devel glib2 glib2-devel bzip2 bzip2-devel ncurses ncurses-devel curl curl-devel libxslt-devel libtool-ltdl-devel make wget docbook-dtds asciidoc e2fsprogs-devel gd gd-devel openssl openssl-devel lsof git unzip gettext-devel gettext libevent libevent-devel pcre pcre-devel vim readline readline-devel

# Hostname: derive it from the last two octets of the IP address
ipname=`ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:" | awk -F '.' '{print $3"_"$4}'`
echo $ipname
hostnamectl set-hostname insurace-$ipname

# Sync time
/usr/sbin/ntpdate cn.pool.ntp.org
echo "* 4 * * * /usr/sbin/ntpdate cn.pool.ntp.org > /dev/null 2>&1" >> /var/spool/cron/root
systemctl restart crond.service
hwclock --systohc
timedatectl set-timezone Asia/Shanghai

# Configure ssh
sed -i 's/^GSSAPIAuthentication yes$/GSSAPIAuthentication no/' /etc/ssh/sshd_config
sed -i 's/#UseDNS yes/UseDNS no/' /etc/ssh/sshd_config  # disable reverse DNS lookups of clients
#sed -i 's/#Port 22/Port 2223/' /etc/ssh/sshd_config
systemctl restart sshd.service

# Maximum number of open files
ulimit -SHn 102400
echo "ulimit -SHn 102400" >> /etc/rc.local
chmod +x /etc/rc.d/rc.local
cat >> /etc/security/limits.conf << EOF
* soft nofile 655350
* hard nofile 655350
EOF
sed -i 's/4096/65535/g' /etc/security/limits.d/20-nproc.conf

# Kernel parameter tuning
cat >> /etc/sysctl.conf << EOF
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
# How long before stale ARP neighbor entries are checked
net.ipv4.neigh.default.gc_stale_time=120
# Use arp_announce / arp_ignore to fix ARP mapping issues
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.all.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
# Guard against amplification attacks
net.ipv4.icmp_echo_ignore_broadcasts = 1
# Enable protection against bogus ICMP error responses
net.ipv4.icmp_ignore_bogus_error_responses = 1
# Disable IP forwarding
net.ipv4.ip_forward = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
# Enable reverse path filtering
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
# Reject source-routed packets
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.accept_source_route = 0
# Disable the magic SysRq key
kernel.sysrq = 0
# Append the PID to core dump file names
kernel.core_uses_pid = 1
# Enable SYN flood protection
net.ipv4.tcp_syncookies = 1
# Message queue limits
kernel.msgmnb = 65536
kernel.msgmax = 65536
# Maximum shared memory segment size in bytes
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
# Maximum number of TIME_WAIT sockets (default 180000)
net.ipv4.tcp_max_tw_buckets = 6000
net.ipv4.tcp_sack = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
# Maximum number of packets queued when an interface receives them faster than the kernel can process them
net.core.netdev_max_backlog = 262144
# Limit intended only to guard against simple DoS attacks
net.ipv4.tcp_max_orphans = 3276800
# Maximum number of queued connection requests not yet acknowledged by the client
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_timestamps = 0
# Number of SYN+ACK packets sent before the kernel gives up on a connection
net.ipv4.tcp_synack_retries = 1
# Number of SYN packets sent before the kernel gives up on a connection
net.ipv4.tcp_syn_retries = 1
# Enable fast recycling of TIME_WAIT sockets
net.ipv4.tcp_tw_recycle = 1
# Allow TIME_WAIT sockets to be reused for new TCP connections
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_fin_timeout = 1
# How often TCP sends keepalive messages when keepalive is enabled (default: 2 hours)
net.ipv4.tcp_keepalive_time = 1800
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
# Local port range the system is allowed to use
net.ipv4.ip_local_port_range = 1024 65000
# Increase the conntrack table size (default 65536)
net.netfilter.nf_conntrack_max=655350
net.netfilter.nf_conntrack_tcp_timeout_established=1200
# Make sure no one can alter the routing tables
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.default.secure_redirects = 0
vm.max_map_count = 1000000
fs.nr_open = 10000000
fs.file-max = 11000000
EOF
/sbin/sysctl -p
echo "-----------------------success------------------------------"
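Once the script has run, it is worth checking that the limits and kernel parameters actually took effect. A minimal sanity check might look like the sketch below; the parameter names are the ones set above, and `nf_conntrack_max` may not be readable until the nf_conntrack module has been loaded.

```bash
# Post-run sanity check (sketch)
ulimit -n; ulimit -Hn                         # expect the values configured above in a new login shell
getenforce                                    # expect Permissive now, Disabled after a reboot
systemctl is-active firewalld                 # expect "inactive"
sysctl net.ipv4.tcp_tw_reuse net.ipv4.tcp_max_syn_backlog
sysctl net.netfilter.nf_conntrack_max         # may fail until the nf_conntrack module is loaded
```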
Software downloads
- wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.6.2-linux-x86_64.tar.gz
- wget https://archive.apache.org/dist/kafka/2.2.1/kafka_2.11-2.2.1.tgz
- wget https://artifacts.elastic.co/downloads/kibana/kibana-7.6.2-linux-x86_64.tar.gz
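It is a good idea to verify the downloads before unpacking them. A minimal sketch, assuming the `.sha512` companion files that Elastic publishes next to its artifacts; the Kafka checksum is published on the Apache archive page and can be compared by eye:

```bash
# Optional download verification (sketch)
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.6.2-linux-x86_64.tar.gz.sha512
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.6.2-linux-x86_64.tar.gz.sha512
sha512sum -c elasticsearch-7.6.2-linux-x86_64.tar.gz.sha512
sha512sum -c kibana-7.6.2-linux-x86_64.tar.gz.sha512
sha512sum kafka_2.11-2.2.1.tgz    # compare against the checksum published at archive.apache.org
```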
Install elasticsearch-7.6.2
[root@elk24 tar]# ls
elasticsearch-7.6.2-linux-x86_64.tar.gz  kibana-7.6.2-linux-x86_64.tar.gz
[root@elk24 tar]# tar -xf elasticsearch-7.6.2-linux-x86_64.tar.gz
[root@elk24 tar]# mv elasticsearch-7.6.2-linux-x86_64 /data/
[root@elk24 tar]# tar -xf kibana-7.6.2-linux-x86_64.tar.gz
[root@elk24 tar]# mv kibana-7.6.2-linux-x86_64 /data/
config
[root@elk24 tar]# cd /data/elasticsearch-7.6.2/config/
[root@elk24 config]# ls
elasticsearch.keystore  elasticsearch.yml  elasticsearch.yml.bak  jvm.options  log4j2.properties  role_mapping.yml  roles.yml  users  users_roles
[root@elk24 config]# cat elasticsearch.yml
cluster.name: insurance-pro-7.6.2
node.name: master-1
node.master: true
node.data: true
path.data: /data/elasticsearch-7.6.2/data
path.logs: /data/elasticsearch-7.6.2/logs
http.port: 9200
network.host: 0.0.0.0
cluster.initial_master_nodes: ["10.110.24.88"]
discovery.zen.ping.unicast.hosts: ["10.110.24.88"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 30s
discovery.zen.fd.ping_retries: 15
discovery.zen.fd.ping_interval: 20s
discovery.zen.master_election.ignore_non_master_pings: true
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: "Authorization,X-Requested-With,Content-Length,Content-Type"
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
search.max_buckets: 200000
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
gateway.expected_nodes: 1

### java
[root@elk24 ik]# cat /etc/profile
# /etc/profile
..
export JAVA_HOME=/data/elasticsearch-7.6.2/jdk
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
[root@elk24 ik]#
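With /etc/profile pointing JAVA_HOME at the JDK bundled inside the Elasticsearch distribution, a quick check (sketch) confirms that this java is the one on the PATH before anything is started:

```bash
# Confirm the bundled JDK is the one in use (sketch)
source /etc/profile
echo $JAVA_HOME        # expect /data/elasticsearch-7.6.2/jdk
which java             # expect /data/elasticsearch-7.6.2/jdk/bin/java
java -version
```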
kibana conf
[root@elk24 config]# ls
apm.js  kibana.yml  kibana.yml.bak
[root@elk24 config]# pwd
/data/kibana-7.6.2-linux-x86_64/config
[root@elk24 config]# cat kibana.yml
server.port: 5601
server.host: "0.0.0.0"
server.name: "10.110.24.88"
elasticsearch.hosts: ["http://10.110.24.88:9200"]
elasticsearch.username: "elastic"
elasticsearch.password: "Elast111111111#"
elasticsearch.ssl.verificationMode: none
elasticsearch.requestTimeout: 90000
i18n.locale: "zh-CN"
[root@elk24 config]#
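Once the elastic password has been set (see the password section further down), the endpoint and credentials referenced in kibana.yml can be checked with a one-liner before Kibana is started. A minimal sketch; substitute the real password:

```bash
# Verify the Elasticsearch endpoint and credentials used by kibana.yml (sketch)
curl -s -u elastic:'<password>' 'http://10.110.24.88:9200/_cluster/health?pretty'
```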
Install the IK analyzer plugin for Elasticsearch
[root@elk24 ik]# mkdir /data/elasticsearch-7.6.2/plugins/ik
### https://github.com/medcl/elasticsearch-analysis-ik/releases?after=v7.10.0 ##
[root@elk24 ik]# pwd
/data/elasticsearch-7.6.2/plugins/ik
[root@elk24 ik]# wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.6.2/elasticsearch-analysis-ik-7.6.2.zip
[root@elk24 ik]# unzip elasticsearch-analysis-ik-7.6.2.zip
[root@elk24 ik]# mv elasticsearch-analysis-ik-7.6.2.zip /tmp
[root@elk24 ik]# ls
commons-codec-1.9.jar  commons-logging-1.2.jar  config  elasticsearch-analysis-ik-7.6.2.jar  httpclient-4.5.2.jar  httpcore-4.4.4.jar  plugin-descriptor.properties  plugin-security.policy
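After restarting Elasticsearch so the plugin is loaded, the IK analyzer can be smoke-tested through the `_analyze` API. A sketch; replace the placeholder password:

```bash
# IK analyzer smoke test (sketch); ik_max_word and ik_smart are provided by the plugin
curl -s -u elastic:'<password>' -H 'Content-Type: application/json' \
  -X POST 'http://10.110.24.88:9200/_analyze?pretty' \
  -d '{"analyzer": "ik_max_word", "text": "中华人民共和国"}'
```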
Crack the x-pack plugin to obtain a permanent Platinum license (for learning purposes only)
LicenseVerifier.java:

[root@elk24 opt]# cat LicenseVerifier.java
package org.elasticsearch.license;

/**
 * Responsible for verifying signed licenses
 */
public class LicenseVerifier {
    /**
     * verifies the license content with the signature using the packaged
     * public key
     * @param license to verify
     * @return true if valid, false otherwise
     */
    public static boolean verifyLicense(final License license, byte[] publicKeyData) {
        return true;
    }

    public static boolean verifyLicense(final License license) {
        return true;
    }
}

XPackBuild.java:

[root@elk24 opt]# cat XPackBuild.java
package org.elasticsearch.xpack.core;

import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;

import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.jar.JarInputStream;
import java.util.jar.Manifest;

public class XPackBuild {
    public static final XPackBuild CURRENT;

    static {
        CURRENT = new XPackBuild("Unknown", "Unknown");
    }

    /**
     * Returns path to xpack codebase path
     */
    @SuppressForbidden(reason = "looks up path of xpack.jar directly")
    static Path getElasticsearchCodebase() {
        URL url = XPackBuild.class.getProtectionDomain().getCodeSource().getLocation();
        try {
            return PathUtils.get(url.toURI());
        } catch (URISyntaxException bogus) {
            throw new RuntimeException(bogus);
        }
    }

    private String shortHash;
    private String date;

    XPackBuild(String shortHash, String date) {
        this.shortHash = shortHash;
        this.date = date;
    }

    public String shortHash() {
        return shortHash;
    }

    public String date() {
        return date;
    }
}

[root@elk24 opt]# javac -cp "/data/elasticsearch-7.6.2/lib/elasticsearch-7.6.2.jar:/data/elasticsearch-7.6.2/lib/lucene-core-8.4.0.jar:/data/elasticsearch-7.6.2/modules/x-pack-core/x-pack-core-7.6.2.jar" LicenseVerifier.java
[root@elk24 opt]# javac -cp "/data/elasticsearch-7.6.2/lib/elasticsearch-7.6.2.jar:/data/elasticsearch-7.6.2/lib/lucene-core-8.4.0.jar:/data/elasticsearch-7.6.2/modules/x-pack-core/x-pack-core-7.6.2.jar:/data/elasticsearch-7.6.2/lib/elasticsearch-core-7.6.2.jar" XPackBuild.java
Copy the /data/elasticsearch-7.6.2/modules/x-pack-core/x-pack-core-7.6.2.jar file and the two compiled class files under /opt to your own machine, open x-pack-core-7.6.2.jar with the 7-Zip archiver, and replace the two class files inside the jar.
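If no workstation with 7-Zip is at hand, the jar can also be patched directly on the server with the JDK's `jar` tool. This is only a sketch under the assumption that the two classes were compiled into /opt and belong at their usual package paths; back up the original jar first:

```bash
# Patch x-pack-core-7.6.2.jar in place (sketch; assumes LicenseVerifier.class and XPackBuild.class are in /opt)
cd /opt
cp /data/elasticsearch-7.6.2/modules/x-pack-core/x-pack-core-7.6.2.jar ./x-pack-core-7.6.2.jar.bak
cp /data/elasticsearch-7.6.2/modules/x-pack-core/x-pack-core-7.6.2.jar .
mkdir -p org/elasticsearch/license org/elasticsearch/xpack/core
cp LicenseVerifier.class org/elasticsearch/license/
cp XPackBuild.class org/elasticsearch/xpack/core/
jar -uf x-pack-core-7.6.2.jar \
    org/elasticsearch/license/LicenseVerifier.class \
    org/elasticsearch/xpack/core/XPackBuild.class
cp x-pack-core-7.6.2.jar /data/elasticsearch-7.6.2/modules/x-pack-core/
```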
Start Elasticsearch
[root@elk24 opt]# groupadd elasticsearch
[root@elk24 opt]# useradd elasticsearch -G elasticsearch
[root@elk24 opt]# chown -R elasticsearch.elasticsearch /data/elasticsearch-7.6.2/
[root@elk24 opt]# su - elasticsearch
[elasticsearch@elk24 ~]$ vim /data/elasticsearch-7.6.2/config/jvm.options
-Xms8g
-Xmx8g
[elasticsearch@elk24 ~]$ /data/elasticsearch-7.6.2/bin/elasticsearch -d
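A quick way to confirm the node actually came up (sketch; the log file is named after cluster.name from elasticsearch.yml):

```bash
# Verify Elasticsearch started (sketch)
ss -lntp | grep -E ':9200|:9300'                                    # HTTP and transport ports listening
tail -n 50 /data/elasticsearch-7.6.2/logs/insurance-pro-7.6.2.log
curl -s -o /dev/null -w '%{http_code}\n' http://10.110.24.88:9200   # expect 401 while no password is set, since security is enabled
```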
Configure passwords for the Elasticsearch cluster's built-in admin users
[elasticsearch@elk24 bin]$ ./elasticsearch-setup-passwords interactive
Initiating the setup of passwords for reserved users elastic,apm_system,kibana,logstash_system,beats_system,remote_monitoring_user.
You will be prompted to enter passwords as the process progresses.
Please confirm that you would like to continue [y/N]y

Enter password for [elastic]:
Reenter password for [elastic]:
.
..
Changed password for user [elastic]
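With the passwords in place, the credentials and the license state can be verified from the command line. A sketch; substitute the real password:

```bash
# Verify credentials and the active license (sketch)
curl -s -u elastic:'<password>' 'http://10.110.24.88:9200/_cluster/health?pretty'
curl -s -u elastic:'<password>' 'http://10.110.24.88:9200/_license?pretty'
```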
kafka conf
[root@kafka24 ~]# wget https://archive.apache.org/dist/kafka/2.2.1/kafka_2.11-2.2.1.tgz
[root@kafka24 ~]# cd /data/
[root@kafka24 data]# ls
kafka  kafka-logs  zookeeper
[root@kafka24 data]# cd kafka/config/
[root@kafka24 config]# cat zookeeper.properties
dataDir=/data/zookeeper
clientPort=2181
maxClientCnxns=0
tickTime=2000
initLimit=10
syncLimit=5
[root@kafka24 config]#
[root@kafka24 config]# grep -v "#" server.properties
broker.id=0
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=48
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.110.24.89:2181
listeners=PLAINTEXT://10.110.24.89:9092
advertised.listeners=PLAINTEXT://10.110.24.89:9092
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
[root@kafka24 config]#
[root@kafka24 data]# cat /etc/rc.local
touch /var/lock/subsys/local
ulimit -SHn 102400
#/data/zookeeper/bin/zkServer.sh start
#/data/kafka/bin/kafka-server-start.sh -daemon /data/kafka/config/server.properties
cd /data/kafka/bin && nohup ./zookeeper-server-start.sh ../config/zookeeper.properties &
cd /data/kafka/bin && nohup ./kafka-server-start.sh ../config/server.properties > kafka.log &
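Once ZooKeeper and Kafka are up, a throwaway topic makes a useful smoke test. A sketch; the topic name is arbitrary:

```bash
# Kafka smoke test (sketch)
cd /data/kafka/bin
./kafka-topics.sh --bootstrap-server 10.110.24.89:9092 --create --topic smoke-test --partitions 1 --replication-factor 1
echo "hello" | ./kafka-console-producer.sh --broker-list 10.110.24.89:9092 --topic smoke-test
./kafka-console-consumer.sh --bootstrap-server 10.110.24.89:9092 --topic smoke-test --from-beginning --max-messages 1
./kafka-topics.sh --bootstrap-server 10.110.24.89:9092 --delete --topic smoke-test
```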
logstash config
[root@insurace-24 conf]# cat logstash-configmap-template.yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: logstash-#project#-#topics_pattern#-#profile#
  namespace: default
data:
  logstash-#project#-#topics_pattern#-#profile#.conf: |
    input {
      kafka {
        bootstrap_servers => ["10.110.24.89:9092"]
        topics_pattern => "#topics_pattern#.*"
        codec => "json"
        consumer_threads => 5
        auto_offset_reset => "latest"
        group_id => "#topics_pattern#"
        client_id => "#topics_pattern#"
        decorate_events => true
        #auto_commit_interval_ms => 5000
      }
    }
    filter {
      json {
        source => "message"
      }
      date {
        match => [ "timestamp" ,"dd/MMM/YYYY:HH:mm:ss Z" ]
      }
      mutate {
        remove_field => "timestamp"
      }
      if "_geoip_lookup_failure" in [tags] {
        drop { }
      }
    }
    output {
      elasticsearch {
        hosts => ["10.110.24.88:9200"]
        index => "logstash-#project#-#topics_pattern#-%{+YYYY-MM-dd}"
        user => elastic
        password => "Elasti111111111111111*#"
      }
      stdout { codec => rubydebug }
    }
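The #project#, #topics_pattern# and #profile# markers are placeholders that have to be substituted before the ConfigMap is applied. A minimal rendering sketch with sed; the example values are hypothetical and it assumes a Logstash deployment in the cluster mounts this ConfigMap:

```bash
# Render the template for one project/topic/profile and apply it (sketch; example values are hypothetical)
PROJECT=insurance TOPICS=app PROFILE=pro
sed -e "s/#project#/${PROJECT}/g" \
    -e "s/#topics_pattern#/${TOPICS}/g" \
    -e "s/#profile#/${PROFILE}/g" \
    logstash-configmap-template.yaml > logstash-${PROJECT}-${TOPICS}-${PROFILE}.yaml
kubectl apply -f logstash-${PROJECT}-${TOPICS}-${PROFILE}.yaml
```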
kibana start
[root@elk24 ik]# cat /etc/rc.local
ulimit -SHn 102400
su elasticsearch -c "/data/elasticsearch-7.6.2/bin/elasticsearch -d"
cd /data/kibana-7.6.2-linux-x86_64/bin && nohup /data/kibana-7.6.2-linux-x86_64/bin/kibana --allow-root &
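After a reboot, both services can be checked in one go. A sketch; substitute the real password:

```bash
# Confirm Elasticsearch and Kibana are listening after boot (sketch)
ss -lntp | grep -E ':9200|:5601'
curl -s -u elastic:'<password>' 'http://10.110.24.88:5601/api/status' | head -c 300; echo
```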