系统优化
#!/bin/bash
#############################################################
# File Name: centos7-optimization.sh
# Author: Sean_Li
# Created Time: 20210414
# Purpose: one-shot base optimization for a fresh CentOS 7 host.
#==================================================================
echo "check centos7 or centos6"
# /etc/redhat-release examples:
#   CentOS 6: "CentOS release 6.9 (Final)"        -> field 3 = "6.9"     -> "6"
#   CentOS 7: "CentOS Linux release 7.9.2009 ..." -> field 3 = "release" (!= "6")
# Bug fixes vs. original: the awk program text was glued onto -F"" (so it
# became the field separator and awk had no program), and '[' had no
# trailing space, so bash reported '["7": command not found'.
VERSION=$(awk '{print $3}' /etc/redhat-release | awk -F. '{print $1}')
if [ "${VERSION}" == "6" ]; then
    echo "centos6"
    exit 3   # CentOS 6 is unsupported by this script
else
    echo "centos7"
fi
# Timestamp + invoking user in shell history, for this shell now and
# persistently via /etc/profile (the single-quoted echo writes the
# literal backquoted `whoami` so it is evaluated per login, not here).
export HISTTIMEFORMAT="%F %T `whoami`" && echo 'export HISTTIMEFORMAT="%F %T `whoami` "' >> /etc/profile && source /etc/profile
# Append a public DNS resolver (114DNS, commonly reachable in mainland China).
cat >> /etc/resolv.conf << EOF
nameserver 114.114.114.114
EOF
# Disable SELinux: permanently in the config file, and immediately for
# the running kernel (setenforce 0 = permissive until reboot).
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
# Stop firewalld and keep it from starting on boot.
systemctl disable firewalld.service
systemctl stop firewalld.service
# Switch the system locale to Simplified Chinese (zh_CN.UTF-8).
sed -i 's/LANG="en_US.UTF-8"/LANG="zh_CN.UTF-8"/' /etc/locale.conf
#localectl set-locale LANG=zh_CN.UTF-8 source /etc/locale.conf
# Replace the stock Yum repos with Aliyun mirrors (faster inside China).
yum install wget telnet -y
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
# Add Aliyun's EPEL mirror.
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# Rebuild the yum metadata cache against the new repos.
yum clean all
yum makecache
# Base toolchain and common build/runtime dependencies.
# Fix: the original list contained a stray package name "l" (typo), which
# made 'yum install -y' abort with "No package l available" and skip the
# entire install; it has been removed.
yum install -y ntpdate net-tools lrzsz tree cmake gcc gcc-c++ autoconf libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel libxml2 libxml2-devel zlib zlib-devel glibc glibc-devel glib2 glib2-devel bzip2 bzip2-devel ncurses ncurses-devel curl curl-devel libxslt-devel libtool-ltdl-devel make wget docbook-dtds asciidoc e2fsprogs-devel gd gd-devel openssl openssl-devel lsof git unzip gettext-devel gettext libevent libevent-devel pcre pcre-devel vim readline readline-devel
# Build a hostname suffix from the machine IP's 3rd and 4th octets,
# e.g. 10.110.24.88 -> insurace-24_88.
# NOTE(review): 'tr -d "addr:"' deletes the characters a/d/r/: (left over
# from the CentOS 6 "inet addr:" format); harmless for the numeric octets
# kept here. Assumes exactly one non-loopback IPv4 interface — with more,
# $ipname holds multiple lines. TODO confirm on multi-homed hosts.
ipname=$(ifconfig -a | grep inet | grep -v 127.0.0.1 | grep -v inet6 | awk '{print $2}' | tr -d "addr:" | awk -F '.' '{print $3"_"$4}')
echo "$ipname"
hostnamectl set-hostname "insurace-$ipname"
# Sync the clock once now, then daily at 04:00 via root's crontab.
/usr/sbin/ntpdate cn.pool.ntp.org
# Fix: original schedule was "* 4 * * *", which fires every minute from
# 04:00 to 04:59; "0 4 * * *" runs the sync once per day as intended.
echo "0 4 * * * /usr/sbin/ntpdate cn.pool.ntp.org > /dev/null 2>&1" >> /var/spool/cron/root
systemctl restart crond.service
# Persist the (now synced) system time to the hardware clock; set timezone.
hwclock --systohc
timedatectl set-timezone Asia/Shanghai
# sshd tuning: disable GSSAPI auth and reverse-DNS lookups of clients,
# both of which slow down login on hosts without working Kerberos/DNS.
sed -i 's/^GSSAPIAuthentication yes$/GSSAPIAuthentication no/' /etc/ssh/sshd_config
sed -i 's/#UseDNS yes/UseDNS no/' /etc/ssh/sshd_config # disable reverse DNS resolution of clients
#sed -i 's/#Port 22/Port 2223/' /etc/ssh/sshd_config
systemctl restart sshd.service
# Raise max open file descriptors: current session, at boot via rc.local,
# and for login sessions via PAM limits.
ulimit -SHn 102400
echo "ulimit -SHn 102400" >> /etc/rc.local
chmod +x /etc/rc.d/rc.local
cat >> /etc/security/limits.conf << EOF
* soft nofile 655350
* hard nofile 655350
EOF
# Raise the per-user process limit (nproc) from the CentOS 7 default 4096.
sed -i 's/4096/65535/g' /etc/security/limits.d/20-nproc.conf
# Kernel tuning: append settings to /etc/sysctl.conf and apply immediately.
# The heredoc body (including its Chinese comments) is written verbatim to
# /etc/sysctl.conf, so it is left byte-for-byte unchanged here.
# NOTE(review): net.ipv4.tcp_tw_recycle breaks clients behind NAT and was
# removed in kernel 4.12; it still exists on CentOS 7's 3.10 kernel, but
# consider dropping it. net.netfilter.* keys only load if the conntrack
# module is present (firewalld is disabled above) — verify sysctl -p output.
cat >> /etc/sysctl.conf << EOF
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
#决定查看过期多久街坊条目
net.ipv4.neigh.default.gc_stale_time=120
#应用 arp_announce / arp_ignore 解决 ARP 映射问题
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.all.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
# 防止放大攻打
net.ipv4.icmp_echo_ignore_broadcasts = 1
# 开启歹意 icmp 谬误音讯爱护
net.ipv4.icmp_ignore_bogus_error_responses = 1
#敞开路由转发
net.ipv4.ip_forward = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
#开启反向门路过滤
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
#解决无源路由的包
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.accept_source_route = 0
#敞开 sysrq 性能
kernel.sysrq = 0
#core 文件名中增加 pid 作为扩展名
kernel.core_uses_pid = 1
# 开启 SYN 洪水攻打爱护
net.ipv4.tcp_syncookies = 1
#批改音讯队列长度
kernel.msgmnb = 65536
kernel.msgmax = 65536
#设置最大内存共享段大小 bytes
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
#timewait 的数量,默认 180000
net.ipv4.tcp_max_tw_buckets = 6000
net.ipv4.tcp_sack = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
#每个网络接口接管数据包的速率比内核解决这些包的速率快时,容许送到队列的数据包的最大数目
net.core.netdev_max_backlog = 262144
#限度仅仅是为了避免简略的 DoS 攻打
net.ipv4.tcp_max_orphans = 3276800
#未收到客户端确认信息的连贯申请的最大值
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_timestamps = 0
#内核放弃建设连贯之前发送 SYNACK 包的数量
net.ipv4.tcp_synack_retries = 1
#内核放弃建设连贯之前发送 SYN 包的数量
net.ipv4.tcp_syn_retries = 1
#启用 timewait 疾速回收
net.ipv4.tcp_tw_recycle = 1
#开启重用。容许将 TIME-WAIT sockets 从新用于新的 TCP 连贯
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_fin_timeout = 1
#当 keepalive 起用的时候,TCP 发送 keepalive 音讯的频度。缺省是 2 小时
net.ipv4.tcp_keepalive_time = 1800
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
#容许零碎关上的端口范畴
net.ipv4.ip_local_port_range = 1024 65000
#批改防火墙表大小,默认 65536
net.netfilter.nf_conntrack_max=655350
net.netfilter.nf_conntrack_tcp_timeout_established=1200
# 确保无人能批改路由表
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.default.secure_redirects = 0
vm.max_map_count = 1000000
fs.nr_open = 10000000
fs.file-max = 11000000
EOF
# Load the appended settings into the running kernel.
/sbin/sysctl -p
echo "-----------------------success------------------------------"
软件下载
- wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.6.2-linux-x86_64.tar.gz
- wget https://archive.apache.org/dist/kafka/2.2.1/kafka_2.11-2.2.1.tgz
- wget https://artifacts.elastic.co/downloads/kibana/kibana-7.6.2-linux-x86_64.tar.gz
安装 elasticsearch-7.6.2
[root@elk24 tar]# ls
elasticsearch-7.6.2-linux-x86_64.tar.gz kibana-7.6.2-linux-x86_64.tar.gz
[root@elk24 tar]# tar -xf elasticsearch-7.6.2-linux-x86_64.tar.gz
[root@elk24 tar]# mv elasticsearch-7.6.2-linux-x86_64 /data/
[root@elk24 tar]# tar -xf kibana-7.6.2-linux-x86_64.tar.gz
[root@elk24 tar]# mv kibana-7.6.2-linux-x86_64 /data/
config
[root@elk24 tar]# cd /data/elasticsearch-7.6.2/config/
[root@elk24 config]# ls
elasticsearch.keystore elasticsearch.yml elasticsearch.yml.bak jvm.options log4j2.properties role_mapping.yml roles.yml users users_roles
[root@elk24 config]# cat elasticsearch.yml
cluster.name: insurance-pro-7.6.2
node.name: master-1
node.master: true
node.data: true
path.data: /data/elasticsearch-7.6.2/data
path.logs: /data/elasticsearch-7.6.2/logs
http.port: 9200
network.host: 0.0.0.0
cluster.initial_master_nodes: ["10.110.24.88"]
discovery.zen.ping.unicast.hosts: ["10.110.24.88"]
discovery.zen.minimum_master_nodes: 2
discovery.zen.ping_timeout: 30s
discovery.zen.fd.ping_retries: 15
discovery.zen.fd.ping_interval: 20s
discovery.zen.master_election.ignore_non_master_pings: true
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: "Authorization,X-Requested-With,Content-Length,Content-Type"
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
search.max_buckets: 200000
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
gateway.expected_nodes: 1
###java
[root@elk24 ik]# cat /etc/profile
# /etc/profile
.
.
export JAVA_HOME=/data/elasticsearch-7.6.2/jdk
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
[root@elk24 ik]#
kibana conf
[root@elk24 config]# ls
apm.js kibana.yml kibana.yml.bak
[root@elk24 config]# pwd
/data/kibana-7.6.2-linux-x86_64/config
[root@elk24 config]# cat kibana.yml
server.port: 5601
server.host: "0.0.0.0"
server.name: "10.110.24.88"
elasticsearch.hosts: ["http://10.110.24.88:9200"]
elasticsearch.username: "elastic"
elasticsearch.password: "Elast111111111#"
elasticsearch.ssl.verificationMode: none
elasticsearch.requestTimeout: 90000
i18n.locale: "zh-CN"
[root@elk24 config]#
elasticsearch 安装 ik 分词插件
[root@elk24 ik]# mkdir /data/elasticsearch-7.6.2/plugins/ik
###https://github.com/medcl/elasticsearch-analysis-ik/releases?after=v7.10.0##
[root@elk24 ik]# pwd
/data/elasticsearch-7.6.2/plugins/ik
[root@elk24 ik]# wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.6.2/elasticsearch-analysis-ik-7.6.2.zip
[root@elk24 ik]# unzip elasticsearch-analysis-ik-7.6.2.zip
[root@elk24 ik]# mv elasticsearch-analysis-ik-7.6.2.zip /tmp
[root@elk24 ik]# ls
commons-codec-1.9.jar commons-logging-1.2.jar config elasticsearch-analysis-ik-7.6.2.jar httpclient-4.5.2.jar httpcore-4.4.4.jar plugin-descriptor.properties plugin-security.policy
破解 x-pack 插件获取永久白金特权(仅供学习使用)
LicenseVerifier.java 文件
[root@elk24 opt]# cat LicenseVerifier.java
package org.elasticsearch.license;
/**
 * Drop-in replacement for Elasticsearch's bundled license verifier.
 * <p>
 * Both overloads unconditionally return {@code true}, so any license —
 * including a self-generated platinum one — passes signature verification.
 * Compiled and swapped into x-pack-core-7.6.2.jar (study purposes only).
 */
public class LicenseVerifier {
/**
 * @param license license to "verify" (ignored)
 * @param publicKeyData packaged public key (ignored)
 * @return always {@code true}
 */
public static boolean verifyLicense(final License license, byte[] publicKeyData) {return true;}
/** Overload without key material; also always {@code true}. */
public static boolean verifyLicense(final License license) {return true;}
}
XPackBuild.java 文件
[root@elk24 opt]# cat XPackBuild.java
package org.elasticsearch.xpack.core;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.jar.JarInputStream;
import java.util.jar.Manifest;
/**
 * Replacement for x-pack's build-info class. The original static
 * initializer read the jar manifest to self-verify the build; this
 * version hard-codes CURRENT to "Unknown"/"Unknown" so the tamper
 * check never consults the (modified) jar. Study purposes only.
 */
public class XPackBuild {
public static final XPackBuild CURRENT;
// Hard-coded: skips the manifest-based hash/date lookup entirely.
static {CURRENT = new XPackBuild("Unknown", "Unknown");
}
/**
 * Returns path to the xpack codebase (location of the jar this class
 * was loaded from).
 */
@SuppressForbidden(reason = "looks up path of xpack.jar directly")
static Path getElasticsearchCodebase() {URL url = XPackBuild.class.getProtectionDomain().getCodeSource().getLocation();
try {return PathUtils.get(url.toURI());
} catch (URISyntaxException bogus) {throw new RuntimeException(bogus);
}
}
// Build metadata; both fixed to "Unknown" by the static initializer above.
private String shortHash;
private String date;
XPackBuild(String shortHash, String date) {
this.shortHash = shortHash;
this.date = date;
}
/** @return the (hard-coded) short commit hash */
public String shortHash() {return shortHash;}
/** @return the (hard-coded) build date */
public String date() {return date;}
}
[root@elk24 opt]# javac -cp "/data/elasticsearch-7.6.2/lib/elasticsearch-7.6.2.jar:/data/elasticsearch-7.6.2/lib/lucene-core-8.4.0.jar:/data/elasticsearch-7.6.2/modules/x-pack-core/x-pack-core-7.6.2.jar" LicenseVerifier.java
[root@elk24 opt]# javac -cp "/data/elasticsearch-7.6.2/lib/elasticsearch-7.6.2.jar:/data/elasticsearch-7.6.2/lib/lucene-core-8.4.0.jar:/data/elasticsearch-7.6.2/modules/x-pack-core/x-pack-core-7.6.2.jar:/data/elasticsearch-7.6.2/lib/elasticsearch-core-7.6.2.jar" XPackBuild.java
将 /data/elasticsearch-7.6.2/modules/x-pack-core/x-pack-core-7.6.2.jar 文件和 /opt 下的两个 class 文件拷贝到自己的电脑上,使用 7zip 压缩软件打开 x-pack-core-7.6.2.jar 并替换其中对应的两个 class 文件
启动 es
[root@elk24 opt]# groupadd elasticsearch
[root@elk24 opt]# useradd elasticsearch -G elasticsearch
[root@elk24 opt]# chown -R elasticsearch.elasticsearch elasticsearch-7.6.2/
[root@elk24 opt]# su - elasticsearch
[root@elk24 opt]# vim /data/elasticsearch-7.6.2/config/jvm.options
-Xms8g
-Xmx8g
[root@elk24 opt]# /data/elasticsearch-7.6.2/bin/elasticsearch -d
配置 es 集群的管理用户密码
[elasticsearch@elk24 bin]$ ./elasticsearch-setup-passwords interactive
Initiating the setup of passwords for reserved users elastic,apm_system,kibana,logstash_system,beats_system,remote_monitoring_user.
You will be prompted to enter passwords as the process progresses.
Please confirm that you would like to continue [y/N]y
Enter password for [elastic]:
Reenter password for [elastic]:
.
.
.
Changed password for user [elastic]
kafka conf
[root@kafka24 ~]# wget https://archive.apache.org/dist/kafka/2.2.1/kafka_2.11-2.2.1.tgz
[root@kafka24 ~]# cd /data/
[root@kafka24 data]# ls
kafka kafka-logs zookeeper
[root@kafka24 data]# cd kafka/config/
[root@kafka24 config]# cat zookeeper.properties
dataDir=/data/zookeeper
clientPort=2181
maxClientCnxns=0
tickTime=2000
initLimit=10
syncLimit=5
[root@kafka24 config]#
[root@kafka24 config]# grep -v "#" server.properties
broker.id=0
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=48
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.110.24.89:2181
listeners=PLAINTEXT://10.110.24.89:9092
advertised.listeners=PLAINTEXT://10.110.24.89:9092
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
[root@kafka24 config]#
[root@kafka24 data]# cat /etc/rc.local
touch /var/lock/subsys/local
ulimit -SHn 102400
#/data/zookeeper/bin/zkServer.sh start
#/data/kafka/bin/kafka-server-start.sh -daemon /data/kafka/config/server.properties
cd /data/kafka/bin && nohup ./zookeeper-server-start.sh ../config/zookeeper.properties &
cd /data/kafka/bin && nohup ./kafka-server-start.sh ../config/server.properties > kafka.log &
logstash config
[root@insurace-24 conf]# cat logstash-configmap-template.yaml
kind: ConfigMap
apiVersion: v1
metadata:
name: logstash-#project#-#topics_pattern#-#profile#
namespace: default
data:
logstash-#project#-#topics_pattern#-#profile#.conf: |
input {
kafka {bootstrap_servers => ["10.110.24.89:9092"]
topics_pattern => "#topics_pattern#.*"
codec => "json"
consumer_threads => 5
auto_offset_reset => "latest"
group_id => "#topics_pattern#"
client_id => "#topics_pattern#"
decorate_events => true
#auto_commit_interval_ms => 5000
}
}
filter {
json {source => "message"}
date {match => [ "timestamp" ,"dd/MMM/YYYY:HH:mm:ss Z"]
}
mutate {remove_field => "timestamp"}
if "_geoip_lookup_failure" in [tags] {drop {} }
}
output {
elasticsearch {hosts => ["10.110.24.88:9200"]
index => "logstash-#project#-#topics_pattern#-%{+YYYY-MM-dd}"
user => elastic
password => "Elasti111111111111111*#"
}
stdout {codec => rubydebug}
}
kibana start
[root@elk24 ik]# cat /etc/rc.local
ulimit -SHn 102400
su elasticsearch -c "/data/elasticsearch-7.6.2/bin/elasticsearch -d"
cd /data/kibana-7.6.2-linux-x86_64/bin && nohup /data/kibana-7.6.2-linux-x86_64/bin/kibana --allow-root &