Elasticsearch

Node-1

# Install Docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y
docker -v
mkdir -p /etc/docker
cd /etc/docker
cat > daemon.json << EOF
{"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]}
EOF

# Install docker-compose
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v

# Restart Docker and enable it at boot
systemctl daemon-reload
systemctl restart docker
docker info
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a

# Create the directories and upload the certificates
mkdir -p /data/modules/es/certs
# Upload elastic-certificates.p12 to /data/modules/es/certs
# Link: https://pan.baidu.com/s/1_-RojJNqt1w_o8P9ugDyTQ  extraction code: o93o
mkdir -p /data/modules/es/config
mkdir -p /data/modules/es/logs
mkdir -p /data/modules/es/data
mkdir -p /data/modules/es/plugins

## Write the configuration file
cd /data/modules/es/config
cat > elasticsearch.yml << EOF
cluster.name: es-cluster
node.name: node-1
network.host: 0.0.0.0
network.publish_host: 10.0.0.21
http.port: 9200
transport.port: 9300
bootstrap.memory_lock: true
discovery.seed_hosts: ["10.0.0.21:9300","10.0.0.22:9300","10.0.0.23:9300"]
cluster.initial_master_nodes: ["10.0.0.21","10.0.0.22","10.0.0.23"]
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.truststore.path: "certs/elastic-certificates.p12"
xpack.monitoring.collection.enabled: true
xpack.monitoring.exporters.my_local.type: local
xpack.monitoring.exporters.my_local.use_ingest: false
EOF

# Set ownership and open the firewall ports
# (the Elasticsearch container runs as uid 1000; use "chown -R 1000:1000 /data/modules" if the host has no "es" user)
chown es:es /data/modules -R
firewall-cmd --zone=public --add-port=9100/tcp --permanent
firewall-cmd --zone=public --add-port=9200/tcp --permanent
firewall-cmd --zone=public --add-port=9300/tcp --permanent
firewall-cmd --zone=public --add-service=http --permanent
firewall-cmd --zone=public --add-service=https --permanent
firewall-cmd --reload
firewall-cmd --list-all

# Container network
systemctl restart docker
docker network create \
  --driver=bridge \
  --subnet=10.10.10.0/24 \
  --ip-range=10.10.10.0/24 \
  --gateway=10.10.10.254 \
  es-net

## Create the container
docker run --name es \
  -d --network=es-net \
  --ip=10.10.10.21 \
  --restart=always \
  --publish 9200:9200 \
  --publish 9300:9300 \
  --privileged=true \
  --ulimit nofile=655350 \
  --ulimit memlock=-1 \
  --memory=2G \
  --memory-swap=-1 \
  --volume /data/modules/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
  --volume /data/modules/es/data:/usr/share/elasticsearch/data \
  --volume /data/modules/es/logs:/usr/share/elasticsearch/logs \
  --volume /data/modules/es/certs:/usr/share/elasticsearch/config/certs \
  --volume /etc/localtime:/etc/localtime \
  -e TERM=dumb \
  -e ELASTIC_PASSWORD='elastic' \
  -e ES_JAVA_OPTS="-Xms256m -Xmx256m" \
  -e path.data=data \
  -e path.logs=logs \
  -e node.master=true \
  -e node.data=true \
  -e node.ingest=false \
  -e node.attr.rack="0402-K03" \
  -e gateway.recover_after_nodes=1 \
  -e bootstrap.memory_lock=true \
  -e bootstrap.system_call_filter=false \
  -e indices.fielddata.cache.size="25%" \
  elasticsearch:7.17.0

# Login credentials: elastic / elastic
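If the prebuilt elastic-certificates.p12 from the link above is not available, the same file can be generated locally with elasticsearch-certutil, which ships inside the image. A minimal sketch, run once and then copy the resulting file into /data/modules/es/certs on every node; the empty passwords are an assumption, matching the configuration above, which sets no keystore password:

# Generate a CA and a transport certificate bundle into the mounted certs directory
docker run --rm -u root -v /data/modules/es/certs:/certs elasticsearch:7.17.0 bash -c '
  bin/elasticsearch-certutil ca --silent --pass "" --out /certs/elastic-stack-ca.p12 &&
  bin/elasticsearch-certutil cert --silent --pass "" \
      --ca /certs/elastic-stack-ca.p12 --ca-pass "" \
      --out /certs/elastic-certificates.p12'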

Node-2

# Install Docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y
docker -v
mkdir -p /etc/docker
cd /etc/docker
cat > daemon.json << EOF
{"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]}
EOF

# Install docker-compose
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v

# Restart Docker and enable it at boot
systemctl daemon-reload
systemctl restart docker
docker info
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a

# Create the directories and upload the certificates
mkdir -p /data/modules/es/certs
# Upload elastic-certificates.p12 to /data/modules/es/certs
# Link: https://pan.baidu.com/s/1_-RojJNqt1w_o8P9ugDyTQ  extraction code: o93o
mkdir -p /data/modules/es/config
mkdir -p /data/modules/es/logs
mkdir -p /data/modules/es/data
mkdir -p /data/modules/es/plugins

## Write the configuration file
cd /data/modules/es/config
cat > elasticsearch.yml << EOF
cluster.name: es-cluster
node.name: node-2
network.host: 0.0.0.0
network.publish_host: 10.0.0.22
http.port: 9200
transport.port: 9300
bootstrap.memory_lock: true
discovery.seed_hosts: ["10.0.0.21:9300","10.0.0.22:9300","10.0.0.23:9300"]
cluster.initial_master_nodes: ["10.0.0.21","10.0.0.22","10.0.0.23"]
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.truststore.path: "certs/elastic-certificates.p12"
xpack.monitoring.collection.enabled: true
xpack.monitoring.exporters.my_local.type: local
xpack.monitoring.exporters.my_local.use_ingest: false
EOF

# Set ownership and open the firewall ports
# (the Elasticsearch container runs as uid 1000; use "chown -R 1000:1000 /data/modules" if the host has no "es" user)
chown es:es /data/modules -R
firewall-cmd --zone=public --add-port=9100/tcp --permanent
firewall-cmd --zone=public --add-port=9200/tcp --permanent
firewall-cmd --zone=public --add-port=9300/tcp --permanent
firewall-cmd --zone=public --add-service=http --permanent
firewall-cmd --zone=public --add-service=https --permanent
firewall-cmd --reload
firewall-cmd --list-all

# Container network
systemctl restart docker
docker network create \
  --driver=bridge \
  --subnet=10.10.10.0/24 \
  --ip-range=10.10.10.0/24 \
  --gateway=10.10.10.254 \
  es-net

## Create the container
docker run --name es \
  -d --network=es-net \
  --ip=10.10.10.22 \
  --restart=always \
  --publish 9200:9200 \
  --publish 9300:9300 \
  --privileged=true \
  --ulimit nofile=655350 \
  --ulimit memlock=-1 \
  --memory=2G \
  --memory-swap=-1 \
  --volume /data/modules/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
  --volume /data/modules/es/data:/usr/share/elasticsearch/data \
  --volume /data/modules/es/logs:/usr/share/elasticsearch/logs \
  --volume /data/modules/es/certs:/usr/share/elasticsearch/config/certs \
  --volume /etc/localtime:/etc/localtime \
  -e TERM=dumb \
  -e ELASTIC_PASSWORD='elastic' \
  -e ES_JAVA_OPTS="-Xms256m -Xmx256m" \
  -e path.data=data \
  -e path.logs=logs \
  -e node.master=true \
  -e node.data=true \
  -e node.ingest=false \
  -e node.attr.rack="0402-K03" \
  -e gateway.recover_after_nodes=1 \
  -e bootstrap.memory_lock=true \
  -e bootstrap.system_call_filter=false \
  -e indices.fielddata.cache.size="25%" \
  elasticsearch:7.17.0

# Login credentials: elastic / elastic
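One host-level prerequisite the steps above do not mention: the Elasticsearch bootstrap checks require vm.max_map_count to be at least 262144, otherwise the container exits shortly after start. Apply it on each of the three hosts:

sysctl -w vm.max_map_count=262144
echo "vm.max_map_count=262144" >> /etc/sysctl.conf   # persist across reboots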

Node-3

# Install Docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y
docker -v
mkdir -p /etc/docker
cd /etc/docker
cat > daemon.json << EOF
{"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]}
EOF

# Install docker-compose
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v

# Restart Docker and enable it at boot
systemctl daemon-reload
systemctl restart docker
docker info
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a

# Create the directories and upload the certificates
mkdir -p /data/modules/es/certs
# Upload elastic-certificates.p12 to /data/modules/es/certs
# Link: https://pan.baidu.com/s/1_-RojJNqt1w_o8P9ugDyTQ  extraction code: o93o
mkdir -p /data/modules/es/config
mkdir -p /data/modules/es/logs
mkdir -p /data/modules/es/data
mkdir -p /data/modules/es/plugins

## Write the configuration file
cd /data/modules/es/config
cat > elasticsearch.yml << EOF
cluster.name: es-cluster
node.name: node-3
network.host: 0.0.0.0
network.publish_host: 10.0.0.23
http.port: 9200
transport.port: 9300
bootstrap.memory_lock: true
discovery.seed_hosts: ["10.0.0.21:9300","10.0.0.22:9300","10.0.0.23:9300"]
cluster.initial_master_nodes: ["10.0.0.21","10.0.0.22","10.0.0.23"]
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.truststore.path: "certs/elastic-certificates.p12"
xpack.monitoring.collection.enabled: true
xpack.monitoring.exporters.my_local.type: local
xpack.monitoring.exporters.my_local.use_ingest: false
EOF

# Set ownership and open the firewall ports
# (the Elasticsearch container runs as uid 1000; use "chown -R 1000:1000 /data/modules" if the host has no "es" user)
chown es:es /data/modules -R
firewall-cmd --zone=public --add-port=9100/tcp --permanent
firewall-cmd --zone=public --add-port=9200/tcp --permanent
firewall-cmd --zone=public --add-port=9300/tcp --permanent
firewall-cmd --zone=public --add-service=http --permanent
firewall-cmd --zone=public --add-service=https --permanent
firewall-cmd --reload
firewall-cmd --list-all

# Container network
systemctl restart docker
docker network create \
  --driver=bridge \
  --subnet=10.10.10.0/24 \
  --ip-range=10.10.10.0/24 \
  --gateway=10.10.10.254 \
  es-net

## Create the container
docker run --name es \
  -d --network=es-net \
  --ip=10.10.10.23 \
  --restart=always \
  --publish 9200:9200 \
  --publish 9300:9300 \
  --privileged=true \
  --ulimit nofile=655350 \
  --ulimit memlock=-1 \
  --memory=2G \
  --memory-swap=-1 \
  --volume /data/modules/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
  --volume /data/modules/es/data:/usr/share/elasticsearch/data \
  --volume /data/modules/es/logs:/usr/share/elasticsearch/logs \
  --volume /data/modules/es/certs:/usr/share/elasticsearch/config/certs \
  --volume /etc/localtime:/etc/localtime \
  -e TERM=dumb \
  -e ELASTIC_PASSWORD='elastic' \
  -e ES_JAVA_OPTS="-Xms256m -Xmx256m" \
  -e path.data=data \
  -e path.logs=logs \
  -e node.master=true \
  -e node.data=true \
  -e node.ingest=false \
  -e node.attr.rack="0402-K03" \
  -e gateway.recover_after_nodes=1 \
  -e bootstrap.memory_lock=true \
  -e bootstrap.system_call_filter=false \
  -e indices.fielddata.cache.size="25%" \
  elasticsearch:7.17.0

# Login credentials: elastic / elastic
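Once all three nodes are running, a quick way to confirm that they have formed a cluster; the credentials elastic / elastic come from ELASTIC_PASSWORD above:

curl -u elastic:elastic "http://10.0.0.21:9200/_cluster/health?pretty"   # expect "number_of_nodes" : 3
curl -u elastic:elastic "http://10.0.0.21:9200/_cat/nodes?v"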

Kibana

Master

# Install Docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y
docker -v
mkdir -p /etc/docker
cd /etc/docker
cat > daemon.json << EOF
{"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]}
EOF

# Install docker-compose
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v

# Restart Docker and enable it at boot
systemctl daemon-reload
systemctl restart docker
docker info
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a

# Open the firewall
firewall-cmd --zone=public --add-port=5601/tcp --permanent
firewall-cmd --zone=public --add-service=http --permanent
firewall-cmd --zone=public --add-service=https --permanent
firewall-cmd --reload
firewall-cmd --list-all

# Create the container network
docker network create \
  --driver=bridge \
  --subnet=10.10.10.0/24 \
  --ip-range=10.10.10.0/24 \
  --gateway=10.10.10.254 \
  elk-net

# Pull the image
docker pull kibana:7.17.0

# Run Kibana
docker run --name kibana \
  -d --network=elk-net \
  --ip=10.10.10.11 \
  --restart=always \
  --publish 5601:5601 \
  --privileged=true \
  kibana:7.17.0

# Enter the container and write the configuration
docker exec -it -u root kibana /bin/bash
cd /usr/share/kibana/config
cat > kibana.yml << EOF
server.name: kibana
server.port: 5601
i18n.locale: "zh-CN"
server.host: "0.0.0.0"
kibana.index: ".kibana"
server.shutdownTimeout: "5s"
server.publicBaseUrl: "http://10.0.0.11:5601"
monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.hosts: ["http://10.0.0.21:9200","http://10.0.0.22:9200","http://10.0.0.23:9200"]
elasticsearch.username: "elastic"
elasticsearch.password: "elastic"
EOF
exit
docker restart kibana
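A quick check that Kibana came up and can reach the cluster (10.0.0.11 is taken from server.publicBaseUrl above); then log in from a browser with elastic / elastic:

docker logs -f kibana                               # watch the startup log for errors
curl -sI http://10.0.0.11:5601/login | head -n 1    # expect HTTP 200 once Kibana is ready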

Zookeeper+Kafka

Node-1

# Zookeeper-1
# Use the same network as the ELK containers
docker network create \
  --driver=bridge \
  --subnet=10.10.10.0/24 \
  --ip-range=10.10.10.0/24 \
  --gateway=10.10.10.254 \
  es-net

# Create the mapped directories
mkdir -p /data/modules/zookeeper/data
mkdir -p /data/modules/zookeeper/logs
mkdir -p /data/modules/zookeeper/conf

# The value of myid must match this node's server.(id) entry
cd /data/modules/zookeeper/data
cat > myid << EOF
1
EOF

# Write the configuration file
cd /data/modules/zookeeper/conf/
cat > zoo.cfg << EOF
# Heartbeat interval between cluster nodes, in milliseconds; all later time-related
# settings are integer multiples of this value (e.g. 4 means 8000 ms)
tickTime=2000
# Time limit (in ticks) for the other nodes to complete their initial connection to the leader, here 10*2000
initLimit=10
# A node is considered down if the leader gets no response within syncLimit*tickTime
syncLimit=5
# Data directory
dataDir=/data
# ZK log directory
dataLogDir=/logs
# ZK client port
clientPort=2181
# Maximum connections per client, 0 means unlimited
maxClientCnxns=60
# Number of snapshot files to retain
autopurge.snapRetainCount=3
# How often (in hours) to purge old snapshots and transaction logs; 0 disables purging
autopurge.purgeInterval=1
# server.A=B:C:D cluster definition:
# A is the server id; B is the IP; C is the port used to talk to the leader;
# D is the port used for leader election after the leader fails; both ports are arbitrary
server.1=0.0.0.0:2888:3888
server.2=10.0.0.22:2888:3888
server.3=10.0.0.23:2888:3888
EOF

# Open the ports (2181/2888/3888 for Zookeeper, 9092 for Kafka, 9000 for kafka-manager)
firewall-cmd --permanent --zone=public --add-port=2181/tcp
firewall-cmd --permanent --zone=public --add-port=2888/tcp
firewall-cmd --permanent --zone=public --add-port=3888/tcp
firewall-cmd --permanent --zone=public --add-port=9092/tcp
firewall-cmd --permanent --zone=public --add-port=9000/tcp
firewall-cmd --reload

# Pull the image
docker pull zookeeper:3.7.0

# Create the container
docker run -d \
  -p 2181:2181 \
  -p 2888:2888 \
  -p 3888:3888 \
  --network=es-net \
  --name zookeeper \
  --ip=10.10.10.31 \
  --privileged=true \
  --restart always \
  -v /data/modules/zookeeper/data:/data \
  -v /data/modules/zookeeper/logs:/logs \
  -v /data/modules/zookeeper/data/myid:/data/myid \
  -v /data/modules/zookeeper/conf/zoo.cfg:/conf/zoo.cfg \
  zookeeper:3.7.0

# Check the container
docker ps

# Install Kafka
docker pull wurstmeister/kafka:2.13-2.8.1

# Create the log directory
mkdir -p /data/modules/kafka/logs

# Create the container
docker run -d --name kafka \
  --publish 9092:9092 \
  --network=es-net \
  --ip=10.10.10.41 \
  --privileged=true \
  --restart always \
  --link zookeeper \
  --env KAFKA_ZOOKEEPER_CONNECT=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181 \
  --env KAFKA_ADVERTISED_HOST_NAME=10.0.0.21 \
  --env KAFKA_ADVERTISED_PORT=9092 \
  --env KAFKA_LOG_DIRS=/kafka/kafka-logs-1 \
  -v /data/modules/kafka/logs:/kafka/kafka-logs-1 \
  wurstmeister/kafka:2.13-2.8.1

# Pull the image
docker pull sheepkiller/kafka-manager:alpine

# kafka-manager
docker run -itd --restart=always \
  --name=kafka-manager \
  -p 9000:9000 \
  --network=es-net \
  --ip=10.10.10.51 \
  --privileged=true \
  -e ZK_HOSTS="10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181" \
  sheepkiller/kafka-manager:alpine
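To verify that this node joined the ensemble (zkServer.sh is on the PATH inside the official zookeeper image), expect Mode: follower or Mode: leader once all three nodes are up:

docker exec -it zookeeper zkServer.sh status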

Node-2

# Zookeeper-2
# Use the same network as the ELK containers
docker network create \
  --driver=bridge \
  --subnet=10.10.10.0/24 \
  --ip-range=10.10.10.0/24 \
  --gateway=10.10.10.254 \
  es-net

# Create the mapped directories
mkdir -p /data/modules/zookeeper/data
mkdir -p /data/modules/zookeeper/logs
mkdir -p /data/modules/zookeeper/conf

# The value of myid must match this node's server.(id) entry
cd /data/modules/zookeeper/data
cat > myid << EOF
2
EOF

# Write the configuration file
cd /data/modules/zookeeper/conf/
cat > zoo.cfg << EOF
# Heartbeat interval between cluster nodes, in milliseconds; all later time-related
# settings are integer multiples of this value (e.g. 4 means 8000 ms)
tickTime=2000
# Time limit (in ticks) for the other nodes to complete their initial connection to the leader, here 10*2000
initLimit=10
# A node is considered down if the leader gets no response within syncLimit*tickTime
syncLimit=5
# Data directory
dataDir=/data
# ZK log directory
dataLogDir=/logs
# ZK client port
clientPort=2181
# Maximum connections per client, 0 means unlimited
maxClientCnxns=60
# Number of snapshot files to retain
autopurge.snapRetainCount=3
# How often (in hours) to purge old snapshots and transaction logs; 0 disables purging
autopurge.purgeInterval=1
# server.A=B:C:D cluster definition:
# A is the server id; B is the IP; C is the port used to talk to the leader;
# D is the port used for leader election after the leader fails; both ports are arbitrary
server.1=10.0.0.21:2888:3888
server.2=0.0.0.0:2888:3888
server.3=10.0.0.23:2888:3888
EOF

# Open the ports (2181/2888/3888 for Zookeeper, 9092 for Kafka, 9000 for kafka-manager)
firewall-cmd --permanent --zone=public --add-port=2181/tcp
firewall-cmd --permanent --zone=public --add-port=2888/tcp
firewall-cmd --permanent --zone=public --add-port=3888/tcp
firewall-cmd --permanent --zone=public --add-port=9092/tcp
firewall-cmd --permanent --zone=public --add-port=9000/tcp
firewall-cmd --reload

# Pull the image
docker pull zookeeper:3.7.0

# Create the container
docker run -d \
  -p 2181:2181 \
  -p 2888:2888 \
  -p 3888:3888 \
  --network=es-net \
  --name zookeeper \
  --ip=10.10.10.32 \
  --privileged=true \
  --restart always \
  -v /data/modules/zookeeper/data:/data \
  -v /data/modules/zookeeper/logs:/logs \
  -v /data/modules/zookeeper/data/myid:/data/myid \
  -v /data/modules/zookeeper/conf/zoo.cfg:/conf/zoo.cfg \
  zookeeper:3.7.0

# Check the container
docker ps

# Install Kafka
docker pull wurstmeister/kafka:2.13-2.8.1

# Create the log directory
mkdir -p /data/modules/kafka/logs

# Create the container
docker run -d --name kafka \
  --publish 9092:9092 \
  --network=es-net \
  --ip=10.10.10.42 \
  --privileged=true \
  --restart always \
  --link zookeeper \
  --env KAFKA_ZOOKEEPER_CONNECT=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181 \
  --env KAFKA_ADVERTISED_HOST_NAME=10.0.0.22 \
  --env KAFKA_ADVERTISED_PORT=9092 \
  --env KAFKA_LOG_DIRS=/kafka/kafka-logs-1 \
  -v /data/modules/kafka/logs:/kafka/kafka-logs-1 \
  wurstmeister/kafka:2.13-2.8.1

# Pull the image
docker pull sheepkiller/kafka-manager:alpine

# kafka-manager
docker run -itd --restart=always \
  --name=kafka-manager \
  -p 9000:9000 \
  --network=es-net \
  --ip=10.10.10.52 \
  --privileged=true \
  -e ZK_HOSTS="10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181" \
  sheepkiller/kafka-manager:alpine
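After the Kafka containers start, each broker registers itself in Zookeeper. This can be checked from any node (zkCli.sh ships in the official zookeeper image); expect one id per live broker:

docker exec -it zookeeper zkCli.sh -server 127.0.0.1:2181 ls /brokers/ids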

Node-3

# Zookeeper-3
# Use the same network as the ELK containers
docker network create \
  --driver=bridge \
  --subnet=10.10.10.0/24 \
  --ip-range=10.10.10.0/24 \
  --gateway=10.10.10.254 \
  es-net

# Create the mapped directories
mkdir -p /data/modules/zookeeper/data
mkdir -p /data/modules/zookeeper/logs
mkdir -p /data/modules/zookeeper/conf

# The value of myid must match this node's server.(id) entry
cd /data/modules/zookeeper/data
cat > myid << EOF
3
EOF

# Write the configuration file
cd /data/modules/zookeeper/conf/
cat > zoo.cfg << EOF
# Heartbeat interval between cluster nodes, in milliseconds; all later time-related
# settings are integer multiples of this value (e.g. 4 means 8000 ms)
tickTime=2000
# Time limit (in ticks) for the other nodes to complete their initial connection to the leader, here 10*2000
initLimit=10
# A node is considered down if the leader gets no response within syncLimit*tickTime
syncLimit=5
# Data directory
dataDir=/data
# ZK log directory
dataLogDir=/logs
# ZK client port
clientPort=2181
# Maximum connections per client, 0 means unlimited
maxClientCnxns=60
# Number of snapshot files to retain
autopurge.snapRetainCount=3
# How often (in hours) to purge old snapshots and transaction logs; 0 disables purging
autopurge.purgeInterval=1
# server.A=B:C:D cluster definition:
# A is the server id; B is the IP; C is the port used to talk to the leader;
# D is the port used for leader election after the leader fails; both ports are arbitrary
server.1=10.0.0.21:2888:3888
server.2=10.0.0.22:2888:3888
server.3=0.0.0.0:2888:3888
EOF

# Open the ports (2181/2888/3888 for Zookeeper, 9092 for Kafka, 9000 for kafka-manager)
firewall-cmd --permanent --zone=public --add-port=2181/tcp
firewall-cmd --permanent --zone=public --add-port=2888/tcp
firewall-cmd --permanent --zone=public --add-port=3888/tcp
firewall-cmd --permanent --zone=public --add-port=9092/tcp
firewall-cmd --permanent --zone=public --add-port=9000/tcp
firewall-cmd --reload

# Pull the image
docker pull zookeeper:3.7.0

# Create the container
docker run -d \
  -p 2181:2181 \
  -p 2888:2888 \
  -p 3888:3888 \
  --network=es-net \
  --name zookeeper \
  --ip=10.10.10.33 \
  --privileged=true \
  --restart always \
  -v /data/modules/zookeeper/data:/data \
  -v /data/modules/zookeeper/logs:/logs \
  -v /data/modules/zookeeper/data/myid:/data/myid \
  -v /data/modules/zookeeper/conf/zoo.cfg:/conf/zoo.cfg \
  zookeeper:3.7.0

# Check the container
docker ps

# Install Kafka
docker pull wurstmeister/kafka:2.13-2.8.1

# Create the log directory
mkdir -p /data/modules/kafka/logs

# Create the container
docker run -d --name kafka \
  --publish 9092:9092 \
  --network=es-net \
  --ip=10.10.10.43 \
  --privileged=true \
  --restart always \
  --link zookeeper \
  --env KAFKA_ZOOKEEPER_CONNECT=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181 \
  --env KAFKA_ADVERTISED_HOST_NAME=10.0.0.23 \
  --env KAFKA_ADVERTISED_PORT=9092 \
  --env KAFKA_LOG_DIRS=/kafka/kafka-logs-1 \
  -v /data/modules/kafka/logs:/kafka/kafka-logs-1 \
  wurstmeister/kafka:2.13-2.8.1

# Pull the image
docker pull sheepkiller/kafka-manager:alpine

# kafka-manager
docker run -itd --restart=always \
  --name=kafka-manager \
  -p 9000:9000 \
  --network=es-net \
  --ip=10.10.10.53 \
  --privileged=true \
  -e ZK_HOSTS="10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181" \
  sheepkiller/kafka-manager:alpine
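With all three brokers up, a small produce/consume smoke test; the script paths are those of the wurstmeister/kafka image and the topic name elk-test is just an example:

docker exec -it kafka /opt/kafka/bin/kafka-topics.sh --create \
  --bootstrap-server 10.0.0.21:9092,10.0.0.22:9092,10.0.0.23:9092 \
  --replication-factor 3 --partitions 3 --topic elk-test
docker exec -it kafka bash -c 'echo hello | /opt/kafka/bin/kafka-console-producer.sh \
  --broker-list 10.0.0.21:9092,10.0.0.22:9092,10.0.0.23:9092 --topic elk-test'
docker exec -it kafka /opt/kafka/bin/kafka-console-consumer.sh \
  --bootstrap-server 10.0.0.21:9092 --topic elk-test --from-beginning --max-messages 1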

Logstash

Master

# Install Docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y
docker -v
mkdir -p /etc/docker
cd /etc/docker
cat > daemon.json << EOF
{"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]}
EOF

# Install docker-compose
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v

# Restart Docker and enable it at boot
systemctl daemon-reload
systemctl restart docker
docker info
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a

# Open the firewall
firewall-cmd --zone=public --add-port=5044/tcp --permanent
firewall-cmd --zone=public --add-service=http --permanent
firewall-cmd --zone=public --add-service=https --permanent
firewall-cmd --reload
firewall-cmd --list-all

# Unified directory layout
if [ ! -d "/data/software" ]; then
    mkdir -p /data/software/
fi
if [ ! -d "/data/modules" ]; then
    mkdir -p /data/modules/
fi
mkdir -p /data/modules/logstash/conf/
cd /data/modules/logstash/conf/
cat > logstash.yml << EOF
http.host: "0.0.0.0"
path.config: /usr/share/logstash/config/conf.d/*.conf
path.logs: /usr/share/logstash/logs
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: elastic
xpack.monitoring.elasticsearch.hosts: [ "http://10.0.0.21:9200","http://10.0.0.22:9200","http://10.0.0.23:9200" ]
EOF

# Pipeline configuration (conf.d is mounted into the container as a directory,
# so the pipeline goes into a .conf file inside it)
mkdir -p /data/modules/logstash/conf/conf.d/
cat > conf.d/logstash.conf << 'EOF'
input {
  beats {
    port => 5044
  }
  file {
    # Nginx access log (only useful if this path is mounted into the Logstash container)
    path => "/usr/local/nginx/logs/access.log"
    start_position => "beginning"
  }
}
filter {
  if [path] =~ "access" {
    mutate { replace => { "type" => "nginx_access" } }
    grok {
      match => { "message" => "%{COMBINEDAPACHELOG}" }
    }
  }
  date {
    # Timestamp
    match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
  }
}
output {
  elasticsearch {
    # Target hosts
    hosts => ["10.0.0.21:9200","10.0.0.22:9200","10.0.0.23:9200"]
    user => "elastic"
    password => "elastic"
  }
  stdout { codec => rubydebug }
}
EOF

# Create the container network
docker network create \
  --driver=bridge \
  --subnet=10.10.10.0/24 \
  --ip-range=10.10.10.0/24 \
  --gateway=10.10.10.254 \
  elk-net

# Pull the image
docker pull elastic/logstash:7.17.0

# Start the container
docker run -dit --name=logstash \
  -d --network=elk-net \
  --ip=10.10.10.12 \
  --publish 5044:5044 \
  --restart=always --privileged=true \
  -e ES_JAVA_OPTS="-Xms512m -Xmx512m" \
  -v /data/modules/logstash/conf/logstash.yml:/usr/share/logstash/config/logstash.yml \
  -v /data/modules/logstash/conf/conf.d/:/usr/share/logstash/config/conf.d/ \
  elastic/logstash:7.17.0
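To confirm the pipeline loaded and Logstash can reach Elasticsearch, watch the container log and query the node stats API, which listens on 9600 inside the container because of http.host above (curl being present in the image is an assumption):

docker logs -f logstash
docker exec -it logstash curl -s "http://localhost:9600/_node/stats/pipelines?pretty"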

Filebeat

Client

Filebeat
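The original notes stop at this heading and do not include the Filebeat steps for the client. A minimal sketch, assuming logs are shipped to the Kafka brokers defined above; the image tag, the topic name filebeat-logs, and the choice of output.kafka are assumptions (output.logstash pointed at the Logstash host on port 5044 would work as well):

mkdir -p /data/modules/filebeat
cat > /data/modules/filebeat/filebeat.yml << 'EOF'
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /usr/local/nginx/logs/access.log   # Nginx access log (see the Nginx script below)
      - /data/mysql/mysql.err              # MySQL error log (see the MySQL script below)
      - /var/log/redis.log                 # Redis log (see the Redis script below)
output.kafka:
  hosts: ["10.0.0.21:9092", "10.0.0.22:9092", "10.0.0.23:9092"]
  topic: "filebeat-logs"                   # example topic name
  required_acks: 1
EOF
docker run -d --name filebeat \
  --restart=always --user=root \
  -v /data/modules/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml \
  -v /usr/local/nginx/logs:/usr/local/nginx/logs:ro \
  -v /data/mysql:/data/mysql:ro \
  -v /var/log:/var/log:ro \
  elastic/filebeat:7.17.0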
MySQL
#!/bin/bash
MYSQL_V=5.7.37
TMP_DIR=/tmp
INSTALL_DIR=/usr/local

function install_mysql(){
    MYSQL_BASE=/usr/local/mysql
    cd $TMP_DIR
    file="mysql-5.7.37-linux-glibc2.12-x86_64.tar.gz"
    if [ ! -f $file ]; then
        echo "File not found!"
        yum install -y wget && wget -c https://cdn.mysql.com//Downloads/MySQL-5.7/mysql-5.7.37-linux-glibc2.12-x86_64.tar.gz
        echo "Download complete, extracting......."
        tar -zxvf mysql-5.7.37-linux-glibc2.12-x86_64.tar.gz
        mv mysql-5.7.37-linux-glibc2.12-x86_64 /usr/local/mysql
        cd /usr/local/mysql
        #exit 0
    fi

    echo "Creating the mysql user and group"
    userdel mysql
    groupadd mysql
    useradd -r -g mysql mysql
    mkdir -p /data/mysql
    chown mysql:mysql -R /data/mysql

cd /etc
echo "Writing the configuration file"
cat > my.cnf << EOF
[mysqld]
bind-address=0.0.0.0                    # Bind address; allows remote connections
port=3306                               # Port MySQL listens on
user=mysql                              # User the database runs as
basedir=/usr/local/mysql                # Absolute path of the MySQL installation
datadir=/data/mysql                     # Absolute path of the MySQL data directory
socket=/tmp/mysql.sock                  # Socket file
log-error=/data/mysql/mysql.err         # Path of the MySQL error log
pid-file=/data/mysql/mysql.pid          # File holding the mysqld process ID
character_set_server=utf8mb4            # Server character set
symbolic-links=0                        # Whether symbolic links are enabled
explicit_defaults_for_timestamp=true    # Auto-update behaviour for timestamp columns
EOF

echo "Initializing MySQL"
cd /usr/local/mysql/bin/
./mysqld --defaults-file=/etc/my.cnf --basedir=/usr/local/mysql/ --datadir=/data/mysql/ --user=mysql --initialize
sleep 2s

echo "Starting MySQL"
cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysql
service mysql start
service mysql status
ln -s /usr/local/mysql/bin/mysql /usr/bin

echo "Fetching the initial MySQL password"
PASSWORD=`cat /data/mysql/mysql.err |grep "temporary password"|awk -F"root@localhost: " '{print $2}'`

echo "Changing the MySQL password"
$MYSQL_BASE/bin/mysql --socket=/tmp/mysql.sock --connect-expired-password -uroot -p${PASSWORD} -e "ALTER USER 'root'@'localhost' IDENTIFIED BY 'root@.com';"
$MYSQL_BASE/bin/mysql --socket=/tmp/mysql.sock -uroot -proot@.com -e "FLUSH PRIVILEGES;"
$MYSQL_BASE/bin/mysql --socket=/tmp/mysql.sock -uroot -proot@.com -e "USE mysql;"
$MYSQL_BASE/bin/mysql --socket=/tmp/mysql.sock -uroot -proot@.com -e "UPDATE mysql.user SET host = '%' WHERE user = 'root';"
$MYSQL_BASE/bin/mysql --socket=/tmp/mysql.sock -uroot -proot@.com -e "FLUSH PRIVILEGES;"
$MYSQL_BASE/bin/mysql --socket=/tmp/mysql.sock -uroot -proot@.com -e "exit;"

echo "Restarting the database"
service mysql restart
service mysql status
service mysql stop
sleep 2s

echo "Installing as a systemd service"
cd /usr/lib/systemd/system
cat > mysql.service << 'EOF'
[Unit]
Description=Mysql
After=syslog.target network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
PIDFile=/data/mysql/mysql.pid
ExecStart=/usr/local/mysql/support-files/mysql.server start
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/bin/kill -s QUIT $MAINPID
PrivateTmp=false

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start mysql
systemctl enable mysql
systemctl status mysql
#cp /usr/local/mysql/support-files/mysql.server /etc/rc.d/init.d/mysqld;
#chmod +x /etc/init.d/mysqld;
#chkconfig --add mysqld;
#chkconfig --list;

firewall-cmd --zone=public --add-port=3306/tcp --permanent
firewall-cmd --reload
firewall-cmd --list-all

echo "=========> MySQL info <========="
echo " Database version : 5.7.37           "
echo " Database password: root@.com        "
echo " Database port    : 3306             "
echo " BASEDIR directory: /usr/local/mysql "
echo " DATADIR directory: /data/mysql      "
}
install_mysql
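A quick check after the script finishes (root@.com is the password set above):

mysql -uroot -p'root@.com' -e "SELECT VERSION();"
systemctl status mysql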
Nginx
#!/bin/bash
# Nginx version
NGINX_V=1.20.0
# Nginx download directory
TMP_DIR=/tmp
# Nginx install directory
INSTALL_DIR=/usr/local

function install_nginx() {
# Install build dependencies
yum -y install gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel
# Download Nginx
cd ${TMP_DIR}
yum install -y wget && wget -c http://nginx.org/download/nginx-${NGINX_V}.tar.gz
# Extract the sources
tar -zxvf ${TMP_DIR}/nginx-${NGINX_V}.tar.gz
mv nginx-${NGINX_V} nginx
cd nginx
# Configure the build
./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module
sleep 2s
# Build and install
make && make install
# Install as a systemd service
cd /usr/lib/systemd/system
cat > nginx.service << 'EOF'
[Unit]
Description=nginx - high performance web server
Documentation=http://nginx.org/en/docs/
After=network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
PIDFile=/usr/local/nginx/logs/nginx.pid
ExecStartPre=/usr/local/nginx/sbin/nginx -t -c /usr/local/nginx/conf/nginx.conf
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/bin/kill -s QUIT $MAINPID
PrivateTmp=true

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
# Allow HTTP traffic through the firewall
firewall-cmd --permanent --zone=public --add-service=http
systemctl restart firewalld
firewall-cmd --reload
systemctl start nginx
systemctl enable nginx
systemctl status nginx.service
}
install_nginx
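A quick check that Nginx is serving and writing the access log that the Logstash/Filebeat inputs above expect:

curl -I http://127.0.0.1
tail -n 5 /usr/local/nginx/logs/access.log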
Redis
#!/bin/bash
sudo yum install net-tools -y
IP=`ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"|grep "10."`
REDIS_PASSWD=123456

# Build environment (GCC 9 from SCL)
yum -y install centos-release-scl devtoolset-9-gcc
yum -y install devtoolset-9-gcc-c++ devtoolset-9-binutils
scl enable devtoolset-9 bash
echo "source /opt/rh/devtoolset-9/enable" >> /etc/profile
gcc -v

install_redis(){
# Unified directory layout
if [ ! -d "/data/software" ]; then
    mkdir -p /data/software/
fi
# Download the sources
cd /data/software/
file="redis-6.2.6.tar.gz"
if [ ! -f $file ]; then
    yum install -y wget && wget http://download.redis.io/releases/redis-6.2.6.tar.gz
    #exit 0
fi
# Extract and build
cd /data/software
tar -zxvf redis-6.2.6.tar.gz -C /usr/local/
cd /usr/local/
mv redis-6.2.6 redis
cd redis
sudo make
sleep 2s
sudo make PREFIX=/usr/local/redis install
mkdir /usr/local/redis/etc/
cp /usr/local/redis/redis.conf /usr/local/redis/redis.conf.bak
# Write the configuration
cd /usr/local/redis
cat > redis.conf << EOF
# Run in the background: no or yes
daemonize no
# Port, default 6379
port 6379
# Log file
logfile "/var/log/redis.log"
# To allow access from outside, change the bind address below
bind 127.0.0.1
# Password
requirepass 123456
protected-mode no
EOF
# Environment variables
cd /etc/
cat >> profile << 'EOF'
export REDIS_HOME=/usr/local/redis
export PATH=$PATH:$REDIS_HOME/bin/
EOF
source /etc/profile
# Open the port
firewall-cmd --permanent --zone=public --add-port=6379/tcp
firewall-cmd --reload
# Start Redis with the configuration written above
cd /usr/local/redis/bin
ln -s /usr/local/redis/bin/redis-server /usr/bin/redis-server
ln -s /usr/local/redis/bin/redis-cli /usr/bin/redis-cli
redis-server /usr/local/redis/redis.conf
}
install_redis
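A quick check (123456 is the requirepass value set above):

redis-cli -a 123456 ping    # expect PONG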