共计 25481 个字符,预计需要花费 64 分钟才能阅读完成。
Elasticsearch
Node-1
# 装置 docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y;
docker -v;
mkdir -p /etc/docker;
cd /etc/docker;
# Docker registry mirror. The file must be plain JSON: the original body
# contained markdown-escaped brackets ("\[" ... "\]"), which is invalid
# JSON and makes dockerd fail to parse /etc/docker/daemon.json.
cat > daemon.json <<EOF
{"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]}
EOF
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v
systemctl daemon-reload;
systemctl restart docker;
docker info;
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a
#创立目录上传证书
mkdir -p /data/modules/es/certs
#上传证书到 /data/modules/es/certs(与上一步创建、容器挂载的目录一致)
#链接:https://pan.baidu.com/s/1_-RojJNqt1w_o8P9ugDyTQ 提取码:o93o
mkdir -p /data/modules/es/config
mkdir -p /data/modules/es/logs
mkdir -p /data/modules/es/data
mkdir -p /data/modules/es/plugins
## 写入配置文件
cd /data/modules/es/config
cat > elasticsearch.yml << EOF
cluster.name: es-cluster
node.name: node-1
network.host: 0.0.0.0
network.publish_host: 10.0.0.21
http.port: 9200
transport.port: 9300
bootstrap.memory_lock: true
discovery.seed_hosts: ["10.0.0.21:9300","10.0.0.22:9300","10.0.0.23:9300"]
cluster.initial_master_nodes: ["10.0.0.21","10.0.0.22","10.0.0.23"]
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.truststore.path: "certs/elastic-certificates.p12"
xpack.monitoring.collection.enabled: true
xpack.monitoring.exporters.my_local.type: local
xpack.monitoring.exporters.my_local.use_ingest: false
EOF
#受权开始口
chown es:es /data/modules -R
firewall-cmd --zone=public --add-port=9100/tcp --permanent;
firewall-cmd --zone=public --add-port=9200/tcp --permanent;
firewall-cmd --zone=public --add-port=9300/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;
firewall-cmd --list-all;
#容器网络
systemctl restart docker
docker network create \
--driver=bridge \
--subnet=10.10.10.0/24 \
--ip-range=10.10.10.0/24 \
--gateway=10.10.10.254 \
es-net
## 创立容器
docker run --name es \
-d --network=es-net \
--ip=10.10.10.21 \
--restart=always \
--publish 9200:9200 \
--publish 9300:9300 \
--privileged=true \
--ulimit nofile=655350 \
--ulimit memlock=-1 \
--memory=2G \
--memory-swap=-1 \
--volume /data/modules/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
--volume /data/modules/es/data:/usr/share/elasticsearch/data \
--volume /data/modules/es/logs:/usr/share/elasticsearch/logs \
--volume /data/modules/es/certs:/usr/share/elasticsearch/config/certs \
--volume /etc/localtime:/etc/localtime \
-e TERM=dumb \
-e ELASTIC_PASSWORD='elastic' \
-e ES_JAVA_OPTS="-Xms256m -Xmx256m" \
-e path.data=data \
-e path.logs=logs \
-e node.master=true \
-e node.data=true \
-e node.ingest=false \
-e node.attr.rack="0402-K03" \
-e gateway.recover_after_nodes=1 \
-e bootstrap.memory_lock=true \
-e bootstrap.system_call_filter=false \
-e indices.fielddata.cache.size="25%" \
elasticsearch:7.17.0
#登陆账号密码
elastic
elastic
Node-2
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y;
docker -v;
mkdir -p /etc/docker;
cd /etc/docker;
# Docker registry mirror. The file must be plain JSON: the original body
# contained markdown-escaped brackets ("\[" ... "\]"), which is invalid
# JSON and makes dockerd fail to parse /etc/docker/daemon.json.
cat > daemon.json <<EOF
{"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]}
EOF
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v
systemctl daemon-reload;
systemctl restart docker;
docker info;
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a
#创立目录上传证书
mkdir -p /data/modules/es/certs
#上传证书到 /data/modules/es/certs(与上一步创建、容器挂载的目录一致)
#链接:https://pan.baidu.com/s/1_-RojJNqt1w_o8P9ugDyTQ 提取码:o93o
mkdir -p /data/modules/es/config
mkdir -p /data/modules/es/logs
mkdir -p /data/modules/es/data
mkdir -p /data/modules/es/plugins
## 写入配置文件
cd /data/modules/es/config
cat > elasticsearch.yml << EOF
cluster.name: es-cluster
node.name: node-2
network.host: 0.0.0.0
network.publish_host: 10.0.0.22
http.port: 9200
transport.port: 9300
bootstrap.memory_lock: true
discovery.seed_hosts: ["10.0.0.21:9300","10.0.0.22:9300","10.0.0.23:9300"]
cluster.initial_master_nodes: ["10.0.0.21","10.0.0.22","10.0.0.23"]
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.truststore.path: "certs/elastic-certificates.p12"
xpack.monitoring.collection.enabled: true
xpack.monitoring.exporters.my_local.type: local
xpack.monitoring.exporters.my_local.use_ingest: false
EOF
#受权开始口
chown es:es /data/modules -R
firewall-cmd --zone=public --add-port=9100/tcp --permanent;
firewall-cmd --zone=public --add-port=9200/tcp --permanent;
firewall-cmd --zone=public --add-port=9300/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;
firewall-cmd --list-all;
#容器网络
systemctl restart docker
docker network create \
--driver=bridge \
--subnet=10.10.10.0/24 \
--ip-range=10.10.10.0/24 \
--gateway=10.10.10.254 \
es-net
## 创立容器
docker run --name es \
-d --network=es-net \
--ip=10.10.10.22 \
--restart=always \
--publish 9200:9200 \
--publish 9300:9300 \
--privileged=true \
--ulimit nofile=655350 \
--ulimit memlock=-1 \
--memory=2G \
--memory-swap=-1 \
--volume /data/modules/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
--volume /data/modules/es/data:/usr/share/elasticsearch/data \
--volume /data/modules/es/logs:/usr/share/elasticsearch/logs \
--volume /data/modules/es/certs:/usr/share/elasticsearch/config/certs \
--volume /etc/localtime:/etc/localtime \
-e TERM=dumb \
-e ELASTIC_PASSWORD='elastic' \
-e ES_JAVA_OPTS="-Xms256m -Xmx256m" \
-e path.data=data \
-e path.logs=logs \
-e node.master=true \
-e node.data=true \
-e node.ingest=false \
-e node.attr.rack="0402-K03" \
-e gateway.recover_after_nodes=1 \
-e bootstrap.memory_lock=true \
-e bootstrap.system_call_filter=false \
-e indices.fielddata.cache.size="25%" \
elasticsearch:7.17.0
#登陆账号密码
elastic
elastic
node-3
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y;
docker -v;
mkdir -p /etc/docker;
cd /etc/docker;
# Docker registry mirror. The file must be plain JSON: the original body
# contained markdown-escaped brackets ("\[" ... "\]"), which is invalid
# JSON and makes dockerd fail to parse /etc/docker/daemon.json.
cat > daemon.json <<EOF
{"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]}
EOF
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v
systemctl daemon-reload;
systemctl restart docker;
docker info;
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a
#创立目录上传证书
mkdir -p /data/modules/es/certs
#上传证书到 /data/modules/es/certs(与上一步创建、容器挂载的目录一致)
#链接:https://pan.baidu.com/s/1_-RojJNqt1w_o8P9ugDyTQ 提取码:o93o
mkdir -p /data/modules/es/config
mkdir -p /data/modules/es/logs
mkdir -p /data/modules/es/data
mkdir -p /data/modules/es/plugins
## 写入配置文件
cd /data/modules/es/config
cat > elasticsearch.yml << EOF
cluster.name: es-cluster
node.name: node-3
network.host: 0.0.0.0
network.publish_host: 10.0.0.23
http.port: 9200
transport.port: 9300
bootstrap.memory_lock: true
discovery.seed_hosts: ["10.0.0.21:9300","10.0.0.22:9300","10.0.0.23:9300"]
cluster.initial_master_nodes: ["10.0.0.21","10.0.0.22","10.0.0.23"]
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: "certs/elastic-certificates.p12"
xpack.security.transport.ssl.truststore.path: "certs/elastic-certificates.p12"
xpack.monitoring.collection.enabled: true
xpack.monitoring.exporters.my_local.type: local
xpack.monitoring.exporters.my_local.use_ingest: false
EOF
#受权开始口
chown es:es /data/modules -R
firewall-cmd --zone=public --add-port=9100/tcp --permanent;
firewall-cmd --zone=public --add-port=9200/tcp --permanent;
firewall-cmd --zone=public --add-port=9300/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;
firewall-cmd --list-all;
#容器网络
systemctl restart docker
docker network create \
--driver=bridge \
--subnet=10.10.10.0/24 \
--ip-range=10.10.10.0/24 \
--gateway=10.10.10.254 \
es-net
## 创立容器
docker run --name es \
-d --network=es-net \
--ip=10.10.10.23 \
--restart=always \
--publish 9200:9200 \
--publish 9300:9300 \
--privileged=true \
--ulimit nofile=655350 \
--ulimit memlock=-1 \
--memory=2G \
--memory-swap=-1 \
--volume /data/modules/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
--volume /data/modules/es/data:/usr/share/elasticsearch/data \
--volume /data/modules/es/logs:/usr/share/elasticsearch/logs \
--volume /data/modules/es/certs:/usr/share/elasticsearch/config/certs \
--volume /etc/localtime:/etc/localtime \
-e TERM=dumb \
-e ELASTIC_PASSWORD='elastic' \
-e ES_JAVA_OPTS="-Xms256m -Xmx256m" \
-e path.data=data \
-e path.logs=logs \
-e node.master=true \
-e node.data=true \
-e node.ingest=false \
-e node.attr.rack="0402-K03" \
-e gateway.recover_after_nodes=1 \
-e bootstrap.memory_lock=true \
-e bootstrap.system_call_filter=false \
-e indices.fielddata.cache.size="25%" \
elasticsearch:7.17.0
#登陆账号密码
elastic
elastic
Kibana
Master
# 装置 Docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y;
docker -v;
mkdir -p /etc/docker;
cd /etc/docker;
# Docker registry mirror. The file must be plain JSON: the original body
# contained markdown-escaped brackets ("\[" ... "\]"), which is invalid
# JSON and makes dockerd fail to parse /etc/docker/daemon.json.
cat > daemon.json <<EOF
{"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]}
EOF
#装置 docker-compose
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v
#重启 docker
systemctl daemon-reload;
systemctl restart docker;
docker info;
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a
#开启防火墙
firewall-cmd --zone=public --add-port=5601/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;
firewall-cmd --list-all;
#创立容器网络
docker network create \
--driver=bridge \
--subnet=10.10.10.0/24 \
--ip-range=10.10.10.0/24 \
--gateway=10.10.10.254 \
elk-net
#下载镜像
docker pull kibana:7.17.0
#装置 Kibana
docker run --name kibana \
-d --network=elk-net \
--ip=10.10.10.11 \
--restart=always \
--publish 5601:5601 \
--privileged=true \
kibana:7.17.0
#Write kibana.yml inside the container non-interactively.
# The original ran an interactive `docker exec -it ... /bin/bash`; that
# blocks until the operator exits, and every line after it executed on
# the HOST, not in the container. Feeding the commands on stdin (-i with
# a quoted outer heredoc, so nothing expands on the host) makes this
# scriptable end to end.
docker exec -i -u root kibana /bin/bash << 'DOCKER_EOF'
cd /usr/share/kibana/config
cat > kibana.yml << EOF
server.name: kibana
server.port: 5601
i18n.locale: "zh-CN"
server.host: "0.0.0.0"
kibana.index: ".kibana"
server.shutdownTimeout: "5s"
server.publicBaseUrl: "http://10.0.0.11:5601"
monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.hosts: ["http://10.0.0.21:9200","http://10.0.0.22:9200","http://10.0.0.23:9200"]
elasticsearch.username: "elastic"
elasticsearch.password: "elastic"
EOF
DOCKER_EOF
#Restart so Kibana picks up the new configuration
docker restart kibana
Zookeeper+Kafka
Node-1
#Zookeeper-1
#ELK 同网络
docker network create \
--driver=bridge \
--subnet=10.10.10.0/24 \
--ip-range=10.10.10.0/24 \
--gateway=10.10.10.254 \
es-net
#创立映射目录
mkdir -p /data/modules/zookeeper/data
mkdir -p /data/modules/zookeeper/logs
mkdir -p /data/modules/zookeeper/conf
#(myid) 须要与 server.(id) 雷同
cd /data/modules/zookeeper/data
cat > myid << EOF
1
EOF
#写入配置文件
cd /data/modules/zookeeper/conf/
cat > zoo.cfg << EOF
#集群节点间心跳查看距离,单位是毫秒,后续所有和工夫相干的配置都是该值的倍数,进行整数倍的配置,如 4 等于 8000
tickTime=2000
#集群其余节点与 Master 通信实现的初始通信工夫限度,这里代表 10*2000
initLimit=10
#若 Master 节点在超过 syncLimit*tickTime 的工夫还未收到响应,认为该节点宕机
syncLimit=5
#数据寄存目录
dataDir=/data
#ZK 日志文件寄存门路
dataLogDir=/logs
#ZK 服务端口
clientPort=2181
#单个客户端最大连接数限度,0 代表不限度
maxClientCnxns=60
#快照文件保留的数量
autopurge.snapRetainCount=3
#清理快照文件和事务日志文件的频率,默认为 0 代表不开启,单位是小时
autopurge.purgeInterval=1
#server.A=B:C:D 集群设置,#A 示意第几号服务器;#B 是 IP;
#C 是该服务器与 leader 通信端口;#D 是 leader 挂掉后从新选举所用通信端口;两个端口号能够随便
server.1=0.0.0.0:2888:3888
server.2=10.0.0.22:2888:3888
server.3=10.0.0.23:2888:3888
EOF
#开启端口
firewall-cmd --permanent --zone=public --add-port=2181/tcp;
firewall-cmd --permanent --zone=public --add-port=2888/tcp;
firewall-cmd --permanent --zone=public --add-port=3888/tcp;
firewall-cmd --reload
#下载镜像
docker pull zookeeper:3.7.0
#创立容器
docker run -d \
-p 2181:2181 \
-p 2888:2888 \
-p 3888:3888 \
--network=es-net \
--name zookeeper \
--ip=10.10.10.31 \
--privileged=true \
--restart always \
-v /data/modules/zookeeper/data:/data \
-v /data/modules/zookeeper/logs:/logs \
-v /data/modules/zookeeper/data/myid:/data/myid \
-v /data/modules/zookeeper/conf/zoo.cfg:/conf/zoo.cfg \
zookeeper:3.7.0
#查看容器
docker ps
#装置 Kafka
docker pull wurstmeister/kafka
#创立日志目录
mkdir -p /data/modules/kafka/logs
#创立容器
docker run -d --name kafka \
--publish 9092:9092 \
--network=es-net \
--ip=10.10.10.41 \
--privileged=true \
--restart always \
--link zookeeper \
--env KAFKA_ZOOKEEPER_CONNECT=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181 \
--env KAFKA_ADVERTISED_HOST_NAME=10.0.0.21 \
--env KAFKA_ADVERTISED_PORT=9092 \
--env KAFKA_LOG_DIRS=/kafka/kafka-logs-1 \
-v /data/modules/kafka/logs:/kafka/kafka-logs-1 \
wurstmeister/kafka:2.13-2.8.1
#下载镜像
docker pull sheepkiller/kafka-manager:alpine
#kafka-manager
docker run -itd --restart=always \
--name=kafka-manager \
-p 9000:9000 \
--network=es-net \
--ip=10.10.10.51 \
--privileged=true \
-e ZK_HOSTS="10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181" \
sheepkiller/kafka-manager:alpine
Node-2
#Zookeeper-2
#ELK 同网络
docker network create \
--driver=bridge \
--subnet=10.10.10.0/24 \
--ip-range=10.10.10.0/24 \
--gateway=10.10.10.254 \
es-net
#创立映射目录
mkdir -p /data/modules/zookeeper/data
mkdir -p /data/modules/zookeeper/logs
mkdir -p /data/modules/zookeeper/conf
#(myid) 须要与 server.(id) 雷同
cd /data/modules/zookeeper/data
cat > myid << EOF
2
EOF
#写入配置文件
cd /data/modules/zookeeper/conf/
cat > zoo.cfg << EOF
#集群节点间心跳查看距离,单位是毫秒,后续所有和工夫相干的配置都是该值的倍数,进行整数倍的配置,如 4 等于 8000
tickTime=2000
#集群其余节点与 Master 通信实现的初始通信工夫限度,这里代表 10*2000
initLimit=10
#若 Master 节点在超过 syncLimit*tickTime 的工夫还未收到响应,认为该节点宕机
syncLimit=5
#数据寄存目录
dataDir=/data
#ZK 日志文件寄存门路
dataLogDir=/logs
#ZK 服务端口
clientPort=2181
#单个客户端最大连接数限度,0 代表不限度
maxClientCnxns=60
#快照文件保留的数量
autopurge.snapRetainCount=3
#清理快照文件和事务日志文件的频率,默认为 0 代表不开启,单位是小时
autopurge.purgeInterval=1
#server.A=B:C:D 集群设置,#A 示意第几号服务器;#B 是 IP;
#C 是该服务器与 leader 通信端口;#D 是 leader 挂掉后从新选举所用通信端口;两个端口号能够随便
server.1=10.0.0.21:2888:3888
server.2=0.0.0.0:2888:3888
server.3=10.0.0.23:2888:3888
EOF
#开启端口
firewall-cmd --permanent --zone=public --add-port=2181/tcp;
firewall-cmd --permanent --zone=public --add-port=2888/tcp;
firewall-cmd --permanent --zone=public --add-port=3888/tcp;
firewall-cmd --reload
#下载镜像
docker pull zookeeper:3.7.0
#创立容器
docker run -d \
-p 2181:2181 \
-p 2888:2888 \
-p 3888:3888 \
--network=es-net \
--name zookeeper \
--ip=10.10.10.32 \
--privileged=true \
--restart always \
-v /data/modules/zookeeper/data:/data \
-v /data/modules/zookeeper/logs:/logs \
-v /data/modules/zookeeper/data/myid:/data/myid \
-v /data/modules/zookeeper/conf/zoo.cfg:/conf/zoo.cfg \
zookeeper:3.7.0
#查看容器
docker ps
#Install Kafka (broker 2 of 3)
# Pin the image tag: node-1 runs wurstmeister/kafka:2.13-2.8.1, and the
# original untagged pull/run here would fetch :latest, producing a
# mixed-version Kafka cluster.
docker pull wurstmeister/kafka:2.13-2.8.1
#Host directory for broker logs/data
mkdir -p /data/modules/kafka/logs
#Create the broker container
docker run -d --name kafka \
--publish 9092:9092 \
--network=es-net \
--ip=10.10.10.42 \
--privileged=true \
--restart always \
--link zookeeper \
--env KAFKA_ZOOKEEPER_CONNECT=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181 \
--env KAFKA_ADVERTISED_HOST_NAME=10.0.0.22 \
--env KAFKA_ADVERTISED_PORT=9092 \
--env KAFKA_LOG_DIRS=/kafka/kafka-logs-1 \
-v /data/modules/kafka/logs:/kafka/kafka-logs-1 \
wurstmeister/kafka:2.13-2.8.1
#下载镜像
docker pull sheepkiller/kafka-manager:alpine
#kafka-manager
docker run -itd --restart=always \
--name=kafka-manager \
-p 9000:9000 \
--network=es-net \
--ip=10.10.10.52 \
--privileged=true \
-e ZK_HOSTS="10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181" \
sheepkiller/kafka-manager:alpine
Node-3
#Zookeeper-3
#ELK 同网络
docker network create \
--driver=bridge \
--subnet=10.10.10.0/24 \
--ip-range=10.10.10.0/24 \
--gateway=10.10.10.254 \
es-net
#创立映射目录
mkdir -p /data/modules/zookeeper/data
mkdir -p /data/modules/zookeeper/logs
mkdir -p /data/modules/zookeeper/conf
#(myid) 须要与 server.(id) 雷同
cd /data/modules/zookeeper/data
cat > myid << EOF
3
EOF
#写入配置文件
cd /data/modules/zookeeper/conf/
cat > zoo.cfg << EOF
#集群节点间心跳查看距离,单位是毫秒,后续所有和工夫相干的配置都是该值的倍数,进行整数倍的配置,如 4 等于 8000
tickTime=2000
#集群其余节点与 Master 通信实现的初始通信工夫限度,这里代表 10*2000
initLimit=10
#若 Master 节点在超过 syncLimit*tickTime 的工夫还未收到响应,认为该节点宕机
syncLimit=5
#数据寄存目录
dataDir=/data
#ZK 日志文件寄存门路
dataLogDir=/logs
#ZK 服务端口
clientPort=2181
#单个客户端最大连接数限度,0 代表不限度
maxClientCnxns=60
#快照文件保留的数量
autopurge.snapRetainCount=3
#清理快照文件和事务日志文件的频率,默认为 0 代表不开启,单位是小时
autopurge.purgeInterval=1
#server.A=B:C:D 集群设置,#A 示意第几号服务器;#B 是 IP;
#C 是该服务器与 leader 通信端口;#D 是 leader 挂掉后从新选举所用通信端口;两个端口号能够随便
server.1=10.0.0.21:2888:3888
server.2=10.0.0.22:2888:3888
server.3=0.0.0.0:2888:3888
EOF
#开启端口
firewall-cmd --permanent --zone=public --add-port=2181/tcp;
firewall-cmd --permanent --zone=public --add-port=2888/tcp;
firewall-cmd --permanent --zone=public --add-port=3888/tcp;
firewall-cmd --reload
#下载镜像
docker pull zookeeper:3.7.0
#创立容器
docker run -d \
-p 2181:2181 \
-p 2888:2888 \
-p 3888:3888 \
--network=es-net \
--name zookeeper \
--ip=10.10.10.33 \
--privileged=true \
--restart always \
-v /data/modules/zookeeper/data:/data \
-v /data/modules/zookeeper/logs:/logs \
-v /data/modules/zookeeper/data/myid:/data/myid \
-v /data/modules/zookeeper/conf/zoo.cfg:/conf/zoo.cfg \
zookeeper:3.7.0
#查看容器
docker ps
#Install Kafka (broker 3 of 3)
# Pin the image tag: node-1 runs wurstmeister/kafka:2.13-2.8.1, and the
# original untagged pull/run here would fetch :latest, producing a
# mixed-version Kafka cluster.
docker pull wurstmeister/kafka:2.13-2.8.1
#Host directory for broker logs/data
mkdir -p /data/modules/kafka/logs
#Create the broker container
docker run -d --name kafka \
--publish 9092:9092 \
--network=es-net \
--ip=10.10.10.43 \
--privileged=true \
--restart always \
--link zookeeper \
--env KAFKA_ZOOKEEPER_CONNECT=10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181 \
--env KAFKA_ADVERTISED_HOST_NAME=10.0.0.23 \
--env KAFKA_ADVERTISED_PORT=9092 \
--env KAFKA_LOG_DIRS=/kafka/kafka-logs-1 \
-v /data/modules/kafka/logs:/kafka/kafka-logs-1 \
wurstmeister/kafka:2.13-2.8.1
#下载镜像
docker pull sheepkiller/kafka-manager:alpine
#kafka-manager
docker run -itd --restart=always \
--name=kafka-manager \
-p 9000:9000 \
--network=es-net \
--ip=10.10.10.53 \
--privileged=true \
-e ZK_HOSTS="10.0.0.21:2181,10.0.0.22:2181,10.0.0.23:2181" \
sheepkiller/kafka-manager:alpine
Logstash
master
# 装置 Docker
sudo yum update -y
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum install docker-ce -y;
docker -v;
mkdir -p /etc/docker;
cd /etc/docker;
# Docker registry mirror. The file must be plain JSON: the original body
# contained markdown-escaped brackets ("\[" ... "\]"), which is invalid
# JSON and makes dockerd fail to parse /etc/docker/daemon.json.
cat > daemon.json <<EOF
{"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]}
EOF
#装置 docker-compose
sudo curl -L "https://get.daocloud.io/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose -v
#重启 docker
systemctl daemon-reload;
systemctl restart docker;
docker info;
systemctl enable docker
systemctl start docker
systemctl status docker
docker ps -a
#开启防火墙
firewall-cmd --zone=public --add-port=5044/tcp --permanent;
firewall-cmd --zone=public --add-service=http --permanent;
firewall-cmd --zone=public --add-service=https --permanent;
firewall-cmd --reload;
firewall-cmd --list-all;
#Unified directory layout
# POSIX test needs spaces inside the brackets; the original "[! -d ...]"
# is a "command not found" error, so the guards never evaluated.
if [ ! -d /data/software ]; then
  mkdir -p /data/software/
fi
# NOTE(review): this checks /data/modules/mysql/ but creates /data/modules/ —
# kept as-is; confirm which path was intended.
if [ ! -d /data/modules/mysql/ ]; then
  mkdir -p /data/modules/
fi
mkdir -p /data/modules/logstash/conf/
cd /data/modules/logstash/conf/
# Logstash main settings; pipeline definitions live in conf.d/*.conf
cat > logstash.yml << EOF
http.host: "0.0.0.0"
path.config: /usr/share/logstash/config/conf.d/*.conf
path.logs: /usr/share/logstash/logs
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: elastic
xpack.monitoring.elasticsearch.hosts: ["http://10.0.0.21:9200","http://10.0.0.22:9200","http://10.0.0.23:9200"]
EOF
# Pipeline configs must live in a conf.d/ DIRECTORY: logstash.yml points
# path.config at conf.d/*.conf and the container bind-mounts .../conf.d/
# as a directory. The original wrote a plain FILE named "conf.d", which
# breaks that mount. The delimiter is quoted so the ${NODE_*_IP}
# placeholders reach Logstash literally (Logstash resolves ${VAR} from
# its own environment); an unquoted EOF expanded them to empty strings
# at write time, producing hosts => [":9200",...].
mkdir -p conf.d
cat > conf.d/nginx-access.conf << 'EOF'
input {
  beats { port => 5044 }
  file {
    # Nginx access log to tail
    path => "/usr/local/nginx/logs/access.log"
    start_position => "beginning"
  }
}
filter {
  if [path] =~ "access" {
    mutate { replace => { "type" => "nginx_access" } }
    grok { match => { "message" => "%{COMBINEDAPACHELOG}" } }
  }
  date {
    # Parse the access-log timestamp
    match => ["timestamp" , "dd/MMM/yyyy:HH:mm:ss Z"]
  }
}
output {
  elasticsearch {
    # Destination ES nodes
    hosts => ["${NODE_1_IP}:9200","${NODE_2_IP}:9200","${NODE_3_IP}:9200"]
  }
  stdout { codec => rubydebug }
}
EOF
#Create the container network (bridge shared by the ELK containers)
docker network create \
--driver=bridge \
--subnet=10.10.10.0/24 \
--ip-range=10.10.10.0/24 \
--gateway=10.10.10.254 \
elk-net
#Pull the Logstash image
docker pull elastic/logstash:7.17.0
#Start the container, mounting the settings file and the conf.d/ pipeline directory
# NOTE(review): -d appears twice (-dit and -d) — redundant but harmless
docker run -dit --name=logstash \
-d --network=elk-net \
--ip=10.10.10.12 \
--publish 5044:5044 \
--restart=always --privileged=true \
-e ES_JAVA_OPTS="-Xms512m -Xmx512m" \
-v /data/modules/logstash/conf/logstash.yml:/usr/share/logstash/config/logstash.yml \
-v /data/modules/logstash/conf/conf.d/:/usr/share/logstash/config/conf.d/ \
elastic/logstash:7.17.0
Filebeat
client
Filebeat
mysql
#!/bin/bash
# Standalone installer: downloads the MySQL 5.7 binary tarball, initialises
# a datadir under /data/mysql, sets the root password and registers a
# systemd unit. Must run as root on a yum-based distribution.
MYSQL_V=5.7.37
TMP_DIR=/tmp
INSTALL_DIR=/usr/local
function install_mysql(){
MYSQL_BASE=/usr/local/mysql
cd $TMP_DIR
# derive the tarball name from MYSQL_V instead of hard-coding the version
file="mysql-${MYSQL_V}-linux-glibc2.12-x86_64.tar.gz"
# "[ ! -f ... ]" needs the inner spaces; the original "[! -f $file]"
# was a syntax error, so the download guard never evaluated
if [ ! -f "$file" ]; then
echo "File not found!"
# the original passed a stray extra "wget" word to wget as a bogus URL
yum install -y wget && wget -c "https://cdn.mysql.com//Downloads/MySQL-5.7/${file}"
echo "下载实现, 正在解压......."
tar -zxvf "$file"
mv "mysql-${MYSQL_V}-linux-glibc2.12-x86_64" /usr/local/mysql
cd /usr/local/mysql
#exit 0
fi
echo "创立用户组"
userdel mysql;
groupadd mysql;
useradd -r -g mysql mysql;
mkdir -p /data/mysql;
chown mysql:mysql -R /data/mysql;
cd /etc
echo "写入配置文件"
cat > my.cnf <<EOF
[mysqld]
bind-address=0.0.0.0 #绑定地址运行近程连贯
port=3306 #Mysql 凋谢的端口
user=mysql #数据库登录用户
basedir=/usr/local/mysql #Mysql 装置的绝对路径
datadir=/data/mysql #Mysql 数据寄存的绝对路径
socket=/tmp/mysql.sock #套接字文件
log-error=/data/mysql/mysql.err #mysql 生成的谬误日志寄存的门路
pid-file=/data/mysql/mysql.pid #为 mysqld 程序指定一个寄存过程 ID 的文件
character_set_server=utf8mb4 #数据库字符编码
symbolic-links=0 #是否开启链接符号
explicit_defaults_for_timestamp=true #数据库 timestamp 类型的列自动更新
EOF
echo "初始化 Mysql"
cd /usr/local/mysql/bin/
./mysqld --defaults-file=/etc/my.cnf --basedir=/usr/local/mysql/ --datadir=/data/mysql/ --user=mysql --initialize
sleep 2s
echo "启动 mysql"
cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysql
service mysql start
service mysql status
ln -s /usr/local/mysql/bin/mysql /usr/bin
echo "获取 mysql 初始密码"
# --initialize writes "A temporary password ... root@localhost: XXXX" to the error log
PASSWORD=$(grep "temporary password" /data/mysql/mysql.err | awk -F"root@localhost:" '{print $2}')
echo "批改 mysql 明码"
# spaces added around the quoted identifiers; the original ran the tokens together
$MYSQL_BASE/bin/mysql --socket=/tmp/mysql.sock --connect-expired-password -uroot -p${PASSWORD} -e "ALTER USER 'root'@'localhost' IDENTIFIED BY 'root@.com';"
$MYSQL_BASE/bin/mysql --socket=/tmp/mysql.sock -uroot -proot@.com -e "UPDATE mysql.user SET host = '%' WHERE user = 'root';"
$MYSQL_BASE/bin/mysql --socket=/tmp/mysql.sock -uroot -proot@.com -e "FLUSH PRIVILEGES;"
echo "重启数据库"
service mysql restart;
service mysql status;
service mysql stop;
sleep 2s;
echo "以服务项启动"
cd /usr/lib/systemd/system
# The unit MUST be named mysql.service — the original wrote "mysq.service",
# so the systemctl calls below could never find the unit. The quoted
# delimiter keeps $MAINPID literal for systemd; the original unquoted EOF
# expanded it to an empty string at write time, breaking reload/stop.
cat > mysql.service << 'EOF'
[Unit]
Description=Mysql
After=syslog.target network.target remote-fs.target nss-lookup.target
[Service]
Type=forking
PIDFile=/data/mysql/mysql.pid
ExecStart=/usr/local/mysql/support-files/mysql.server start
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/bin/kill -s QUIT $MAINPID
PrivateTmp=false
[Install]
WantedBy=multi-user.target
EOF
# make systemd pick up the new unit before starting it
systemctl daemon-reload
systemctl start mysql
systemctl enable mysql
systemctl status mysql
#cp /usr/local/mysql/support-files/mysql.server /etc/rc.d/init.d/mysqld;
#chmod +x /etc/init.d/mysqld;
#chkconfig --add mysqld;
#chkconfig --list;
firewall-cmd --zone=public --add-port=3306/tcp --permanent;
firewall-cmd --reload;
firewall-cmd --list-all;
echo "=========> MYSQL 信息 <========="
echo "数据库版本 : ${MYSQL_V}"
echo "数据库明码 : root@.com"
echo "数据库端口 : 3306"
echo "BASEDIR 目录: /usr/local/mysql"
echo "DATADIR 目录: /data/mysql"
}
install_mysql
nginx
#!/bin/bash
# Build nginx from source with SSL and stub_status, then register it as a
# systemd service. Must run as root on a yum-based distribution.
#Nginx version
NGINX_V=1.20.0
#Download directory
TMP_DIR=/tmp
#Install prefix
INSTALL_DIR=/usr/local
function install_nginx() {
#build dependencies
yum -y install gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel
#fetch the source (the original passed a stray extra "wget" word to wget)
cd ${TMP_DIR}
yum install -y wget && wget -c "http://nginx.org/download/nginx-${NGINX_V}.tar.gz"
#unpack
tar -zxvf "${TMP_DIR}/nginx-${NGINX_V}.tar.gz"
mv "nginx-${NGINX_V}" nginx;cd nginx;
#configure
./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module
sleep 2s
#compile and install
make && make install
#register as a systemd service
cd /usr/lib/systemd/system;
# quoted delimiter keeps $MAINPID literal for systemd; the original
# unquoted EOF expanded it to an empty string at write time, which
# breaks ExecReload/ExecStop
cat > nginx.service << 'EOF'
[Unit]
Description=nginx - high performance web server
Documentation=http://nginx.org/en/docs/
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=forking
PIDFile=/usr/local/nginx/logs/nginx.pid
ExecStartPre=/usr/local/nginx/sbin/nginx -t -c /usr/local/nginx/conf/nginx.conf
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/bin/kill -s QUIT $MAINPID
PrivateTmp=true
[Install]
WantedBy=multi-user.target
EOF
# make systemd pick up the new unit before starting it
systemctl daemon-reload
systemctl restart firewalld;firewall-cmd --reload;
systemctl start nginx;systemctl enable nginx;
systemctl status nginx.service;
}
install_nginx
redis
#!/bin/bash
# Prepares the build toolchain for compiling Redis (see install_redis below).
sudo yum install net-tools -y
# First "10."-prefixed, non-loopback IPv4 address of this host.
# NOTE(review): IP is never referenced later in this script — confirm
# whether it was meant to be written into redis.conf as the bind address.
IP=`ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"|grep "10."`
# NOTE(review): REDIS_PASSWD is also unused; the config below hard-codes 123456.
REDIS_PASSWD=123456
#Build toolchain (devtoolset-9 gcc)
yum -y install centos-release-scl devtoolset-9-gcc
yum -y install devtoolset-9-gcc-c++ devtoolset-9-binutils
# NOTE(review): "scl enable ... bash" spawns an interactive subshell and
# blocks this script until it exits — likely unintended for batch use.
scl enable devtoolset-9 bash
echo "source /opt/rh/devtoolset-9/enable" >> /etc/profile
gcc -v
# Build Redis 6.2.6 from source, install under /usr/local/redis, write a
# config file, open the firewall port and start the server with it.
install_redis(){
#staging directory ("[ ! -d ... ]" needs the inner spaces; the original
#"[! -d ...]" was a syntax error, so the guard never evaluated)
if [ ! -d /data/software ]; then
mkdir -p /data/software/
fi
#download the source tarball if not already present
cd /data/software/
file="redis-6.2.6.tar.gz"
if [ ! -f "$file" ]; then
yum install -y wget && wget http://download.redis.io/releases/redis-6.2.6.tar.gz
#exit 0
fi
#unpack and compile
cd /data/software
tar -zxvf redis-6.2.6.tar.gz -C /usr/local/
cd /usr/local/
mv redis-6.2.6 redis
cd redis
sudo make
sleep 2s
sudo make PREFIX=/usr/local/redis install
mkdir /usr/local/redis/etc/
cp /usr/local/redis/redis.conf /usr/local/redis/redis.conf.bak
#write the config. redis.conf comments start with "#": the original used
#"//" lines, which Redis rejects as unknown directives and refuses to start.
cd /usr/local/redis
cat > redis.conf << EOF
# run in the background so this script can continue past server start
daemonize yes
# listen port (default 6379)
port 6379
# log file location
logfile "/var/log/redis.log"
# change the bind address below to allow remote access
bind 127.0.0.1
# auth password
requirepass 123456
# the original wrote both "yes" and "no"; the later value (no) is the one
# Redis applies, so keep a single explicit setting
protected-mode no
EOF
#environment variables — quoted delimiter keeps \$PATH/\$REDIS_HOME literal
#so they expand at login; the original unquoted EOF baked the installer's
#PATH and an empty REDIS_HOME into /etc/profile
cd /etc/
cat >> profile << 'EOF'
export REDIS_HOME=/usr/local/redis
export PATH=$PATH:$REDIS_HOME/bin/
EOF
source /etc/profile
#firewall
firewall-cmd --permanent --zone=public --add-port=6379/tcp
firewall-cmd --reload
#start redis WITH the config written above (the original launched
#redis-server with no arguments, ignoring redis.conf entirely and
#blocking the script in the foreground)
cd /usr/local/redis/bin
ln -s /usr/local/redis/bin/redis-server /usr/bin/redis-server
ln -s /usr/local/redis/bin/redis-cli /usr/bin/redis-cli
redis-server /usr/local/redis/redis.conf
}
install_redis
正文完