Cloud Native: Deploying a Kubernetes (K8s) Cluster on openEuler

Preface

Work requirements led me to use openEuler. The official openEuler documentation for deploying a K8s cluster is fairly involved, and there is little related material online, so this article documents a procedure for deploying a Kubernetes 1.20.2 cluster on openEuler 22.03, worked out through hands-on practice and testing.
This article is for learning and reference only; please do not use it directly in a production environment.

1. Installation Preparation

Before starting, the machines used to deploy the Kubernetes cluster must meet the following requirements:

  • Operating system: openEuler 22.03
  • Hardware: 2 GB RAM or more, 2 CPUs or more, 30 GB of disk or more
  • Full network connectivity between all machines in the cluster
  • Outbound internet access, required for pulling images

1.1 Server Plan

Hostname            Role         IP Address        Spec
openEuler.master01  Master node  192.168.123.208   2 CPU cores, 4 GB RAM, 40 GB disk
openEuler.node01    Node         192.168.123.167   2 CPU cores, 4 GB RAM, 40 GB disk
openEuler.node02    Node         192.168.123.213   2 CPU cores, 4 GB RAM, 40 GB disk

1.2 Server Environment Configuration

  1. Set the hostnames
# Run on master01
hostnamectl set-hostname openEuler.master01
# Run on node01
hostnamectl set-hostname openEuler.node01
# Run on node02
hostnamectl set-hostname openEuler.node02
  2. Configure host mappings
vim /etc/hosts

192.168.123.208 openEuler.master01
192.168.123.167 openEuler.node01
192.168.123.213 openEuler.node02
  3. Disable swap
# Temporarily disable the swap partition (persisting this across reboots is covered in the note below)
swapoff -a
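
Note that swapoff -a only lasts until the next reboot, and kubelet in its default configuration refuses to start if swap comes back. To persist the change, comment out the swap line in /etc/fstab as well; a minimal sketch, assuming swap is declared in /etc/fstab rather than as a systemd swap unit:

# Permanently disable swap by commenting out its /etc/fstab entry
sed -ri '/^[^#].*\sswap\s/ s/^/#/' /etc/fstab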
  4. Disable the firewall
# Stop and disable the firewall
systemctl stop firewalld && systemctl disable firewalld
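
One step the write-up above skips: on a fresh install, kubeadm's preflight checks commonly expect the br_netfilter module to be loaded and bridged traffic to be visible to iptables. Whether this is needed depends on how the openEuler image is configured, but if kubeadm init complains, the usual prep looks like this (run on every node):

# Load br_netfilter and make bridged traffic traverse iptables
modprobe br_netfilter
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system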

2. Kubernetes Cluster Installation

2.1 Master Node Installation

2.1.1 Install Docker

# Install docker
dnf install -y docker
# Enable docker at boot and start it
systemctl enable docker && systemctl start docker
# Check the docker version
docker --version
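
One pitfall worth flagging: kubeadm init fails if kubelet and the container runtime use different cgroup drivers. Whether this bites here depends on how the openEuler packages are configured, but if the init step later complains about a cgroup driver mismatch, a common fix is to point Docker at the systemd driver (a sketch; note it overwrites /etc/docker/daemon.json if one already exists):

# Check which cgroup driver Docker is using
docker info | grep -i cgroup
# Align Docker on the systemd cgroup driver
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker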

2.1.2 Install and Configure the Kubernetes Components

# Install kubeadm, kubelet, and kubernetes-master
dnf install -y kubernetes-kubeadm kubernetes-kubelet kubernetes-master
# Install conntrack (a Kubernetes dependency)
dnf install -y conntrack
# Enable kubelet at boot and start it
systemctl enable kubelet.service && systemctl start kubelet.service

# Initialize Kubernetes. Replace apiserver-advertise-address with the actual master node IP in your environment; this article uses 192.168.123.208
kubeadm init --apiserver-advertise-address=192.168.123.208 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.20.2 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16
# Option notes:
# --apiserver-advertise-address: the IP address the apiserver advertises to the other components; generally the master node IP used for intra-cluster communication. 0.0.0.0 means all available addresses on the node
# --image-repository: the image registry to pull from; set to the Aliyun mirror to speed up downloads
# --kubernetes-version: the version of the Kubernetes components
# --pod-network-cidr: the pod network address range, in CIDR notation
# --service-cidr: the Service network address range, in CIDR notation
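
If init stalls or times out while pulling images, kubeadm can pre-pull them first, using the same repository and version as the init command above:

# Pre-pull the control-plane images before running kubeadm init
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.20.2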

When output like the following appears, the initialization succeeded.

Save the kubeadm join command:

kubeadm join 192.168.123.208:6443 --token 9b3zg3.w9428fz00d993pwo --discovery-token-ca-cert-hash sha256:0287bffb9cc2c10f9ad53dcdc052462cae5ebef63cecb8d53ff689fb6e358b9e 
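
The token embedded in this command is valid for 24 hours by default. If it expires before the worker nodes join, a fresh join command can be generated on the master at any time:

# Regenerate the join command with a new token
kubeadm token create --print-join-command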

2.1.3 Configure kubectl

# Configure the environment variable
vi /etc/profile

# Append at the bottom of the file
export KUBECONFIG=/etc/kubernetes/admin.conf
# Apply the change
source /etc/profile

# Check the master node status; it will report NotReady at this point. Continue with step 2.1.4
kubectl get nodes
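
As an aside, kubeadm init's closing output suggests a per-user kubeconfig instead of the KUBECONFIG environment variable; that variant also works for non-root users:

# Per-user kubeconfig, as printed by kubeadm init
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config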

2.1.4 Configure the Network Plugin

# With the containerd runtime, the CNI plugin files default to /usr/libexec/cni
# flannel reads plugins from /opt/cni/bin by default
# Copy the files over
mkdir -p /opt/cni/bin
cp /usr/libexec/cni/* /opt/cni/bin/

# Pick one of the two installation methods below, depending on your situation
# 1. Installation method when the server cannot reach github
# Download the kube-flannel.yml file (over a proxy) and place it at /opt/yaml/kube-flannel.yml
# kube-flannel.yml URL: https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f /opt/yaml/kube-flannel.yml
# Check the master node status; it should now report Ready
kubectl get nodes

# 2. Installation method when the server can reach github
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Check the master node status; it should now report Ready, and the master installation is complete
kubectl get nodes
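
If a node stays NotReady after applying the manifest, inspecting the flannel DaemonSet pods (the manifest below creates them in the kube-flannel namespace) usually shows why:

# Verify the flannel pods are Running on every node
kubectl get pods -n kube-flannel -o wide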

Appendix: the kube-flannel.yml file as downloaded on 2022-12-29, saved here for manual use

---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - "networking.k8s.io"
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        #image: flannelcni/flannel-cni-plugin:v1.1.2 #for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.2
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        #image: flannelcni/flannel:v0.20.2 #for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        #image: flannelcni/flannel:v0.20.2 #for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate

2.2 Node Installation (both nodes)

2.2.1 Install Docker

# Install docker
dnf install -y docker
# Enable docker at boot and start it
systemctl enable docker && systemctl start docker
# Check the docker version
docker --version

2.2.2 Install and Configure the Kubernetes Components

# Install kubeadm, kubelet, and kubernetes-node
dnf install -y kubernetes-kubeadm kubernetes-kubelet kubernetes-node

# Enable kubelet at boot and start it
systemctl enable kubelet.service && systemctl start kubelet.service

# The container runtime's CNI plugin files default to /usr/libexec/cni
# Copy them to the standard CNI path
mkdir -p /opt/cni/bin
cp /usr/libexec/cni/* /opt/cni/bin/

# Join the cluster using the token created on the master node; copy this command from the kubeadm init output in 2.1.2
kubeadm join 192.168.123.208:6443 --token 9b3zg3.w9428fz00d993pwo --discovery-token-ca-cert-hash sha256:0287bffb9cc2c10f9ad53dcdc052462cae5ebef63cecb8d53ff689fb6e358b9e 

When the output reports that the node has joined the cluster, the join succeeded.

Go back to the master and check the status; after a short wait, all three nodes become Ready.

# On the master: check node status; after a short wait all three nodes report Ready
kubectl get nodes
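
By default the two workers show an empty ROLES column in kubectl get nodes. That is purely cosmetic, but if you want them labeled, the standard node-role label convention works (substitute the node names exactly as kubectl get nodes prints them; kubelet lowercases hostnames):

# Optional: label the workers so ROLES reads "worker"
kubectl label node openeuler.node01 node-role.kubernetes.io/worker=
kubectl label node openeuler.node02 node-role.kubernetes.io/worker=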

3. Testing the Kubernetes Cluster

  1. Create a pod in the Kubernetes cluster and verify that it runs properly
# Run on the master node
# Create an nginx deployment
kubectl create deployment nginx --image=nginx
# Expose the port externally via NodePort
kubectl expose deployment nginx --port=80 --type=NodePort
# Check whether nginx is running
kubectl get pod,svc

# Nginx is reachable from every node (30116 is the NodePort assigned in this environment; check kubectl get svc for yours)
192.168.123.208:30116
192.168.123.167:30116
192.168.123.213:30116
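
A quick smoke test from any machine that can reach the nodes (again, substitute the NodePort that kubectl get svc reported):

# Fetch the nginx welcome page through the NodePort
curl http://192.168.123.208:30116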

  2. Scale the nginx replicas
# Scale the deployment to 3 replicas
kubectl scale deployment nginx --replicas=3
# Check pod status
kubectl get pods
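
Adding -o wide also shows which node each replica was scheduled on, confirming that the scheduler spread them across the cluster:

# Show which node each nginx replica landed on
kubectl get pods -o wide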

If all three nginx pods report Running, the scale-out succeeded.


If this article helped you, comments, shares, and likes are welcome~
For more interesting and practical content, follow the WeChat official account 「岚山茶馆」.
