kubeadm init performs the following steps:

  1. Run preflight checks (these can also be run on their own, as sketched below).
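A minimal sketch of invoking just this phase, assuming the phase subcommands available in kubeadm v1.13 and later:

```bash
# Run only the preflight phase: checks ports, container runtime,
# swap, kernel modules, and minimum versions, then exits.
kubeadm init phase preflight
```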
  2. Generate the keys and certificates in /etc/kubernetes/pki:
```console
# ls
apiserver.crt              apiserver-etcd-client.key  apiserver-kubelet-client.crt  ca.crt  etcd                front-proxy-ca.key      front-proxy-client.key  sa.pub
apiserver-etcd-client.crt  apiserver.key              apiserver-kubelet-client.key  ca.key  front-proxy-ca.crt  front-proxy-client.crt  sa.key
```
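Any of these can be decoded with plain openssl to see what kubeadm put in them, for example the API server serving certificate and its SANs:

```bash
# Dump issuer, validity window, and subjectAltName entries
# of the generated API server certificate.
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text
```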
  3. Generate the kubeconfig files in /etc/kubernetes/*.conf:
```console
/etc/kubernetes]$ ls *.conf
admin.conf  controller-manager.conf  kubelet.conf  scheduler.conf
```
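admin.conf carries cluster-admin credentials, while the other three are used by the kubelet, controller-manager, and scheduler respectively. A quick way to try the new cluster:

```bash
# Point kubectl at the generated admin kubeconfig.
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl get nodes
```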
  4. Generate the YAML manifests that deploy kube-apiserver, kube-controller-manager, kube-scheduler, and etcd.

As shown in the figure below, the k8s cluster contains the following pods. But at this point no k8s cluster exists yet, so how do these pods get deployed? The answer is static pods: the kubelet runs them directly from manifest files on disk. This is an elegant design, completely in the Kubernetes style.
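The kubelet watches a manifest directory on disk (with kubeadm's defaults, /etc/kubernetes/manifests) and runs whatever pod specs it finds there, no API server required:

```bash
# The four control-plane manifests kubeadm writes; the kubelet starts
# each one as a static pod before the cluster itself exists.
ls /etc/kubernetes/manifests
# etcd.yaml  kube-apiserver.yaml  kube-controller-manager.yaml  kube-scheduler.yaml
```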

Now let's look at each pod's YAML manifest.

```yaml
# etcd.yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/etcd.advertise-client-urls: https://10.xx.xx.xx:2379
  creationTimestamp: null
  labels:
    component: etcd
    tier: control-plane
  name: etcd
  namespace: kube-system
spec:
  containers:
  - command:
    - etcd
    - --advertise-client-urls=https://10.xx.xx.xx:2379
    - --cert-file=/etc/kubernetes/pki/etcd/server.crt
    - --client-cert-auth=true
    - --data-dir=/var/lib/etcd
    - --initial-advertise-peer-urls=https://10.xx.xx.xx:2380
    - --initial-cluster=localhost.localdomain=https://10.xx.xx.xx:2380
    - --key-file=/etc/kubernetes/pki/etcd/server.key
    - --listen-client-urls=https://127.0.0.1:2379,https://10.xx.xx.xx:2379
    - --listen-metrics-urls=http://127.0.0.1:2381
    - --listen-peer-urls=https://10.xx.xx.xx:2380
    - --name=localhost.localdomain
    - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
    - --peer-client-cert-auth=true
    - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    - --snapshot-count=10000
    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    image: k8s.gcr.io/etcd:3.4.3-0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 2381
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: etcd
    resources: {}
    volumeMounts:
    - mountPath: /var/lib/etcd
      name: etcd-data
    - mountPath: /etc/kubernetes/pki/etcd
      name: etcd-certs
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/pki/etcd
      type: DirectoryOrCreate
    name: etcd-certs
  - hostPath:
      path: /var/lib/etcd
      type: DirectoryOrCreate
    name: etcd-data
status: {}
```
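Since etcd here serves clients only over TLS, checking it from the node needs the same certificates the manifest mounts. A sketch, assuming an etcdctl v3 binary is present on the host:

```bash
# Query etcd health over the TLS client port using the server cert pair.
ETCDCTL_API=3 etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  endpoint health
```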
```yaml
# kube-apiserver.yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 10.xx.xx.xx:6443
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=10.xx.xx.xx
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --insecure-port=0
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --runtime-config=api/all=true
    - --secure-port=6443
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-cluster-ip-range=10.96.0.0/12
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    image: k8s.gcr.io/kube-apiserver:v1.18.0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 10.xx.xx.xx
        path: /healthz
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-apiserver
    resources:
      requests:
        cpu: 250m
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
status: {}
```
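The liveness probe above is just an HTTPS GET on /healthz, which you can reproduce by hand; with the v1.18 defaults the health endpoints are readable anonymously:

```bash
# Same check the kubelet performs; -k skips verification of the
# self-signed serving certificate.
curl -k https://127.0.0.1:6443/healthz
# ok
```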
```yaml
# kube-scheduler.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-scheduler
    tier: control-plane
  name: kube-scheduler
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=true
    image: k8s.gcr.io/kube-scheduler:v1.18.0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10259
        scheme: HTTPS
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-scheduler
    resources:
      requests:
        cpu: 100m
    volumeMounts:
    - mountPath: /etc/kubernetes/scheduler.conf
      name: kubeconfig
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/scheduler.conf
      type: FileOrCreate
    name: kubeconfig
status: {}
```
```yaml
# kube-controller-manager.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-controller-manager
    tier: control-plane
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-controller-manager
    - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --bind-address=127.0.0.1
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --cluster-name=kubernetes
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
    - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
    - --controllers=*,bootstrapsigner,tokencleaner
    - --horizontal-pod-autoscaler-sync-period=10s
    - --horizontal-pod-autoscaler-use-rest-clients=true
    - --kubeconfig=/etc/kubernetes/controller-manager.conf
    - --leader-elect=true
    - --node-monitor-grace-period=10s
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --root-ca-file=/etc/kubernetes/pki/ca.crt
    - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
    - --use-service-account-credentials=true
    image: k8s.gcr.io/kube-controller-manager:v1.18.0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10257
        scheme: HTTPS
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-controller-manager
    resources:
      requests:
        cpu: 200m
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      name: flexvolume-dir
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /etc/kubernetes/controller-manager.conf
      name: kubeconfig
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      type: DirectoryOrCreate
    name: flexvolume-dir
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath:
      path: /etc/kubernetes/controller-manager.conf
      type: FileOrCreate
    name: kubeconfig
status: {}
```
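Once the API server is answering, the four static pods show up as read-only mirror pods, named with the node name as a suffix (e.g. etcd-localhost.localdomain for the manifests above):

```bash
# Verify the control-plane static pods registered as mirror pods.
kubectl get pods -n kube-system -o wide
```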
  5. Once the above installation is done, kubeadm generates a bootstrap token. Any node with kubeadm and the kubelet installed can join the cluster with this token.
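Bootstrap tokens expire after 24 hours by default and can be listed or re-created at any time; kubeadm can also print the full join command:

```bash
# List existing bootstrap tokens.
kubeadm token list

# Mint a fresh token and print the matching kubeadm join command.
kubeadm token create --print-join-command
```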
  6. Once the token is generated, kubeadm publishes ca.crt as a ConfigMap named cluster-info in the kube-public namespace (like any API object, it ends up stored in etcd):
```console
$ kubectl get cm cluster-info -n kube-public -o yaml
apiVersion: v1
data:
  kubeconfig: |
    apiVersion: v1
    clusters:
    - cluster:
        certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJd01EVXhNakF4TURJek5Wb1hEVE13TURVeE1EQXhNREl6TlZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS21hCmh3NDFTOStoVHZFc2ZXdVVib2RZQVFsSXdlYlljM3J1bUc4dzAyWTlWdHRJSWE4UFBJTHVJdG1ySCt1M0V4cUkKUmljQnhFQ3k1c09GT2srbFEzNHB5TkJGdWxUZmdldHNySUlIUC95dVYyeHVnVDRySW5aZyt2ckc4Y1NqWUtNaQpXdkREY2g2OC9UWllOd2xaNzgyOEYvdTF1SDdQOVhaZnFaQ01FcTU1ZWl1T1lMc3JudnJ6eml6MHBxakdlcXcvCkZXcmh0REo2M3pUMWVXUVcrSDdDaEZKSXpwc0ZoUmt5emNVQU1pWlNVTDBIUHc5amtWb2JWL1B5cVM1Wk9kdEUKV01VQk5Td3NPRDVXNGs4MTdhTXNvdGZiM3haMS9QL3NGUmhqdjEwc2w5bXNGMFJDcTgrenRVNW5CQ0dsdXFXdQpQT0ZDK2pHSnFwTlgwM3Bkam1rQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFCTi8xSWtmd05nMlU2emU5UjB4d254eWRXQ1MKNEhLejdyQnBjcmxIbW0rSVIwekFMd3pJRi9yL2NJVHZlbzZTR3VrZ0E5OFhkM0lqckgwR0JzNmt0dWtEV1VobQp2dFp5U2V1cVpzemhPeEZXWVZFTlVzOEdteVBDZE1Wb1BSOFBlOWZudVFBQ2tQYWcrVXc1L2toNkFaZThEdi9ZCkdtcC9Xd1JleFlGcFBPMVhqWjBuUWtNcGhKMVFrVHdnK1dsK2JpWE1SVXNsVFViZk55TGtkUUdFSFJTVUF0Z1IKaFVFcHZGNU5UcVUrYk1uQm0wbGdDblBCNGc0WVl3Zm5UWWVzWHVFbkVvc05oUDl1UzFETEtHeXZPY3k1Qi9KRQphN2J1Z0pQWFZQd2crRkxnci9IZUQvcGI2VWpGTU13UHBoaFJPamNIY1pVWkpoeS82eHlpM3k3UkxyMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
        server: https://10.xx.xx.xx:6443
      name: ""
    contexts: null
    current-context: ""
    kind: Config
    preferences: {}
    users: null
kind: ConfigMap
metadata:
  creationTimestamp: "2020-05-12T01:03:03Z"
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        .: {}
        f:kubeconfig: {}
    manager: kubeadm
    operation: Update
    time: "2020-05-12T01:03:03Z"
  name: cluster-info
  namespace: kube-public
  resourceVersion: "190690"
  selfLink: /api/v1/namespaces/kube-public/configmaps/cluster-info
  uid: 816ef3fd-54b7-41ae-be4c-08d0e8359a40
```
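A joining node fetches this ConfigMap anonymously (kube-public is readable without authentication) and pins the CA via the --discovery-token-ca-cert-hash argument of kubeadm join. That hash is the SHA-256 of the CA public key and can be computed with plain openssl:

```bash
# Compute the discovery hash to pass to kubeadm join.
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'
```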
  7. Install kube-proxy and the DNS add-on (CoreDNS).
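Unlike the control-plane components, these are ordinary API objects created now that the cluster exists: kube-proxy runs as a DaemonSet on every node and CoreDNS as a Deployment:

```bash
# Confirm the add-ons kubeadm deployed through the API server.
kubectl get daemonset kube-proxy -n kube-system
kubectl get deployment coredns -n kube-system
```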