
Kubernetes 1.18: configuring cluster-internal DNS (CoreDNS)

Modify the coredns ConfigMap so that CoreDNS forwards unresolved queries to the internal DNS server (192.168.6.242 here) instead of /etc/resolv.conf.


[root@master49 test]#  kubectl get cm coredns -n kube-system -o yaml
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health {
           lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
           pods insecure
           fallthrough in-addr.arpa ip6.arpa
           ttl 30
        }
        prometheus :9153
        #forward . /etc/resolv.conf
        forward . 192.168.6.242  ### change this line to point at the internal DNS server
        cache 30
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2020-12-21T02:23:59Z"
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data: {}
    manager: kubeadm
    operation: Update
    time: "2020-12-21T02:23:59Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        f:Corefile: {}
    manager: kubectl
    operation: Update
    time: "2021-03-16T08:26:27Z"
  name: coredns
  namespace: kube-system
  resourceVersion: "23351960"
  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
  uid: 8d85909b-afe2-47ab-92c8-0a14059433d3

[root@master49 test]# kubectl edit cm coredns -n kube-system
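After saving the edit, a quick check (not part of the original output) is to print the applied Corefile back and confirm the forward line now points at the internal DNS server:

[root@master49 test]# kubectl get cm coredns -n kube-system -o jsonpath='{.data.Corefile}' | grep forward
forward . 192.168.6.242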

Restart coredns so the new Corefile takes effect: delete the existing coredns pods and let the Deployment recreate them.


[root@master49 test]# kubectl get pod -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-75d555c48-9z5ql   1/1     Running   5          85d
calico-node-2xt99                         1/1     Running   4          85d
calico-node-8hd6s                         1/1     Running   3          85d
calico-node-bgg29                         1/1     Running   2          85d
calico-node-pq2pc                         1/1     Running   2          85d
calico-node-rs77f                         1/1     Running   4          85d
coredns-7ff77c879f-dkxcq                  1/1     Running   0          8m38s
coredns-7ff77c879f-vc262                  1/1     Running   0          8m16s
etcd-master49                             1/1     Running   5          85d
etcd-master50                             1/1     Running   3          85d
etcd-master56                             1/1     Running   17         85d
kube-apiserver-master49                   1/1     Running   0          54d
kube-apiserver-master50                   1/1     Running   0          54d
kube-apiserver-master56                   1/1     Running   0          54d
kube-controller-manager-master49          1/1     Running   7          85d
kube-controller-manager-master50          1/1     Running   5          85d
kube-controller-manager-master56          1/1     Running   6          85d
kube-proxy-4csh5                          1/1     Running   5          85d
kube-proxy-54pqr                          1/1     Running   2          85d
kube-proxy-h2ttm                          1/1     Running   3          85d
kube-proxy-nr7z4                          1/1     Running   2          85d
kube-proxy-xtrqz                          1/1     Running   3          85d
kube-scheduler-master49                   1/1     Running   7          85d
kube-scheduler-master50                   1/1     Running   5          85d
kube-scheduler-master56                   1/1     Running   6          85d
log-pilot-2lzh2                           1/1     Running   1          84d
log-pilot-4lrvr                           1/1     Running   0          84d
log-pilot-9mj54                           1/1     Running   0          84d
log-pilot-rwrzh                           1/1     Running   1          84d
log-pilot-xs7mh                           1/1     Running   1          84d
[root@master49 test]# kubectl delete pod coredns-7ff77c879f-dkxcq -n kube-system
pod "coredns-7ff77c879f-dkxcq" deleted
[root@master49 test]# kubectl delete pod coredns-7ff77c879f-vc262 -n kube-system
pod "coredns-7ff77c879f-vc262" deleted

