Starting with Kubernetes 1.8, resource metrics such as CPU and memory usage can be retrieved through the Metrics API. Users can query these metrics indirectly (for example by running kubectl top), and the HPA uses them to drive autoscaling. This article walks through setting up HPA on a Kubernetes cluster based on metrics-server. Before we begin, we need to understand the Metrics API and the Metrics Server.

Metrics API:
1. Through the Metrics API we can get the current resource usage of a given node or pod. The API itself does not store anything, so it cannot be used to retrieve historical resource usage.
2. The Metrics API is served under the path /apis/metrics.k8s.io/ (see the query sketch after this list).
3. The prerequisite for using the Metrics API is that metrics-server has been successfully deployed in the cluster.
4. For more details on the metrics APIs, see: https://github.com/kubernetes/metrics
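Point 2 above can be tried directly: once metrics-server is running, the Metrics API can be queried through the API server with kubectl get --raw. A minimal sketch (the pod name below is just the one from the example cluster later in this article; substitute your own):

# List current resource usage for all nodes via the Metrics API
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"

# Current usage for a single pod (example pod name, adjust to your cluster)
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/kube-system/pods/metrics-server-99d8bb9cc-m5lp9"

Both calls return only a snapshot in JSON; consistent with point 1, nothing historical is stored.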

Metrics server:
1. metrics-server is the aggregator of resource usage data for the whole Kubernetes cluster.
2. Starting with version 1.8, metrics-server can be deployed by default as a Deployment through the kube-up.sh script, or it can be deployed from YAML manifests (a quick way to check the deployment is shown after this list).
3. metrics-server collects metrics from all nodes in the cluster.
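Before moving on to the deployment walk-throughs, here is a sketch of how to confirm the prerequisite above is met, i.e. that metrics-server is deployed and its APIService is registered (object names taken from the manifests below):

# The Deployment created by either plan below lives in kube-system
kubectl -n kube-system get deployment metrics-server

# The APIService that backs /apis/metrics.k8s.io/v1beta1 should report Available=True
kubectl get apiservice v1beta1.metrics.k8s.io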

Plan-A

[root@master49 metrics-images]# cat metrics-applaction.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - "extensions"
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: metrics-server-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server-v0.3.1
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v0.3.1
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
      version: v0.3.1
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
        version: v0.3.1
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      containers:
      - name: metrics-server
        image: htcfive/metrics-server-amd64
        command:
        - /metrics-server
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP
        ports:
        - containerPort: 443
          name: https
          protocol: TCP
      - name: metrics-server-nanny
        image: wcollin/addon-resizer:1.8.1
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 5m
            memory: 50Mi
        env:
          - name: MY_POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: MY_POD_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
        volumeMounts:
        - name: metrics-server-config-volume
          mountPath: /etc/config
        command:
          - /pod_nanny
          - --cpu=40m
          - --extra-cpu=0.5m
          - --memory=40Mi
          - --extra-memory=4Mi
          - --threshold=5
          - --deployment=metrics-server-v0.2.1
          - --container=metrics-server
          - --poll-period=300000
          - --estimator=exponential
      volumes:
        - name: metrics-server-config-volume
          configMap:
            name: metrics-server-config
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
[root@master49 metrics-images]# kubectl apply -f metrics-applaction.yaml
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
service/metrics-server created
configmap/metrics-server-config created
error: unable to recognize "metrics-applaction.yaml": no matches for kind "Deployment" in version "extensions/v1beta1"
[root@master49 metrics-images]# kubectl delete -f metrics-applaction.yaml
serviceaccount "metrics-server" deleted
clusterrole.rbac.authorization.k8s.io "system:metrics-server" deleted
clusterrolebinding.rbac.authorization.k8s.io "system:metrics-server" deleted
clusterrolebinding.rbac.authorization.k8s.io "metrics-server:system:auth-delegator" deleted
rolebinding.rbac.authorization.k8s.io "metrics-server-auth-reader" deleted
apiservice.apiregistration.k8s.io "v1beta1.metrics.k8s.io" deleted
service "metrics-server" deleted
configmap "metrics-server-config" deleted
error: unable to recognize "metrics-applaction.yaml": no matches for kind "Deployment" in version "extensions/v1beta1"
[root@master49 metrics-images]# vim metrics-applaction.yaml
[root@master49 metrics-images]# kubectl apply -f metrics-applaction.yaml
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
service/metrics-server created
configmap/metrics-server-config created
deployment.apps/metrics-server created
[root@master49 metrics-images]#
[root@master49 metrics-images]# kubectl get pods -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-kube-controllers-75d555c48-c66ll   1/1     Running   0          18h
calico-node-4w9rr                         1/1     Running   0          18h
calico-node-hrvcq                         1/1     Running   0          18h
calico-node-srz5h                         1/1     Running   0          18h
calico-node-xmb28                         1/1     Running   0          18h
calico-node-zhrs2                         1/1     Running   0          18h
coredns-7ff77c879f-46sjv                  1/1     Running   0          19h
coredns-7ff77c879f-txpw9                  1/1     Running   0          5h
etcd-master49                             1/1     Running   6          18h
etcd-master50                             1/1     Running   3          18h
etcd-master56                             1/1     Running   17         18h
kube-apiserver-master49                   1/1     Running   1          18h
kube-apiserver-master50                   1/1     Running   0          18h
kube-apiserver-master56                   1/1     Running   0          18h
kube-controller-manager-master49          1/1     Running   8          18h
kube-controller-manager-master50          1/1     Running   6          18h
kube-controller-manager-master56          1/1     Running   6          18h
kube-proxy-2gcg5                          1/1     Running   0          18h
kube-proxy-4ckzn                          1/1     Running   0          18h
kube-proxy-bffn9                          1/1     Running   0          18h
kube-proxy-kdjtv                          1/1     Running   0          18h
kube-proxy-w9xzf                          1/1     Running   0          18h
kube-scheduler-master49                   1/1     Running   8          18h
kube-scheduler-master50                   1/1     Running   5          18h
kube-scheduler-master56                   1/1     Running   7          18h
log-pilot-2d2s9                           1/1     Running   0          5h1m
log-pilot-cnfvq                           1/1     Running   0          5h1m
log-pilot-hgbkm                           1/1     Running   0          5h1m
log-pilot-vwjtm                           1/1     Running   0          5h
log-pilot-zqvnp                           1/1     Running   0          5h
metrics-server-99d8bb9cc-m5lp9            2/2     Running   0          3m42s
[root@master49 metrics-images]# kubectl top pods -n kube-system
NAME                                      CPU(cores)   MEMORY(bytes)
calico-kube-controllers-75d555c48-c66ll   2m           9Mi
calico-node-4w9rr                         41m          25Mi
calico-node-hrvcq                         44m          39Mi
calico-node-srz5h                         40m          25Mi
calico-node-xmb28                         41m          25Mi
calico-node-zhrs2                         44m          41Mi
coredns-7ff77c879f-46sjv                  8m           8Mi
coredns-7ff77c879f-txpw9                  6m           8Mi
etcd-master49                             64m          99Mi
etcd-master50                             91m          428Mi
etcd-master56                             95m          423Mi
kube-apiserver-master49                   66m          471Mi
kube-apiserver-master50                   61m          504Mi
kube-apiserver-master56                   59m          474Mi
kube-controller-manager-master49          4m           15Mi
kube-controller-manager-master50          27m          67Mi
kube-controller-manager-master56          6m           15Mi
kube-proxy-2gcg5                          1m           13Mi
kube-proxy-4ckzn                          14m          14Mi
kube-proxy-bffn9                          20m          14Mi
kube-proxy-kdjtv                          1m           20Mi
kube-proxy-w9xzf                          15m          19Mi
kube-scheduler-master49                   4m           18Mi
kube-scheduler-master50                   6m           24Mi
kube-scheduler-master56                   4m           17Mi
log-pilot-2d2s9                           3m           18Mi
log-pilot-cnfvq                           3m           21Mi
log-pilot-hgbkm                           3m           7Mi
log-pilot-vwjtm                           4m           7Mi
log-pilot-zqvnp                           3m           7Mi
metrics-server-99d8bb9cc-m5lp9            1m           28Mi
[root@master49 metrics-images]# kubectl top nodes
error: metrics not available yet
###### Don't panic, it will be ready in a moment
[root@master49 metrics-images]# kubectl top nodes
NAME       CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
master49   378m         9%     1851Mi          15%
master50   498m         12%    2476Mi          20%
master56   470m         11%    2196Mi          18%
node52     2949m        12%    65802Mi         31%
node53     1285m        5%     31770Mi         15%
[root@master49 metrics-images]# ls
metrics-applaction.yaml
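With Plan-A in place and kubectl top returning data, the HPA mentioned at the beginning of this article can consume these CPU metrics. A minimal sketch, assuming a Deployment named nginx already exists in the default namespace and its pods declare CPU requests (neither is part of the cluster shown above):

# Scale the hypothetical nginx Deployment between 1 and 5 replicas,
# targeting 50% average CPU utilization as reported through the Metrics API
kubectl autoscale deployment nginx --cpu-percent=50 --min=1 --max=5

# Check that the HPA is reading metrics (TARGETS should show a percentage, not <unknown>)
kubectl get hpa nginx

If TARGETS stays at <unknown>, the usual suspects are missing CPU requests on the target pods or metrics-server not being ready yet.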

Plan-B

[root@master49 metrics-server]# ll
total 32
-rw-r--r--. 1 root root 304 Dec 24 13:57 auth-delegator.yaml
-rw-r--r--. 1 root root 325 Dec 24 13:57 auth-reader.yaml
-rw-r--r--. 1 root root 294 Dec 24 13:57 metrics-apiservice.yaml
-rw-r--r--. 1 root root 284 Dec 24 13:57 metrics-server-cluster-role-binding.yaml
-rw-r--r--. 1 root root 409 Dec 24 13:57 metrics-server-cluster-role.yaml
-rw-r--r--. 1 root root 585 Mar 30 14:31 metrics-server-deployment.yaml
-rw-r--r--. 1 root root  94 Dec 24 13:57 metrics-server-service-account.yaml
-rw-r--r--. 1 root root 245 Dec 24 13:57 metrics-server-service.yaml
[root@master49 metrics-server]# ll|awk '{print $9}'
auth-delegator.yaml
auth-reader.yaml
metrics-apiservice.yaml
metrics-server-cluster-role-binding.yaml
metrics-server-cluster-role.yaml
metrics-server-deployment.yaml
metrics-server-service-account.yaml
metrics-server-service.yaml
[root@master49 metrics-server]# ll|awk '{print $9}' | xargs cat
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - "apps"
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - "extensions"
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      containers:
      - name: metrics-server
        image: docker.io/carlziess/metrics-server-amd64-v0.2.1:latest
        imagePullPolicy: Always
        command:
        - /metrics-server
        - --source=kubernetes.summary_api:''
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
[root@master49 metrics-server]# kubectl apply -f .
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
deployment.apps/metrics-server created
serviceaccount/metrics-server created
service/metrics-server created
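As with Plan-A, the Metrics API may need a short while after the apply before it has data. A quick way to confirm Plan-B is working (the label selector comes from the Deployment manifest above):

# The pod is selected by the k8s-app=metrics-server label from the manifest
kubectl -n kube-system get pods -l k8s-app=metrics-server

# Once the pod is Running, these should return usage data instead of "metrics not available yet"
kubectl top nodes
kubectl top pods -n kube-system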