1.规范的yaml文件
---
# Pod demonstrating scheduling (anti-affinity), resource limits, lifecycle
# hooks, a liveness probe, an init container, and container env vars.
apiVersion: v1
kind: Pod
metadata:
  name: pod-base
  namespace: dev
  labels:
    user: slq
spec:
  # Alternative scheduling approaches (only one strategy active at a time):
  # nodeName: node1
  # nodeSelector:
  #   env: uat
  affinity:
    # Hard anti-affinity: never co-schedule with another pod labeled
    # user=slq on the same node (topologyKey = per-node granularity).
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: user
                operator: In
                values: ["slq"]
          topologyKey: kubernetes.io/hostname
    # nodeAffinity:
    #   preferredDuringSchedulingIgnoredDuringExecution:
    #     - weight: 1
    #       preference:
    #         matchExpressions:
    #           - key: env
    #             operator: In
    #             values: ["uat", "test"]
    # nodeAffinity:
    #   requiredDuringSchedulingIgnoredDuringExecution:
    #     nodeSelectorTerms:
    #       - matchExpressions:
    #           - key: env
    #             operator: In
    #             values: ["uat"]
  containers:
    - name: nginx
      image: nginx
      imagePullPolicy: IfNotPresent  # Always | Never | IfNotPresent
      ports:
        - name: nginx-port
          containerPort: 80
          protocol: TCP
      resources:
        limits:
          # cpu: "1"
          memory: "10Gi"
        requests:
          # cpu: "1"
          memory: "10Mi"
      lifecycle:
        # Runs right after container start; overwrite the default index page.
        postStart:
          exec:
            command: ["/bin/sh", "-c", "echo poststart... > /usr/share/nginx/html/index.html"]
          # tcpSocket:
          #   port: 80
        # Graceful nginx shutdown before the container is terminated.
        preStop:
          exec:
            command: ["/usr/sbin/nginx", "-s", "quit"]
      # Only ONE probe handler type may be set (exec | tcpSocket | httpGet).
      livenessProbe:
        # exec:
        #   command: ["/bin/ls", "/tmp/"]
        # tcpSocket:
        #   port: 80
        httpGet:
          scheme: HTTP
          port: 80
          path: /
  restartPolicy: Never  # Always | OnFailure | Never
  initContainers:
    - name: busybox
      image: busybox:1.30
      imagePullPolicy: IfNotPresent
      # command: ["/bin/sh","-c","touch /tmp/hello.txt;while true;do /bin/echo $(date +%T) >> /tmp/hello.txt; sleep 10;done;"]
      command: ["/bin/sh", "-c", "touch /tmp/hello.txt"]
      env:
        - name: "username"
          value: "root"
        - name: "pwd"
          value: "123456"
2.pod 重要点记录
pod资源清单:
apiVersion: v1 #必选,版本号,例如v1
kind: Pod #必选,资源类型,例如 Pod
metadata: #必选,元数据
name: string #必选,Pod名称
namespace: string #Pod所属的命名空间,默认为"default"
labels: #自定义标签列表
- name: string
spec: #必选,Pod中容器的具体定义
containers: #必选,Pod中容器列表
- name: string #必选,容器名称
image: string #必选,容器的镜像名称
imagePullPolicy: [ Always|Never|IfNotPresent ] #获取镜像的策略
command: [string] #容器的启动命令列表,如不指定,应用打包时应用的启动命令
args: [string] #容器的启动命令参数列表
workingDir: string #容器的工作目录
volumeMounts: #挂载到容器外部的存储卷配置
- name: string #援用pod定义的共享存储卷的名称,需用volumes[]局部定义的的卷名
mountPath: string #存储卷在容器内mount的绝对路径,应少于512字符
readOnly: boolean #是否为只读模式
ports: #须要裸露的端口库号列表
- name: string #端口的名称
containerPort: int #容器须要监听的端口号
hostPort: int #容器所在主机须要监听的端口号,默认与Container雷同
protocol: string #端口协定,反对TCP和UDP,默认TCP
env: #容器运行前需设置的环境变量列表
- name: string #环境变量名称
value: string #环境变量的值
resources: #资源限度和申请的设置
limits: #资源限度的设置
cpu: string #Cpu的限度,单位为core数,将用于docker run --cpu-shares参数
memory: string #内存限度,单位能够为Mib/Gib,将用于docker run --memory参数
requests: #资源申请的设置
cpu: string #Cpu申请,容器启动的初始可用数量
memory: string #内存申请,容器启动的初始可用数量
lifecycle: #生命周期钩子
postStart: #容器启动后立刻执行此钩子,如果执行失败,会依据重启策略进行重启
preStop: #容器终止前执行此钩子,无论后果如何,容器都会终止
livenessProbe: #对Pod内各容器健康检查的设置,当探测无响应几次后将主动重启该容器
exec: #对Pod容器内查看形式设置为exec形式
command: [string] #exec形式须要制订的命令或脚本
httpGet: #对Pod内个容器健康检查办法设置为HttpGet,须要制订Path、port
path: string
port: number
host: string
scheme: string
httpHeaders: #自定义HTTP请求头(字段名为httpHeaders,区分大小写)
- name: string
value: string
tcpSocket: #对Pod内个容器健康检查形式设置为tcpSocket形式
port: number
initialDelaySeconds: 0 #容器启动实现后首次探测的工夫,单位为秒
timeoutSeconds: 0 #对容器健康检查探测期待响应的超时工夫,单位秒,默认1秒
periodSeconds: 0 #对容器监控查看的定期探测工夫设置,单位秒,默认10秒一次
successThreshold: 0
failureThreshold: 0
securityContext:
privileged: false
restartPolicy: [Always | Never | OnFailure] #Pod的重启策略
nodeName: <string> #设置NodeName示意将该Pod调度到指定到名称的node节点上
nodeSelector: obeject #设置NodeSelector示意将该Pod调度到蕴含这个label的node上
imagePullSecrets: #Pull镜像时应用的secret名称,以key:secretkey格局指定
- name: string
hostNetwork: false #是否应用主机网络模式,默认为false,如果设置为true,示意应用宿主机网络
volumes: #在该pod上定义共享存储卷列表
- name: string #共享存储卷名称 (volumes类型有很多种)
emptyDir: {} #类型为emptyDir的存储卷,与Pod同生命周期的一个临时目录。为空值
hostPath: string #类型为hostPath的存储卷,示意挂载Pod所在宿主机的目录
path: string #Pod所在宿主机的目录,将被用于同期中mount的目录
secret: #类型为secret的存储卷,挂载集群与定义的secret对象到容器外部
secretName: string #字段名为secretName,指定要挂载的secret名称
items:
- key: string
path: string
configMap: #类型为configMap的存储卷,挂载预约义的configMap对象到容器外部
name: string
items:
- key: string
path: string
kubectl explain pod
containers: 能够定义多个容器对象
name
image
imagePullPolicy
command
args
env
ports
---
# Multi-container Pod: two containers declared under spec.containers.
apiVersion: v1
kind: Pod
metadata:
  name: pod-base
  namespace: dev
  labels:
    user: slq
spec:
  containers:
    - name: nginx
      image: nginx
      imagePullPolicy: IfNotPresent
    - name: busybox
      # Tag fixed from 1.3.0: every other reference in these notes (and the
      # kubelet events shown below) uses busybox:1.30; 1.3.0 is not a
      # published busybox tag.
      image: busybox:1.30
      imagePullPolicy: IfNotPresent
进入容器中执行命令
kubectl exec -it pod-base -n dev -c busybox -- /bin/sh   //pod-base pod名称 dev 命名空间 busybox 容器名称 (命令须以 -- 分隔,旧写法已废弃)
limits:          # 字段名为limits;且requests不能大于limits,否则API server会拒绝
  cpu: "2"
  memory: "1024M"
requests:
  cpu: "1"
  memory: "10M"
pod的生命周期:
创立
运行初始化容器
运行主容器过程
容器启动后钩子、容器终止前钩子函数
容器的存活 探测就绪性探测
创立和终止过程:
创立过程:
创立api申请到api-server,api-server将申请信息保留到etcd中,间接返回响应,调度器通过etcd watch机制获取创立pod的信息,而后依据算法计算调配到哪些节点进行创立容器
具体的节点通过watch机制察看到须要创立容器,而后创立容器,创立实现之后返回后果到api-server,api-server批改pod信息
终止过程:
api-server收到终止pod的申请,首先有个宽限期30s,将pod的状态改为终止状态,kubelet监控到pod为终止状态就开启敞开pod的过程
端点控制器监控到pod对象处于终止状态,会将其在可拜访的列表中移除
如果存在prestop钩子函数,则会在标记为终止状态后立即启动执行
宽限期完结后,pod如果仍在运行,则会收到立即进行的信号,kubelet会申请api-server将该资源的宽限期改为0,间接删除实现
初始化容器过程:
主容器的启动前置工作
kubectl get pod pod-base -n dev -w 动静查看pod-base pod的变动
容器启动前钩子函数:post start
容器终止前钩子函数: pre stop
lifecycle:
postStart: #字段区分大小写,必须写成postStart
exec:
command:
- cat
- /tmp/healthy
tcpSocket:
port: 80
httpGet:
path: /abc #url地址
port: 80
host: 192.168.109.100
scheme: HTTP
The Pod "pod-base" is invalid: spec.containers[0].lifecycle.postStart.tcpSocket: Forbidden: may not specify more than 1 handler type
[root@node1 ~]# kubectl get pod pod-base -n dev
NAME READY STATUS RESTARTS AGE
pod-base 0/1 CrashLoopBackOff 3 63s
容器存活探测: liveness probes 代表服务可能失常提供服务
就绪性探测: readiness probes 代表服务已经准备好了,可以接收流量
两种形式均反对三种形式:三种形式只能应用其中一种,不能同时应用两种或者三种
---
livenessProbe:
exec:
command:
- cat
- /tmp/healthy
---
livenessProbe:
tcpSocket:
port: 8080
---
livenessProbe:
httpGet:
path: /
port: 80
host: 127.0.0.1
scheme: HTTP
---
kubectl describe pod pod-base -n dev
------------------------------------------
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 96s default-scheduler Successfully assigned dev/pod-base to node2
Normal Created 104s kubelet Created container busybox
Normal Pulled 104s kubelet Container image "busybox:1.30" already present on machine
Normal Started 103s kubelet Started container busybox
Normal Created 45s (x3 over 103s) kubelet Created container nginx
Normal Started 44s (x3 over 102s) kubelet Started container nginx
Normal Pulled 15s (x4 over 103s) kubelet Container image "nginx" already present on machine
Warning Unhealthy 15s (x9 over 95s) kubelet Liveness probe failed: /bin/cat: /tmp/hello.txt: No such file or directory
Normal Killing 15s (x3 over 75s) kubelet Container nginx failed liveness probe, will be restarted
-----------------------------------------
kubectl explain pod.spec.containers.livenessProbe
pod的重启策略:
Always
OnFailure
Never
-------------------------------------------------------------------------------------------------------------------------------------
pod的调度
定向调度:NodeName NodeSelector
亲和性调度:NodeAffinity PodAffinity PodAntiAffinity
污点: Taints Toleration
NodeName: node1
NodeSelector:
env: uat
kubectl label nodes node1 env=prod
kubectl label nodes node2 env=dev
kubectl label nodes node3 env=uat
亲和性调度:
nodeAffinity (node亲和性):
pod.spec.affinity.nodeAffinity
requiredDuringSchedulingIgnoredDuringExecution
key Exists DoesNotExist
- matchExpressions:
- key: env #匹配领有env标签的节点 Exists DoesNotExist
operator: Exists
- key: env #匹配env标签值为dev和uat的节点 In NotIn
operator: In
values: ["dev","uat"]
- key: env #匹配env标签值大于dev的节点 Gt Lt 应用的较少
operator: Gt
values: "dev"
preferredDuringSchedulingIgnoredDuringExecution: #优先调度到满足指定规则的node(注意是Ignored)
preference:
//注意事项:
1.如果同时定义了nodeSelector和nodeAffinity,那么必须两个都满足,pod能力运行在指定的node上
2.如果nodeAffinity指定了多个nodeSelectorTerms,那么只须要其中一个可能匹配胜利即可
3.如果一个nodeSelectorTerms中有多个matchExpressions,则一个节点必须满足所有的能力匹配胜利
4.如果一个pod所在的node在pod运行期间产生了变动,不再合乎该pod的节点亲和性需要,则零碎将疏忽此变动
podAffinity (pod亲和性):
podAntiAffinity (pod反亲和性):
pod亲和性:
pod.spec.affinity.podAffinity:
requiredDuringSchedulingIgnoredDuringExecution: #硬限度
namespace
topologyKey
labelSelector
matchExpressions
key
values
operator
matchLabels
preferredDuringSchedulingIgnoredDuringExecution: #软限制(注意是Ignored)
podAffinityTerm
namespace
topologyKey
labelSelector
matchExpressions
key
values
operator
matchLabels
weight
反亲和性
podAntiAffinity: 其余配置和podAffinity一致,语义相反(排斥而非聚集)
定向调度和亲和性调度都是从pod的角度定义调度信息的
污点:
在node外面配置一些属性来定义调度信息
设置node上的信息确定pod是否调度过去,污点排挤关系
key=value:effect
PreferNoSchedule 尽量不把新pod调度到该节点(软限制)
NoSchedule 调度器不会把新pod调度到该节点,但不影响节点上已存在的pod
NoExecute 不会把新pod调度到该节点,而且会把该节点上已存在的pod驱逐走
设置污点 : kubectl taint nodes node3 key=value:effect
删除指定污点: kubectl taint nodes node3 taintEff:PreferNoSchedule- (key:effect- 只删该effect)
去除指定key的所有污点: kubectl taint nodes node1 key- (key- 删除该key下全部effect的污点)
查看污点: kubectl describe nodes node1 ---------------->Taints
kubectl run taint1 --image=nginx -n dev
kubectl run taint2 --image=nginx -n dev
kubectl run taint3 --image=nginx -n dev
kubectl taint nodes node3 taintEff=eff:PreferNoSchedule
kubectl taint nodes node3 taintEff:PreferNoSchedule-
kubectl taint nodes node3 taintEff-
如果屡次设置,或有反复值的
容忍:
Toleration:
key: "taintEff"
operator: "Equal"
value: "eff"
effect: "NoExecute"
tolerationSeconds #容忍工夫,pod在node上的停留时间
发表回复