Background:
This follows on from "client-go connecting to a kubernetes cluster - connect and list" and "client-go connecting to a kubernetes cluster - create operations". The examples all use namespace and deployment as the two representative resources to build on (in my environment, deployment is still the most representative one). Having created the namespace and deployment earlier, the natural next step in the workflow is to update the namespace and the deployment!
client-go connecting to a kubernetes cluster - update operations
1. namespace update
Following the create example.
First, take a look at which settings in the metav1.ObjectMeta of &corev1.Namespace can be modified; here metav1.ObjectMeta{} is filled in with all of its fields:
Name stays as the zhangpeng namespace from before, and I add a label:
main.go
package main

import (
	"context"
	"flag"
	"fmt"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
	"path/filepath"
)

func main() {
	var kubeconfig *string
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}
	flag.Parse()
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err.Error())
	}
	// create the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	namespace := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:                       "zhangpeng",
			GenerateName:               "",
			Namespace:                  "",
			SelfLink:                   "",
			UID:                        "",
			ResourceVersion:            "",
			Generation:                 0,
			CreationTimestamp:          metav1.Time{},
			DeletionTimestamp:          nil,
			DeletionGracePeriodSeconds: nil,
			Labels:                     map[string]string{"dev": "test"},
			Annotations:                nil,
			OwnerReferences:            nil,
			Finalizers:                 nil,
			ClusterName:                "",
			ManagedFields:              nil,
		},
	}
	result, _ := clientset.CoreV1().Namespaces().Update(context.TODO(), namespace, metav1.UpdateOptions{})
	fmt.Println(result)
}
Run main.go
Log into the cloud provider's console and confirm the label has been created! Right there the console also shows resource quotas and limits, and coincidentally I recently came across an article with exactly this kind of example: designing namespace resource quotas with client-go.
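One side note on updating this way (my own observation, not part of the original steps): the Namespace object above is built from scratch, so the PUT sent by Update replaces the stored metadata with exactly what is in the struct, and with an empty ResourceVersion the API server performs no conflict check. Any labels or annotations that something else had added to the namespace would be overwritten. A safer pattern is Get, modify, then Update; a minimal sketch, reusing the clientset and imports from the main.go above:

// Get the live namespace, change only the label we care about, and send it back.
// This keeps metadata added by others and carries the current resourceVersion.
ns, err := clientset.CoreV1().Namespaces().Get(context.TODO(), "zhangpeng", metav1.GetOptions{})
if err != nil {
	panic(err.Error())
}
if ns.Labels == nil {
	ns.Labels = map[string]string{}
}
ns.Labels["dev"] = "test"
updated, err := clientset.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{})
if err != nil {
	panic(err.Error())
}
fmt.Println(updated.Labels)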
2. A quick extension: resourcequotas
main.go
package main

import (
	"context"
	"flag"
	"fmt"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
	"path/filepath"
)

func main() {
	var kubeconfig *string
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}
	flag.Parse()
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err.Error())
	}
	// create the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	namespace := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:                       "zhangpeng",
			GenerateName:               "",
			Namespace:                  "",
			SelfLink:                   "",
			UID:                        "",
			ResourceVersion:            "",
			Generation:                 0,
			CreationTimestamp:          metav1.Time{},
			DeletionTimestamp:          nil,
			DeletionGracePeriodSeconds: nil,
			Labels:                     map[string]string{"dev": "test"},
			Annotations:                nil,
			OwnerReferences:            nil,
			Finalizers:                 nil,
			ClusterName:                "",
			ManagedFields:              nil,
		},
	}
	result, _ := clientset.CoreV1().Namespaces().Update(context.TODO(), namespace, metav1.UpdateOptions{})
	fmt.Println(result)
	quotaTest := clientset.CoreV1().ResourceQuotas("zhangpeng")
	quota := &corev1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: "quota-namespace"},
		Spec: corev1.ResourceQuotaSpec{
			Hard: map[corev1.ResourceName]resource.Quantity{
				corev1.ResourceLimitsCPU:      resource.MustParse("200m"),
				corev1.ResourceLimitsMemory:   resource.MustParse("200M"),
				corev1.ResourceRequestsCPU:    resource.MustParse("1000m"),
				corev1.ResourceRequestsMemory: resource.MustParse("1Gi"),
			},
		},
	}
	result1, err := quotaTest.Create(context.TODO(), quota, metav1.CreateOptions{})
	if err != nil {
		fmt.Println(err)
	} else {
		fmt.Println(result1)
	}
}
kubectl get resourcequotas -n zhangpeng
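Since this post is about updates, note that the Create call above will fail with already exists the second time it runs. Changing an existing quota follows the same Get-then-Update pattern; a rough sketch, reusing the clientset and imports from the main.go above (the 400M value is just an example):

// Fetch the existing quota, raise one of its hard limits, and write it back.
quotas := clientset.CoreV1().ResourceQuotas("zhangpeng")
q, err := quotas.Get(context.TODO(), "quota-namespace", metav1.GetOptions{})
if err != nil {
	panic(err.Error())
}
q.Spec.Hard[corev1.ResourceLimitsMemory] = resource.MustParse("400M")
if _, err := quotas.Update(context.TODO(), q, metav1.UpdateOptions{}); err != nil {
	fmt.Println(err)
}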
3. update deployment
Following "client-go connecting to a kubernetes cluster - create operations", using the approach of generating a yaml file and reading it in as a file stream:
Change the nginx image tag to 1.16
src/yamls/nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx:1.16
        name: nginx
        resources: {}
status: {}
If we ran the create code directly now, it would of course fail with already exists!
Modify main.go as follows:
package main

import (
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	v1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
	"path/filepath"
)

func main() {
	var kubeconfig *string
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}
	flag.Parse()
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err.Error())
	}
	// create the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	// read the yaml file, convert it to json and unmarshal it into a Deployment
	b, err := ioutil.ReadFile("src/yamls/nginx.yaml")
	if err != nil {
		panic(err.Error())
	}
	nginxDep := &v1.Deployment{}
	nginxJson, err := yaml.ToJSON(b)
	if err != nil {
		panic(err.Error())
	}
	if err = json.Unmarshal(nginxJson, nginxDep); err != nil {
		fmt.Println(err)
		return
	}
	if _, err = clientset.AppsV1().Deployments("zhangpeng").Update(context.Background(), nginxDep, metav1.UpdateOptions{}); err != nil {
		fmt.Println(err)
		return
	}
}
Run main.go
kubectl get deployments -n zhangpeng -o yaml
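A caveat on updating from a static yaml file (my own note, not from the original workflow): the object read from disk carries no resourceVersion, so the Update above is an unconditional overwrite; if it did carry one and someone changed the deployment in between, the call would fail with a Conflict error. client-go ships retry.RetryOnConflict (in k8s.io/client-go/util/retry) for that case. A sketch that only changes the image of the live deployment, with retries; nginx:1.17 is just a placeholder tag:

// Get the current deployment, change the image and update, retrying on conflicts.
// Assumes the clientset from the main.go above plus: import "k8s.io/client-go/util/retry"
deployments := clientset.AppsV1().Deployments("zhangpeng")
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
	dep, getErr := deployments.Get(context.Background(), "nginx", metav1.GetOptions{})
	if getErr != nil {
		return getErr
	}
	dep.Spec.Template.Spec.Containers[0].Image = "nginx:1.17"
	_, updateErr := deployments.Update(context.Background(), dep, metav1.UpdateOptions{})
	return updateErr
})
if retryErr != nil {
	fmt.Println(retryErr)
}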
Notes:
- context.Background() vs. context.TODO() still feels a bit fuzzy; it is hard to tell when to use which ...
- The results of these calls are not printed in any uniform format, and the error handling is ad hoc.
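For what it's worth on the first point: the Go documentation describes context.TODO() as a placeholder for when it is not yet clear which context to use, and context.Background() as the root context typically used in main(); in these examples the two behave identically. On the second point, one option is to funnel every result through a small helper; a sketch (printResult is my own hypothetical name, not anything from client-go), assuming encoding/json and fmt are imported:

// printResult prints an API object as indented JSON, or the error if the call
// failed, so every Create/Update call in the examples reports the same way.
func printResult(obj interface{}, err error) {
	if err != nil {
		fmt.Printf("request failed: %v\n", err)
		return
	}
	out, jerr := json.MarshalIndent(obj, "", "  ")
	if jerr != nil {
		fmt.Printf("marshal failed: %v\n", jerr)
		return
	}
	fmt.Println(string(out))
}

// Usage, e.g. for the namespace update in section 1:
//   printResult(clientset.CoreV1().Namespaces().Update(context.TODO(), namespace, metav1.UpdateOptions{}))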