k -n=admin2406 get deployments.apps -o=json
k -n=admin2406 get deployments.apps -o=custom-columns=DEPLOYMENT:.metadata.name,CONTAINER_IMAGE:.spec.template.spec.containers[0].image,READY_REPLICAS:.status.readyReplicas,NAMESPACE:.metadata.namespace > /opt/admin2406_data
controlplane ~ k get pods
NAME        READY   STATUS    RESTARTS   AGE
messaging   1/1     Running   0          4m2s
controlplane ~ k expose pod messaging --port=6379 --name=messaging-service
service/messaging-service exposed
controlplane ~ k get svc
NAME                TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
kubernetes          ClusterIP   172.20.0.1     <none>        443/TCP    48m
messaging-service   ClusterIP   172.20.26.32   <none>        6379/TCP   6s
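A quick way to confirm the service actually selects the messaging pod is to check its endpoints (verification sketch, not part of the recorded session):

k get endpoints messaging-service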
Create Deployment (10)
controlplane ~ k create deployment hr-web-app --image=kodekloud/webapp-color --replicas=2
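The same deployment can be captured as a manifest first by adding --dry-run=client -o yaml, which helps if the spec needs editing before creation (sketch of the equivalent declarative flow, not from the session; hr-web-app.yaml is just a placeholder filename):

k create deployment hr-web-app --image=kodekloud/webapp-color --replicas=2 --dry-run=client -o yaml > hr-web-app.yaml
k apply -f hr-web-app.yaml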
controlplane ~ ➜ vi analytics-vpa.yaml
controlplane ~ ➜ cat analytics-vpa.yaml
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: analytics-vpa
  namespace: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: analytics-deployment
  updatePolicy:
    updateMode: Auto
controlplane ~ ➜ k apply -f analytics-vpa.yaml
verticalpodautoscaler.autoscaling.k8s.io/analytics-vpa created
controlplane ~ ➜ k get vpa
NAME            MODE   CPU   MEM   PROVIDED   AGE
analytics-vpa   Auto               False      38s
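PROVIDED stays False until the VPA recommender has produced its first recommendation; assuming the VPA components are running in the cluster, it can be rechecked later with (verification sketch):

k describe vpa analytics-vpa
k get vpa analytics-vpa -o jsonpath='{.status.recommendation}'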
Create Gateway (6)
controlplane ~ ➜ vi web-gateway.yaml
controlplane ~ ➜ cat web-gateway.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: web-gateway
  namespace: nginx-gateway
spec:
  gatewayClassName: nginx
  listeners:
  - name: http
    protocol: HTTP
    port: 80
controlplane ~ ➜ k apply -f web-gateway.yaml
gateway.gateway.networking.k8s.io/web-gateway created
controlplane ~ ➜ k -n nginx-gateway get gateway
NAME          CLASS   ADDRESS   PROGRAMMED   AGE
web-gateway   nginx             True         13s
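If PROGRAMMED does not turn True, the listener and class status can be inspected in detail (verification sketch):

k -n nginx-gateway describe gateway web-gateway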
Helm Chart upgrade (8)
controlplane ~ ➜ helm list -n kk-ns
NAME       NAMESPACE   REVISION   UPDATED                                   STATUS     CHART          APP VERSION
kk-mock1   kk-ns       1          2025-11-06 14:28:24.255194099 +0000 UTC   deployed   nginx-18.1.0   1.27.0
controlplane ~ ➜ helm repo list
NAME       URL
kk-mock1   https://charts.bitnami.com/bitnami
controlplane ~ ➜ helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "kk-mock1" chart repository
Update Complete. ⎈Happy Helming!⎈
controlplane ~ ➜ helm repo list
NAME       URL
kk-mock1   https://charts.bitnami.com/bitnami
controlplane ~ ➜ helm search repo nginx
NAME                                CHART VERSION   APP VERSION   DESCRIPTION
kk-mock1/nginx                      22.2.4          1.29.3        NGINX Open Source is a web server that can be a...
kk-mock1/nginx-ingress-controller   12.0.7          1.13.1        NGINX Ingress Controller is an Ingress controll...
kk-mock1/nginx-intel                2.1.15          0.4.9         DEPRECATED NGINX Open Source for Intel is a lig...
controlplane ~ ➜ helm search repo nginx --versions | grep 18.1.15
kk-mock1/nginx   18.1.15   1.27.1   NGINX Open Source is a web server that can be a...
controlplane ~ ➜ helm upgrade kk-mock1 kk-mock1/nginx --version=18.1.5 -n kk-ns
Release "kk-mock1" has been upgraded. Happy Helming!
NAME: kk-mock1
LAST DEPLOYED: Thu Nov 6 14:31:52 2025
NAMESPACE: kk-ns
STATUS: deployed
REVISION: 2
TEST SUITE: None
NOTES:
CHART NAME: nginx
CHART VERSION: 18.1.5
APP VERSION: 1.27.0

** Please be patient while the chart is being deployed **

NGINX can be accessed through the following DNS name from within your cluster:

    kk-mock1-nginx.kk-ns.svc.cluster.local (port 80)

To access NGINX from outside the cluster, follow the steps below:

1. Get the NGINX URL by running these commands:

   NOTE: It may take a few minutes for the LoadBalancer IP to be available.
         Watch the status with: 'kubectl get svc --namespace kk-ns -w kk-mock1-nginx'

    export SERVICE_PORT=$(kubectl get --namespace kk-ns -o jsonpath="{.spec.ports[0].port}" services kk-mock1-nginx)
    export SERVICE_IP=$(kubectl get svc --namespace kk-ns kk-mock1-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
    echo "http://${SERVICE_IP}:${SERVICE_PORT}"

WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs:
  - cloneStaticSiteFromGit.gitSync.resources
  - resources
+info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/

⚠ SECURITY WARNING: Original containers have been substituted. This Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables.

Substituted images detected:
  - %!s(<nil>)/:%!s(<nil>)
controlplane ~ ➜ helm list -n kk-ns
NAME       NAMESPACE   REVISION   UPDATED                                   STATUS     CHART          APP VERSION
kk-mock1   kk-ns       2          2025-11-06 14:31:52.339403678 +0000 UTC   deployed   nginx-18.1.5   1.27.0
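Revision history for the release can be reviewed, and a bad upgrade undone, with standard Helm commands (sketch, not from the session):

helm history kk-mock1 -n kk-ns
helm rollback kk-mock1 1 -n kk-ns    # only if the previous revision is needed again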
controlplane ~ ➜ k -n ingress-ns get deploy
NAME            READY   UP-TO-DATE   AVAILABLE   AGE
webapp-deploy   1/1     1            1           10m
controlplane ~ ➜ k -n ingress-ns get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
webapp-svc   ClusterIP   172.20.133.154   <none>        80/TCP    10m
controlplane ~ ➜ vi webapp-ingress.yaml
controlplane ~ ➜ cat webapp-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: webapp-ingress
  namespace: ingress-ns
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - host: "kodekloud-ingress.app"
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: webapp-svc
            port:
              number: 80
controlplane ~ ➜ k create -f webapp-ingress.yaml
ingress.networking.k8s.io/webapp-ingress created
controlplane ~ ➜ k -n ingress-ns get ingress
NAME             CLASS   HOSTS                   ADDRESS   PORTS   AGE
webapp-ingress   nginx   kodekloud-ingress.app             80      9s
controlplane ~ ➜ curl -s http://kodekloud-ingress.app/
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
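The curl succeeds because kodekloud-ingress.app already resolves to the ingress controller in this lab environment. If it ever fails, the backend wiring is the first thing to check (verification sketch):

k -n ingress-ns describe ingress webapp-ingress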
Create Role and RoleBinding (10)
controlplane ~ ➜ cat /root/CKA/john.csr | base64 | tr -d '\n'
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUp6Nm51VE5XK3NjenMrdk5veWMzT2ZhOFhCMUZWYVhsbS9SUGtmMFdnWjhHbG0yCnkyU1paQ0YzUmtJKzJFamQ0V1RlYVN3dnFiNUdPU0o3N2ZiaUx6aUd2SS80VTdQM1JvMnNWVG5Ra0RCb2VQczIKQm5SK2FzVjRnbmZuWDUrZklWRFJaMmt2eFRoeXFFZStWQ3p0eDkyYTNSVWszWk9xa0J0Y24vOFd5TURjaFFSagpteXZ6MmtEZTBWbFc4eC9yUHpPZGpNSCtia3N6YjRxcVczUVllTkRKUklMMHVMOXdXUy9PRTl6eklKeXhDbFQ1Cm5UWTRWam5VaGE5MjFYSld5a3dvMkVaMW8vbnRBUG5uWHlJL3lJQ3htSW5QY3RLRFJLMWhPVWg1QlRwMXl1dFYKOG1oa1F2RWNkTW1FU0FWOTJIQXpub2VQMjRlaitwbkt5a1lFdlZrQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlJlSGhBWDBMT21ab3U1dEtPcDRkZ0tSRmNoV3FpRkpaaWptWTNaTkVJcDIzSGErZ1crWHp5CkU3a2h5VUt1QzBIRkFBYURVQ1I1SE9qSlBPL3owV21aRGpDc3gxM1BnVmxlZmJINkIwQkI4RVpWVXBzWnFraGgKQ1l5Y05VaHdycU5BcWxPU3ZPdmkvUEdldXp1NUZxaE0vK3JXdFRrbWdYSDlyZTlLNXhCWVM5UXR0TDVBTlY1SgpldkFYY3B2UDZRS2dkYWJHbDEzc3F5bGdsdWg1VEZSNXhTOUlDSnhYSm9Od3BtdEd6RG1PaFpFNllid250Z2thCjd5bkJ4eUNoRmlTLzloNDFDeXd6dFlUK0s0d2ROeTczUnk0TEd5eEl2ZkIySS96L2dkQ0cvTTljOFVEWUplQmcKSmMwdlVGalVCMzBHTTR2MjdOV0VjeFhHb21KWHFRKzQKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
controlplane ~ ➜ vi csr.yaml
controlplane ~ ➜ cat csr.yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: john-developer
spec:
  signerName: kubernetes.io/kube-apiserver-client
  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUp6Nm51VE5XK3NjenMrdk5veWMzT2ZhOFhCMUZWYVhsbS9SUGtmMFdnWjhHbG0yCnkyU1paQ0YzUmtJKzJFamQ0V1RlYVN3dnFiNUdPU0o3N2ZiaUx6aUd2SS80VTdQM1JvMnNWVG5Ra0RCb2VQczIKQm5SK2FzVjRnbmZuWDUrZklWRFJaMmt2eFRoeXFFZStWQ3p0eDkyYTNSVWszWk9xa0J0Y24vOFd5TURjaFFSagpteXZ6MmtEZTBWbFc4eC9yUHpPZGpNSCtia3N6YjRxcVczUVllTkRKUklMMHVMOXdXUy9PRTl6eklKeXhDbFQ1Cm5UWTRWam5VaGE5MjFYSld5a3dvMkVaMW8vbnRBUG5uWHlJL3lJQ3htSW5QY3RLRFJLMWhPVWg1QlRwMXl1dFYKOG1oa1F2RWNkTW1FU0FWOTJIQXpub2VQMjRlaitwbkt5a1lFdlZrQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlJlSGhBWDBMT21ab3U1dEtPcDRkZ0tSRmNoV3FpRkpaaWptWTNaTkVJcDIzSGErZ1crWHp5CkU3a2h5VUt1QzBIRkFBYURVQ1I1SE9qSlBPL3owV21aRGpDc3gxM1BnVmxlZmJINkIwQkI4RVpWVXBzWnFraGgKQ1l5Y05VaHdycU5BcWxPU3ZPdmkvUEdldXp1NUZxaE0vK3JXdFRrbWdYSDlyZTlLNXhCWVM5UXR0TDVBTlY1SgpldkFYY3B2UDZRS2dkYWJHbDEzc3F5bGdsdWg1VEZSNXhTOUlDSnhYSm9Od3BtdEd6RG1PaFpFNllid250Z2thCjd5bkJ4eUNoRmlTLzloNDFDeXd6dFlUK0s0d2ROeTczUnk0TEd5eEl2ZkIySS96L2dkQ0cvTTljOFVEWUplQmcKSmMwdlVGalVCMzBHTTR2MjdOV0VjeFhHb21KWHFRKzQKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
  usages:
  - digital signature
  - key encipherment
  - client auth
controlplane ~ ➜ k apply -f csr.yaml
certificatesigningrequest.certificates.k8s.io/john-developer created
controlplane ~ ➜ k get csr
NAME             AGE   SIGNERNAME                                    REQUESTOR                  REQUESTEDDURATION   CONDITION
csr-f79dh        38m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pr9dl6    <none>              Approved,Issued
csr-wgd7n        39m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   <none>              Approved,Issued
john-developer   4s    kubernetes.io/kube-apiserver-client           kubernetes-admin           <none>              Pending
controlplane ~ ➜ k certificate approve john-developer
certificatesigningrequest.certificates.k8s.io/john-developer approved
controlplane ~ ➜ k get csr
NAME             AGE   SIGNERNAME                                    REQUESTOR                  REQUESTEDDURATION   CONDITION
csr-f79dh        39m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pr9dl6    <none>              Approved,Issued
csr-wgd7n        40m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   <none>              Approved,Issued
john-developer   28s   kubernetes.io/kube-apiserver-client           kubernetes-admin           <none>              Approved,Issued
controlplane ~ ➜ vi rbac.yaml
controlplane ~ ➜ cat rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: development
  name: developer
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["create", "get", "update", "list", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: john-dev-role-binding
  namespace: development
subjects:
- kind: User
  name: john
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: developer
  apiGroup: rbac.authorization.k8s.io
controlplane ~ ➜ k apply -f rbac.yaml
role.rbac.authorization.k8s.io/developer created
rolebinding.rbac.authorization.k8s.io/john-dev-role-binding created
controlplane ~ ➜ k auth can-i create pods --as=john -n development
yes
controlplane ~ ➜ k auth can-i create pods --as=john
no
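The same Role and RoleBinding can also be created imperatively, which is often quicker in the exam (equivalent sketch, not from the session):

k create role developer --verb=create,get,update,list,delete --resource=pods -n development
k create rolebinding john-dev-role-binding --role=developer --user=john -n development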
Create ClusterIP Service and test dns lookup (10)
controlplane ~ ➜ k run nginx-resolver --image=nginx
pod/nginx-resolver created
controlplane ~ ➜ k expose pod nginx-resolver --name=nginx-resolver-svc --port=80 --target-port=80 --type=ClusterIP
service/nginx-resolver-svc exposed
controlplane ~ ➜ k get svc
NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
kubernetes           ClusterIP   172.20.0.1      <none>        443/TCP   51m
nginx-resolver-svc   ClusterIP   172.20.137.21   <none>        80/TCP    4s
controlplane ~ ➜ k describe svc nginx-resolver-svc
Name:                     nginx-resolver-svc
Namespace:                default
Labels:                   run=nginx-resolver
Annotations:              <none>
Selector:                 run=nginx-resolver
Type:                     ClusterIP
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       172.20.137.21
IPs:                      172.20.137.21
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
Endpoints:                172.17.1.15:80
Session Affinity:         None
Internal Traffic Policy:  Cluster
Events:                   <none>
controlplane ~ ➜ k get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
nginx-deploy-5846bc77f5-2szm4   1/1     Running   0          20m   172.17.1.14   node01   <none>           <none>
nginx-resolver                  1/1     Running   0          85s   172.17.1.15   node01   <none>           <none>
controlplane ~ ➜ k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup nginx-resolver-svc > /root/CKA/nginx.svc
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.
controlplane ~ ➜ cat /root/CKA/nginx.svc
Address 1: 172.20.0.10 kube-dns.kube-system.svc.cluster.local
Name:      nginx-resolver-svc
Address 1: 172.20.137.21 nginx-resolver-svc.default.svc.cluster.local
pod "test-nslookup" deleted from default namespace
controlplane ~ ➜ k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup 172-17-1-15.default.pod > /root/CKA/nginx.pod
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.
controlplane ~ ➜ cat /root/CKA/nginx.pod
Address 1: 172.20.0.10 kube-dns.kube-system.svc.cluster.local
Name:      172-17-1-15.default.pod
Address 1: 172.17.1.15 172-17-1-15.nginx-resolver-svc.default.svc.cluster.local
pod "test-nslookup" deleted from default namespace
Create StaticPod on node01 (10)
controlplane ~ ➜ k get nodes
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   59m   v1.34.0
node01         Ready    <none>          59m   v1.34.0
controlplane ~ ➜ k run nginx-critical --image=nginx --dry-run=client -o=yaml > static.yaml
controlplane ~ ➜ cat static.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx-critical
  name: nginx-critical
spec:
  containers:
  - image: nginx
    name: nginx-critical
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
controlplane ~ ➜ ssh node01
Welcome to Ubuntu 22.04.5 LTS (GNU/Linux 5.15.0-1083-gcp x86_64)

 * Documentation:  https://help.ubuntu.com
 * Management:     https://landscape.canonical.com
 * Support:        https://ubuntu.com/pro

This system has been minimized by removing packages and content that are
not required on a system that users do not log into.

To restore this content, you can run the 'unminimize' command.
node01 ~ ➜ cd /etc/kubernetes/manifests/
node01 /etc/kubernetes/manifests ➜ ls
node01 /etc/kubernetes/manifests ➜ vi static.yaml
node01 /etc/kubernetes/manifests ➜ cat static.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx-critical
  name: nginx-critical
spec:
  containers:
  - image: nginx
    name: nginx-critical
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
node01 /etc/kubernetes/manifests ➜ exit
logout
Connection to node01 closed.
controlplane ~ ➜ k get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
nginx-critical-node01           1/1     Running   0          23s   172.17.1.23   node01   <none>           <none>
nginx-deploy-5846bc77f5-2szm4   1/1     Running   0          31m   172.17.1.14   node01   <none>           <none>
nginx-resolver                  1/1     Running   0          12m   172.17.1.15   node01   <none>           <none>
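Instead of retyping the manifest on the node, the generated file can be copied over in one step, assuming the same SSH access used above (sketch):

scp static.yaml node01:/etc/kubernetes/manifests/

The kubelet on node01 picks the file up automatically; no kubectl apply is needed for static pods.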
Create HPA (10)
controlplane ~ ➜ ls
CKA  csr.yaml  local-sc.yaml  logging-deploy.yaml  rbac.yaml  static.yaml  webapp-hpa.yaml  webapp-ingress.yaml
controlplane ~ ➜ vi webapp-hpa.yaml
controlplane ~ ➜ cat webapp-hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: backend-hpa
  namespace: backend
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: backend-deployment
  minReplicas: 3
  maxReplicas: 15
  metrics:
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 65
controlplane ~ ➜ k apply -f webapp-hpa.yaml
horizontalpodautoscaler.autoscaling/backend-hpa unchanged
controlplane ~ ➜ k -n backend describe hpa
Name:                                                     backend-hpa
Namespace:                                                backend
Labels:                                                   <none>
Annotations:                                              <none>
CreationTimestamp:                                        Sat, 08 Nov 2025 11:57:11 +0000
Reference:                                                Deployment/backend-deployment
Metrics:                                                  ( current / target )
  resource memory on pods  (as a percentage of request):  <unknown> / 65%
Min replicas:                                             3
Max replicas:                                             15
Deployment pods:                                          3 current / 0 desired
Conditions:
  Type           Status  Reason                   Message
  ----           ------  ------                   -------
  AbleToScale    True    SucceededGetScale        the HPA controller was able to get the target's current scale
  ScalingActive  False   FailedGetResourceMetric  the HPA was unable to compute the replica count: failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
Events:
  Type     Reason                        Age               From                       Message
  ----     ------                        ----              ----                       -------
  Warning  FailedGetResourceMetric       4s (x6 over 79s)  horizontal-pod-autoscaler  failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
  Warning  FailedComputeMetricsReplicas  4s (x6 over 79s)  horizontal-pod-autoscaler  invalid metrics (1 invalid out of 1), first error is: failed to get memory resource metric value: failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
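The <unknown> memory reading and the FailedGetResourceMetric warnings only mean the resource metrics API is not serving yet; the HPA object itself is correct and will start scaling once metrics-server is available. A quick check (sketch):

k top pods -n backend             # fails until metrics-server responds
k -n backend get hpa backend-hpa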
controlplane ~ ➜ k create configmap app-config -n cm-namespace --from-literal=ENV=production --from-literal=LOG_LEVEL=info
configmap/app-config created
controlplane ~ ➜ k -n cm-namespace get cm
NAME               DATA   AGE
app-config         2      13s
kube-root-ca.crt   1      58s
controlplane ~ ➜ k -n cm-namespace get deployment
NAME        READY   UP-TO-DATE   AVAILABLE   AGE
cm-webapp   1/1     1            1           2m26s
controlplane ~ ➜ k edit -n cm-namespace deployment cm-webapp
...
    spec:
      containers:
      - image: nginx
        imagePullPolicy: Always
        name: nginx
        # added: load the ConfigMap as environment variables
        envFrom:
        - configMapRef:
            name: app-config
...
deployment.apps/cm-webapp edited
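Whether the ConfigMap actually reached the container can be verified from the rolled-out pod (verification sketch, not from the session):

k -n cm-namespace exec deploy/cm-webapp -- env | grep -E 'ENV|LOG_LEVEL'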
Create PriorityClass (8)
controlplane ~ ➜ k create priorityclass low-priority --value=50000
priorityclass.scheduling.k8s.io/low-priority created
controlplane ~ ➜ k -n low-priority get pod
NAME     READY   STATUS    RESTARTS   AGE
lp-pod   1/1     Running   0          41s
controlplane ~ ➜ k -n low-priority edit pod lp-pod
error: pods "lp-pod" is invalid
A copy of your changes has been stored to "/tmp/kubectl-edit-1609190342.yaml"
error: Edit cancelled, no valid changes were saved.
controlplane ~ ➜ k replace -f /tmp/kubectl-edit-1609190342.yaml --force
pod "lp-pod" deleted from low-priority namespace
Error from server (Forbidden): pods "lp-pod" is forbidden: the integer value of priority (0) must not be provided in pod spec; priority admission controller computed 50000 from the given PriorityClass name
# remove spec.priority from the saved copy
controlplane ~ ➜ vi /tmp/kubectl-edit-1609190342.yaml
controlplane ~ ➜ k replace -f /tmp/kubectl-edit-1609190342.yaml --force
pod/lp-pod replaced
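The change that makes the replace succeed is setting the class name and leaving the computed priority field out; the relevant fragment of the edited pod spec ends up looking like this (sketch):

spec:
  priorityClassName: low-priority   # the admission controller fills in priority: 50000
  # spec.priority itself must not be set by hand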
Create NetworkPolicy (8)
controlplane ~ ➜ k get pod --show-labels
NAME        READY   STATUS    RESTARTS   AGE     LABELS
np-test-1   1/1     Running   0          4m27s   run=np-test-1
pvviewer    1/1     Running   0          16m     <none>
controlplane ~ ➜ cat np.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: ingress-to-nptest
  namespace: default
spec:
  podSelector:
    matchLabels:
      run: np-test-1
  policyTypes:
  - Ingress
  ingress:
  - ports:
    - protocol: TCP
      port: 80
controlplane ~ ➜ k apply -f np.yaml
networkpolicy.networking.k8s.io/ingress-to-nptest created
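Because the ingress rule has no from clause, any source may reach the pod, but only on TCP 80. A quick connectivity test from a throwaway pod (sketch, assuming the np-test-service seen later in this session fronts np-test-1):

k run curl-test --image=busybox:1.28 --rm -it --restart=Never -- wget -qO- -T 2 http://np-test-service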
Taint and Toleration (12)
controlplane ~ ➜ k get nodes
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   36m   v1.34.0
node01         Ready    <none>          35m   v1.34.0
controlplane ~ ➜ k taint node node01 env_type=production:NoSchedule
node/node01 tainted
controlplane ~ ➜ k describe node node01 | grep -i taint
Taints:             env_type=production:NoSchedule
controlplane ~ ➜ k run dev-redis --image=redis:alpine
pod/dev-redis created
controlplane ~ ➜ vi prod-redis.yaml
controlplane ~ ➜ cat prod-redis.yaml
apiVersion: v1
kind: Pod
metadata:
  name: prod-redis
spec:
  containers:
  - name: prod-redis
    image: redis:alpine
  tolerations:
  - key: "env_type"
    operator: "Equal"
    value: "production"
    effect: "NoSchedule"
controlplane ~ ➜ k apply -f prod-redis.yaml
pod/prod-redis created
controlplane ~ ➜ k get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
dev-redis 1/1 Running 0 3m5s 172.17.0.5 controlplane <none> <none>
np-test-1 1/1 Running 0 11m 172.17.1.8 node01 <none> <none>
prod-redis 1/1 Running 0 7s 172.17.1.9 node01 <none> <none>
pvviewer 1/1 Running 0 23m 172.17.1.3 node01 <none> <none>
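The toleration only permits prod-redis to land on node01; it does not force it there. If the taint ever needs to be removed, the same key with a trailing dash clears it (sketch):

k taint node node01 env_type=production:NoSchedule-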
Inspect PVC and PV (6)
controlplane ~ ➜ k get pv
NAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
app-pv   1Gi        RWO            Retain           Available                          <unset>                          40s
controlplane ~ ➜ k get pvc -n storage-ns
NAME      STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
app-pvc   Pending                                                     <unset>                 59s
controlplane ~ ➜ k get pvc -n storage-ns -o yaml
apiVersion: v1
items:
- apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    annotations:
      kubectl.kubernetes.io/last-applied-configuration: |
        {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"app-pvc","namespace":"storage-ns"},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"1Gi"}}}}
    creationTimestamp: "2025-11-26T14:04:47Z"
    finalizers:
    - kubernetes.io/pvc-protection
    name: app-pvc
    namespace: storage-ns
    resourceVersion: "5401"
    uid: e0092a4c-9d4d-47ad-b091-f23c1a8dfae4
  spec:
    accessModes:
    - ReadWriteMany
    resources:
      requests:
        storage: 1Gi
    volumeMode: Filesystem
  status:
    phase: Pending
kind: List
metadata:
  resourceVersion: ""
# fix accessModes so the claim matches the PV
controlplane ~ ➜ k edit pvc -n storage-ns app-pvc
error: persistentvolumeclaims "app-pvc" is invalid
A copy of your changes has been stored to "/tmp/kubectl-edit-1829119349.yaml"
error: Edit cancelled, no valid changes were saved.
controlplane ~ ➜ k replace -f /tmp/kubectl-edit-1829119349.yaml --force
persistentvolumeclaim "app-pvc" deleted from storage-ns namespace
persistentvolumeclaim/app-pvc replaced
controlplane ~ ➜ k get pvc -n storage-ns
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
app-pvc Bound app-pv 1Gi RWO <unset> 7s
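The edit that lets the claim bind is aligning accessModes with the PV, which only offers ReadWriteOnce; the corrected part of the PVC spec (sketch of the edited file):

spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi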
Fix kubeconfig (8)
controlplane ~ ➜ cat CKA/super.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: ...
    server: https://controlplane:9999
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
users:
- name: kubernetes-admin
  user:
    client-certificate-data: ...
    client-key-data: ...
controlplane ~ ➜ k get node --kubeconfig=/root/CKA/super.kubeconfig
E1126 14:09:25.390277 46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.390610 46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.392730 46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.393061 46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.394498 46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
The connection to the server controlplane:9999 was refused - did you specify the right host or port?
controlplane ~ ➜ sudo netstat -tulnp | grep kube-apiserver
tcp6       0      0 :::6443                 :::*                    LISTEN      2847/kube-apiserver
# fix the port (9999 -> 6443)
controlplane ~ ➜ vi /root/CKA/super.kubeconfig
controlplane ~ ➜ k get node --kubeconfig=/root/CKA/super.kubeconfig
NAME STATUS ROLES AGE VERSION
controlplane Ready control-plane 49m v1.34.0
node01 Ready <none> 48m v1.34.0
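The only change needed is the server port, which has to match the API server's listen port seen in the netstat output; the corrected cluster entry looks like this (sketch, the elided data fields stay as they were):

- cluster:
    certificate-authority-data: ...
    server: https://controlplane:6443
  name: kubernetes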
Fix controller-manager (10)
controlplane ~ ➜ k get deploy nginx-deploy
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deploy   1/1     1            1           5m36s
controlplane ~ ➜ k scale deployment nginx-deploy --replicas=3
deployment.apps/nginx-deploy scaled
controlplane ~ ➜ k get deploy nginx-deploy
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deploy   1/3     1            1           6m16s
# no new scaling event from the deployment-controller
controlplane ~ ➜ k describe deploy nginx-deploy | grep -A5 Events
Events:
  Type    Reason             Age    From                   Message
  ----    ------             ----   ----                   -------
  Normal  ScalingReplicaSet  8m36s  deployment-controller  Scaled up replica set nginx-deploy-59874dbc6b from 0 to 1
# the controller-manager pod is missing
controlplane ~ ➜ k get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-587f6db6c5-bkn26 1/1 Running 0 55m
canal-jg7mj 2/2 Running 0 54m
canal-lmhj8 2/2 Running 0 55m
coredns-6678bcd974-jkff2 1/1 Running 0 55m
coredns-6678bcd974-wjcgs 1/1 Running 0 55m
etcd-controlplane 1/1 Running 0 55m
kube-apiserver-controlplane 1/1 Running 0 55m
kube-proxy-mqwb8 1/1 Running 0 55m
kube-proxy-vcc8z 1/1 Running 0 54m
kube-scheduler-controlplane 1/1 Running 0 55m
# fix the manifest
controlplane ~ ➜ vi /etc/kubernetes/manifests/kube-controller-manager.yaml
controlplane ~ ➜ k get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-587f6db6c5-bkn26 1/1 Running 0 58m
canal-jg7mj 2/2 Running 0 57m
canal-lmhj8 2/2 Running 0 58m
coredns-6678bcd974-jkff2 1/1 Running 0 58m
coredns-6678bcd974-wjcgs 1/1 Running 0 58m
etcd-controlplane 1/1 Running 0 58m
kube-apiserver-controlplane 1/1 Running 0 58m
kube-controller-manager-controlplane 1/1 Running 0 21s
kube-proxy-mqwb8 1/1 Running 0 58m
kube-proxy-vcc8z 1/1 Running 0 57m
kube-scheduler-controlplane 1/1 Running 0 58m
controlplane ~ ➜ k get deploy nginx-deploy
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deploy 3/3 3 3 12m
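The controller-manager pod was missing because its static pod manifest was broken; a typo in the manifest (for example in the command or image name) is the usual culprit in this lab, though the exact fault in this run was not captured. A quick way to eyeball it before editing (sketch):

grep -n 'kube-controller' /etc/kubernetes/manifests/kube-controller-manager.yaml
crictl ps -a | grep controller-manager   # shows a missing or crash-looping container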
Create HPA (6)
controlplane ~ ➜ vi hpa.yaml
controlplane ~ ➜ cat hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: api-hpa
  namespace: api
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: api-deployment
  minReplicas: 1
  maxReplicas: 20
  metrics:
  - type: Pods
    pods:
      metric:
        name: requests_per_second
      target:
        type: AverageValue
        averageValue: "1000"
controlplane ~ ➜ k apply -f hpa.yaml
horizontalpodautoscaler.autoscaling/api-hpa created
controlplane ~ ➜ k describe hpa -n api
Name: api-hpa
Namespace: api
Labels: <none>
Annotations: <none>
CreationTimestamp: Wed, 26 Nov 2025 14:29:06 +0000
Reference: Deployment/api-deployment
Metrics: ( current / target )
"requests_per_second" on pods: <unknown> / 1k
Min replicas: 1
Max replicas: 20
Deployment pods: 0 current / 0 desired
Events: <none>
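A Pods-type metric such as requests_per_second is served by a custom metrics adapter, not by metrics-server, so the <unknown> reading is expected until such an adapter is installed; the HPA spec itself is what the task asks for. Status can be rechecked with (sketch):

k -n api get hpa api-hpa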
Create HTTPRoute (6)
controlplane ~ ➜ k get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 172.20.0.1 <none> 443/TCP 71m
np-test-service ClusterIP 172.20.59.134 <none> 80/TCP 42m
web-service ClusterIP 172.20.154.119 <none> 80/TCP 4m34s
web-service-v2 ClusterIP 172.20.191.4 <none> 80/TCP 4m34s
controlplane ~ ➜ k get gateway
NAME CLASS ADDRESS PROGRAMMED AGE
web-gateway nginx True 4m38s
controlplane ~ ➜ vi hr.yaml
controlplane ~ ➜ cat hr.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: web-route
spec:
  parentRefs:
  - name: web-gateway
  rules:
  - matches:
    - path:
        type: PathPrefix
        value: /
    backendRefs:
    - name: web-service
      port: 80
      weight: 80
    - name: web-service-v2
      port: 80
      weight: 20
controlplane ~ ➜ k apply -f hr.yaml
httproute.gateway.networking.k8s.io/web-route created
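Whether the Gateway accepted the route, and how the 80/20 weighting is wired, can be confirmed from the route's status conditions (verification sketch):

k describe httproute web-route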
Helm install (4)
controlplane ~ ➜ helm lint /root/new-version/
==> Linting /root/new-version/
[INFO] Chart.yaml: icon is recommended
1 chart(s) linted, 0 chart(s) failed
controlplane ~ ➜ helm install --generate-name /root/new-version/
NAME: new-version-1764167838
LAST DEPLOYED: Wed Nov 26 14:37:18 2025
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
controlplane ~ ➜ helm list
NAME                     NAMESPACE   REVISION   UPDATED                                   STATUS     CHART                     APP VERSION
new-version-1764167838   default     1          2025-11-26 14:37:18.205659093 +0000 UTC   deployed   webpage-server-02-0.1.1   v2
webpage-server-01        default     1          2025-11-26 14:35:32.19008219 +0000 UTC    deployed   webpage-server-01-0.1.0   v1
controlplane ~ ➜ helm uninstall webpage-server-01
release "webpage-server-01" uninstalled
controlplane ~ ➜ helm list
NAME                     NAMESPACE   REVISION   UPDATED                                   STATUS     CHART                     APP VERSION
new-version-1764167838   default     1          2025-11-26 14:37:18.205659093 +0000 UTC   deployed   webpage-server-02-0.1.1   v2
Identify pod CIDR (4)
controlplane ~ ➜ k get node
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   77m   v1.34.0
node01         Ready    <none>          76m   v1.34.0
controlplane ~ ➜ k get node controlplane -o yaml | grep -A5 spec
spec:
  podCIDR: 172.17.0.0/24
  podCIDRs:
  - 172.17.0.0/24
status:
  addresses:
controlplane ~ ➜ k get node -o jsonpath='{.items[0].spec.podCIDR}' > /root/pod-cidr.txt
controlplane ~ ➜ cat /root/pod-cidr.txt
172.17.0.0/24
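The jsonpath above relies on the controlplane node being the first item in the list; targeting the node by name avoids that assumption (alternative sketch):

k get node controlplane -o jsonpath='{.spec.podCIDR}' > /root/pod-cidr.txt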