K8s Cluster Upgrade


# Update the Kubernetes apt repository list file
echo -e "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
 
# Update apt and check the available kubeadm versions
apt update
apt-cache madison kubeadm
 
# Install kubeadm 1.34.0-1.1
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.34.0-1.1 && \
apt-mark hold kubeadm
 
# Verify the kubeadm 1.34.0-1.1 installation
kubeadm version
 
# Upgrade the Kubernetes control plane components
kubeadm upgrade plan v1.34.0
kubeadm upgrade apply v1.34.0
 
# Install kubelet and kubectl 1.34.0-1.1
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.34.0-1.1 kubectl=1.34.0-1.1 && \
apt-mark hold kubelet kubectl
 
# Restart kubelet
sudo systemctl daemon-reload
sudo systemctl restart kubelet
 
# Verify the kubelet was upgraded to 1.34.0
kubectl get nodes
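 
# Note (not part of the original steps): the control-plane node itself is usually
# drained before kubeadm upgrade apply and uncordoned once its kubelet is back up:
kubectl drain controlplane --ignore-daemonsets
kubectl uncordon controlplane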
# First, drain the Pods running on node01
kubectl drain node01 --ignore-daemonsets
 
# Then connect to node01
ssh node01
 
# Update the Kubernetes apt repository list file
echo -e "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
 
# Update apt and check the available kubeadm versions
apt update
apt-cache madison kubeadm
 
# Install kubeadm 1.34.0-1.1
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.34.0-1.1 && \
apt-mark hold kubeadm
 
# Upgrade the local kubelet configuration on the worker node
sudo kubeadm upgrade node
 
# Install kubelet and kubectl 1.34.0-1.1
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.34.0-1.1 kubectl=1.34.0-1.1 && \
apt-mark hold kubelet kubectl
 
# Restart kubelet
sudo systemctl daemon-reload
sudo systemctl restart kubelet
 
# Return to the controlplane
exit
 
# Uncordon the worker node
kubectl uncordon node01
 
# Verify the worker node's kubelet was upgraded to 1.34.0
kubectl get nodes

Extracting JSON information


k -n=admin2406 get deployments.apps -o=json
k -n=admin2406 get deployments.apps -o=custom-columns=DEPLOYMENT:.metadata.name,CONTAINER_IMAGE:.spec.template.spec.containers[0].image,READY_REPLICAS:.status.readyReplicas,NAMESPACE:.metadata.namespace > /opt/admin2406_data
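 
# Spot-check the generated file
cat /opt/admin2406_data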

Fix kubeconfig


# Test the provided kubeconfig file
kubectl get pods --kubeconfig /root/CKA/admin.kubeconfig
 
# Check the default config for reference
cat ~/.kube/config
 
# Fix the incorrect part
vi /root/CKA/admin.kubeconfig
 
# Re-test the kubeconfig file
kubectl get pods --kubeconfig /root/CKA/admin.kubeconfig
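 
In this lab the broken field is usually the API server endpoint in the cluster entry (for example a wrong port); a sketch of what the corrected section typically looks like, with the address and port copied from ~/.kube/config (the values below are placeholders):
clusters:
- cluster:
    certificate-authority-data: <unchanged>
    server: https://<controlplane-ip>:6443
  name: kubernetes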

Rolling Update


kubectl create deployment nginx-deploy --image=nginx:1.16
kubectl set image deployment/nginx-deploy nginx=nginx:1.17
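 
# Optional checks (not part of the original answer) to confirm the rollout
kubectl rollout status deployment/nginx-deploy
kubectl rollout history deployment/nginx-deploy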

Fix PVC


# Check the Deployment
kubectl get deployment -n alpha alpha-mysql  -o yaml | yq e .spec.template.spec.containers -
 
# Check the Pod error
kubectl get pods -n alpha
kubectl describe pod -n alpha alpha-mysql-xxxxxxxx-xxxxx
 
# Check the PV
kubectl get pv alpha-pv
# Fix the PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-alpha-pvc
  namespace: alpha
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: slow
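 
A PVC's accessModes and storageClassName cannot be edited in place, so if the existing claim has the wrong values the usual fix is to delete and recreate it (a sketch, assuming the corrected manifest above is saved as pvc.yaml):
kubectl -n alpha delete pvc mysql-alpha-pvc
kubectl apply -f pvc.yaml
kubectl -n alpha get pvc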

ETCD Backup


# Create an etcd snapshot
export ETCDCTL_API=3
etcdctl snapshot save /opt/etcd-backup.db \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key
etcdctl snapshot status /opt/etcd-backup.db
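 
The exam also covers restore (see question type 1 below); a minimal restore sketch, where the new data directory path is an assumption and must match what the task asks for:
ETCDCTL_API=3 etcdctl snapshot restore /opt/etcd-backup.db \
  --data-dir /var/lib/etcd-from-backup
# then edit /etc/kubernetes/manifests/etcd.yaml so the etcd-data hostPath points at the new directory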

Create Pod with Command and Secret Volume


# Generate the YAML file
kubectl run secret-1401 -n admin1401 --image busybox --dry-run=client -o yaml --command -- sleep 4800 > admin.yaml
 
vi admin.yaml
 
# Add the required fields
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: secret-1401
  name: secret-1401
  namespace: admin1401
spec:
  volumes:
  - name: secret-volume
    secret:
      secretName: dotfile-secret
  containers:
  - command:
    - sleep
    - "4800"
    image: busybox
    name: secret-admin
    volumeMounts:
    - name: secret-volume
      readOnly: true
      mountPath: /etc/secret-volume
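 
# Create the Pod and verify it is running
kubectl apply -f admin.yaml
kubectl -n admin1401 get pod secret-1401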

Question Types

  1. ETCD backup and restore
  2. Control plane upgrade: kubeadm, kubelet, kubectl
  3. Deploy a Network Policy
  4. Deploy an Ingress
  5. Deploy a Deployment
  6. Assign a label to a Pod, then deploy it
  7. Troubleshoot a worker node stuck in NotReady back to Ready
  8. Schedule a Pod onto a specific node with a node selector
  9. Create a PVC (PV and StorageClass are given), then change its size with kubectl edit or kubectl patch (see the sketch after this list)
  10. Inspect Pod logs and extract the lines containing a specific word (see the sketch after this list)
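 
Minimal sketches for types 9 and 10 (the resource names, file path, and search word below are placeholders, not from an actual task):

# type 9: grow an existing PVC (the StorageClass must allow volume expansion)
kubectl patch pvc my-pvc -p '{"spec":{"resources":{"requests":{"storage":"2Gi"}}}}'
# or interactively: kubectl edit pvc my-pvc

# type 10: extract the log lines containing a specific word
kubectl logs my-pod | grep ERROR > /opt/my-pod-errors.log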

Create Pod with multiple containers (8)

k -n mc-namespace run mc-pod --image=nginx:1-alpine -o=yaml --dry-run=client > mc-pod.yaml

vi mc-pod.yaml
# mc-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  namespace: mc-namespace
  name: mc-pod
spec:
  containers:
  - name: mc-pod-1
    image: nginx:1-alpine
    env:
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
  - name: mc-pod-2
    image: busybox:1
    command:
    - "sh"
    - "-c"
    - "while true; do date >> /var/log/shared/date.log; sleep 1; done"
    volumeMounts:
    - name: shared-volume
      mountPath: /var/log/shared
  - name: mc-pod-3
    image: busybox:1
    command:
    - "sh"
    - "-c"
    - "tail -f /var/log/shared/date.log"
    volumeMounts:
    - name: shared-volume
      mountPath: /var/log/shared
  volumes:
  - name: shared-volume
    emptyDir: {}
k apply -f mc-pod.yaml
k -n mc-namespace get pod
k -n mc-namespace logs mc-pod -c mc-pod-3 -f

Install container runtime (7)

~ ssh bob@node01
bob@node01's password:

bob@node01 ~ sudo su

root@node01 /home/bob cd /root

root@node01 ~ ls
cri-docker_0.3.16.3-0.debian.deb

root@node01 ~ dpkg -i ./cri-docker_0.3.16.3-0.debian.deb

root@node01 ~ systemctl start cri-docker

root@node01 ~ systemctl enable cri-docker

root@node01 ~ systemctl status cri-docker

root@node01 ~ systemctl is-enabled cri-docker
enabled

Expose pod with Service (8)

controlplane ~ k get pods
NAME        READY   STATUS    RESTARTS   AGE
messaging   1/1     Running   0          4m2s
 
controlplane ~ k expose pod messaging --port=6379 --name=messaging-service
service/messaging-service exposed
 
controlplane ~ k get svc
NAME                TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
kubernetes          ClusterIP   172.20.0.1     <none>        443/TCP    48m
messaging-service   ClusterIP   172.20.26.32   <none>        6379/TCP   6s

Create Deployment (10)

controlplane ~ k create deployment hr-web-app --image=kodekloud/webapp-color --replicas=2

Expose Deployment with Service (8)

controlplane ~ k expose deployment hr-web-app --type=NodePort --port=8080 --name=hr-web-app-service --dry-run=client -o=yaml > hr-svc.yaml
 
controlplane ~ vi hr-svc.yaml 
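 
The manual edit here is typically just to pin the node port to 30082 (matching the NodePort shown in the describe output below); a sketch of the edited hr-svc.yaml:
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hr-web-app
  name: hr-web-app-service
spec:
  type: NodePort
  selector:
    app: hr-web-app
  ports:
  - port: 8080
    protocol: TCP
    targetPort: 8080
    nodePort: 30082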
 
controlplane ~ k apply -f hr-svc.yaml 
service/hr-web-app-service created
 
controlplane ~ k describe svc hr-web-app-service 
Name:                     hr-web-app-service
Namespace:                default
Labels:                   app=hr-web-app
Annotations:              <none>
Selector:                 app=hr-web-app
Type:                     NodePort
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       172.20.13.84
IPs:                      172.20.13.84
Port:                     <unset>  8080/TCP
TargetPort:               8080/TCP
NodePort:                 <unset>  30082/TCP
Endpoints:                172.17.0.11:8080,172.17.0.10:8080
Session Affinity:         None
External Traffic Policy:  Cluster
Internal Traffic Policy:  Cluster
Events:                   <none>

Create PV (8)

controlplane ~ vi pv-analytics.yaml
 
controlplane ~ cat pv-analytics.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-analytics
spec:
  capacity:
    storage: 100Mi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  hostPath:
    path: /pv/data-analytics
 
controlplane ~ k apply -f pv-analytics.yaml 
persistentvolume/pv-analytics created
 
controlplane ~ k get pv
NAME           CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
pv-analytics   100Mi      RWX            Retain           Available                          <unset>                          2s

Create HPA (10)

controlplane ~ ➜  vi webapp-hpa.yaml 
 
controlplane ~ ➜  cat webapp-hpa.yaml 
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: webapp-hpa
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: kkapp-deploy
  minReplicas: 2
  maxReplicas: 10
  metrics:
   - type: Resource
     resource:
       name: cpu
       target:
         type: Utilization
         averageUtilization: 50
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300
 
controlplane ~ ➜  k apply -f webapp-hpa.yaml 
horizontalpodautoscaler.autoscaling/webapp-hpa created
 
controlplane ~ ➜  k get hpa
NAME         REFERENCE                 TARGETS              MINPODS   MAXPODS   REPLICAS   AGE
webapp-hpa   Deployment/kkapp-deploy   cpu: <unknown>/50%   2         10        0          5s

Create VPA (9)

controlplane ~ ➜  vi analytics-vpa.yaml 
 
controlplane ~ ➜  cat analytics-vpa.yaml 
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: analytics-vpa
  namespace: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: analytics-deployment
  updatePolicy:
    updateMode: Auto
 
controlplane ~ ➜  k apply -f analytics-vpa.yaml 
verticalpodautoscaler.autoscaling.k8s.io/analytics-vpa created
 
controlplane ~ ➜  k get vpa
NAME            MODE   CPU   MEM   PROVIDED   AGE
analytics-vpa   Auto               False      38s

Create Gateway (6)

controlplane ~ ➜  vi web-gateway.yaml
 
controlplane ~ ➜  cat web-gateway.yaml 
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: web-gateway
  namespace: nginx-gateway
spec:
  gatewayClassName: nginx
  listeners:
  - name: http
    protocol: HTTP
    port: 80
 
controlplane ~ ➜  k apply -f web-gateway.yaml 
gateway.gateway.networking.k8s.io/web-gateway created
 
controlplane ~ ➜  k -n nginx-gateway get gateway
NAME          CLASS   ADDRESS   PROGRAMMED   AGE
web-gateway   nginx             True         13s

Helm Chart upgrade (8)

controlplane ~ ➜  helm list -n kk-ns
NAME            NAMESPACE       REVISION        UPDATED                                 STATUS          CHART           APP VERSION
kk-mock1        kk-ns           1               2025-11-06 14:28:24.255194099 +0000 UTC deployed        nginx-18.1.0    1.27.0     
 
controlplane ~ ➜  helm repo list
NAME            URL                               
kk-mock1        https://charts.bitnami.com/bitnami
 
controlplane ~ ➜  helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "kk-mock1" chart repository
Update Complete. ⎈Happy Helming!⎈
 
controlplane ~ ➜  helm repo list
NAME            URL                               
kk-mock1        https://charts.bitnami.com/bitnami
 
controlplane ~ ➜  helm search repo nginx
NAME                                    CHART VERSION   APP VERSION   DESCRIPTION                                       
kk-mock1/nginx                          22.2.4          1.29.3        NGINX Open Source is a web server that can be a...
kk-mock1/nginx-ingress-controller       12.0.7          1.13.1        NGINX Ingress Controller is an Ingress controll...
kk-mock1/nginx-intel                    2.1.15          0.4.9         DEPRECATED NGINX Open Source for Intel is a lig...
 
controlplane ~ ➜  helm search repo nginx --versions | grep 18.1.15
kk-mock1/nginx                          18.1.15         1.27.1        NGINX Open Source is a web server that can be a...
 
controlplane ~ ➜  helm upgrade kk-mock1 kk-mock1/nginx --version=18.1.5 -n kk-ns
Release "kk-mock1" has been upgraded. Happy Helming!
NAME: kk-mock1
LAST DEPLOYED: Thu Nov  6 14:31:52 2025
NAMESPACE: kk-ns
STATUS: deployed
REVISION: 2
TEST SUITE: None
NOTES:
CHART NAME: nginx
CHART VERSION: 18.1.5
APP VERSION: 1.27.0
 
** Please be patient while the chart is being deployed **
NGINX can be accessed through the following DNS name from within your cluster:
 
    kk-mock1-nginx.kk-ns.svc.cluster.local (port 80)
 
To access NGINX from outside the cluster, follow the steps below:
 
1. Get the NGINX URL by running these commands:
 
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        Watch the status with: 'kubectl get svc --namespace kk-ns -w kk-mock1-nginx'
 
    export SERVICE_PORT=$(kubectl get --namespace kk-ns -o jsonpath="{.spec.ports[0].port}" services kk-mock1-nginx)
    export SERVICE_IP=$(kubectl get svc --namespace kk-ns kk-mock1-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
    echo "http://${SERVICE_IP}:${SERVICE_PORT}"
 
WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs:
  - cloneStaticSiteFromGit.gitSync.resources
  - resources
+info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
 
⚠ SECURITY WARNING: Original containers have been substituted. This Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables.
 
Substituted images detected:
  - %!s(<nil>)/:%!s(<nil>)
 
controlplane ~ ➜  helm list -n kk-ns
NAME            NAMESPACE       REVISION        UPDATED                                        STATUS          CHART           APP VERSION
kk-mock1        kk-ns           2               2025-11-06 14:31:52.339403678 +0000 UTC        deployed        nginx-18.1.5    1.27.0

Create StorageClass (6)

controlplane ~ ➜  vi local-sc.yaml
 
controlplane ~ ➜  k apply -f local-sc.yaml 
storageclass.storage.k8s.io/local-sc created
 
controlplane ~ ➜  k get sc
NAME                 PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
local-sc (default)   kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   true                   4s
 
controlplane ~ ➜  cat local-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-sc
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/no-provisioner
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer

Create Deployment (10)

controlplane ~ ➜  k create deployment logging-deployment --image=busybox --namespace=logging-ns --replicas=1 --dry-run=client -o=yaml > logging-deployment.yaml
 
controlplane ~ ➜  vi logging-deployment.yaml 
 
controlplane ~ ➜  cat logging-deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logging-deployment
  namespace: logging-ns
spec:
  replicas: 1
  selector:
    matchLabels:
      app: logging-deployment
  template:
    metadata:
      labels:
        app: logging-deployment
    spec:
      volumes:
      - name: shared-volume
        emptyDir: {}
      containers:
      - image: busybox
        name: app-container
        command:
        - "sh"
        - "-c"
        - "while true; do echo 'Log entry' >> /var/log/app/app.log; sleep 5; done"
        resources: {}
        volumeMounts:
         - mountPath: /var/log/app
           name: shared-volume
      - image: busybox
        name: log-agent
        command:
        - "sh"
        - "-c"
        - "tail -f /var/log/app/app.log"
        volumeMounts:
         - mountPath: /var/log/app
           name: shared-volume
status: {}
 
controlplane ~ ➜  k create -f logging-deployment.yaml 
deployment.apps/logging-deployment created
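 
# Optional verification (not part of the original session): check that the sidecar is tailing the shared log
k -n logging-ns get pods
k -n logging-ns logs deploy/logging-deployment -c log-agent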

Create Ingress (10)

controlplane ~ ➜  k -n ingress-ns get deploy
NAME            READY   UP-TO-DATE   AVAILABLE   AGE
webapp-deploy   1/1     1            1           10m
 
controlplane ~ ➜  k -n ingress-ns get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
webapp-svc   ClusterIP   172.20.133.154   <none>        80/TCP    10m
 
controlplane ~ ➜  vi webapp-ingress.yaml 
 
controlplane ~ ➜  cat webapp-ingress.yaml 
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: webapp-ingress
  namespace: ingress-ns
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - host: "kodekloud-ingress.app"
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: webapp-svc
            port:
              number: 80
 
 
controlplane ~ ➜  k create -f webapp-ingress.yaml 
ingress.networking.k8s.io/webapp-ingress created
 
controlplane ~ ➜  k -n ingress-ns get ingress
NAME             CLASS   HOSTS                   ADDRESS   PORTS   AGE
webapp-ingress   nginx   kodekloud-ingress.app             80      9s
 
controlplane ~ ➜  curl -s http://kodekloud-ingress.app/
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
 
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
 
<p><em>Thank you for using nginx.</em></p>
</body>
</html>

Create Role and RoleBinding (10)

controlplane ~ ➜  cat /root/CKA/john.csr | base64 | tr -d '\n'
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUp6Nm51VE5XK3NjenMrdk5veWMzT2ZhOFhCMUZWYVhsbS9SUGtmMFdnWjhHbG0yCnkyU1paQ0YzUmtJKzJFamQ0V1RlYVN3dnFiNUdPU0o3N2ZiaUx6aUd2SS80VTdQM1JvMnNWVG5Ra0RCb2VQczIKQm5SK2FzVjRnbmZuWDUrZklWRFJaMmt2eFRoeXFFZStWQ3p0eDkyYTNSVWszWk9xa0J0Y24vOFd5TURjaFFSagpteXZ6MmtEZTBWbFc4eC9yUHpPZGpNSCtia3N6YjRxcVczUVllTkRKUklMMHVMOXdXUy9PRTl6eklKeXhDbFQ1Cm5UWTRWam5VaGE5MjFYSld5a3dvMkVaMW8vbnRBUG5uWHlJL3lJQ3htSW5QY3RLRFJLMWhPVWg1QlRwMXl1dFYKOG1oa1F2RWNkTW1FU0FWOTJIQXpub2VQMjRlaitwbkt5a1lFdlZrQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlJlSGhBWDBMT21ab3U1dEtPcDRkZ0tSRmNoV3FpRkpaaWptWTNaTkVJcDIzSGErZ1crWHp5CkU3a2h5VUt1QzBIRkFBYURVQ1I1SE9qSlBPL3owV21aRGpDc3gxM1BnVmxlZmJINkIwQkI4RVpWVXBzWnFraGgKQ1l5Y05VaHdycU5BcWxPU3ZPdmkvUEdldXp1NUZxaE0vK3JXdFRrbWdYSDlyZTlLNXhCWVM5UXR0TDVBTlY1SgpldkFYY3B2UDZRS2dkYWJHbDEzc3F5bGdsdWg1VEZSNXhTOUlDSnhYSm9Od3BtdEd6RG1PaFpFNllid250Z2thCjd5bkJ4eUNoRmlTLzloNDFDeXd6dFlUK0s0d2ROeTczUnk0TEd5eEl2ZkIySS96L2dkQ0cvTTljOFVEWUplQmcKSmMwdlVGalVCMzBHTTR2MjdOV0VjeFhHb21KWHFRKzQKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
controlplane ~ ➜  vi csr.yaml 
 
controlplane ~ ➜  cat csr.yaml 
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: john-developer
spec:
  signerName: kubernetes.io/kube-apiserver-client
  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUp6Nm51VE5XK3NjenMrdk5veWMzT2ZhOFhCMUZWYVhsbS9SUGtmMFdnWjhHbG0yCnkyU1paQ0YzUmtJKzJFamQ0V1RlYVN3dnFiNUdPU0o3N2ZiaUx6aUd2SS80VTdQM1JvMnNWVG5Ra0RCb2VQczIKQm5SK2FzVjRnbmZuWDUrZklWRFJaMmt2eFRoeXFFZStWQ3p0eDkyYTNSVWszWk9xa0J0Y24vOFd5TURjaFFSagpteXZ6MmtEZTBWbFc4eC9yUHpPZGpNSCtia3N6YjRxcVczUVllTkRKUklMMHVMOXdXUy9PRTl6eklKeXhDbFQ1Cm5UWTRWam5VaGE5MjFYSld5a3dvMkVaMW8vbnRBUG5uWHlJL3lJQ3htSW5QY3RLRFJLMWhPVWg1QlRwMXl1dFYKOG1oa1F2RWNkTW1FU0FWOTJIQXpub2VQMjRlaitwbkt5a1lFdlZrQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlJlSGhBWDBMT21ab3U1dEtPcDRkZ0tSRmNoV3FpRkpaaWptWTNaTkVJcDIzSGErZ1crWHp5CkU3a2h5VUt1QzBIRkFBYURVQ1I1SE9qSlBPL3owV21aRGpDc3gxM1BnVmxlZmJINkIwQkI4RVpWVXBzWnFraGgKQ1l5Y05VaHdycU5BcWxPU3ZPdmkvUEdldXp1NUZxaE0vK3JXdFRrbWdYSDlyZTlLNXhCWVM5UXR0TDVBTlY1SgpldkFYY3B2UDZRS2dkYWJHbDEzc3F5bGdsdWg1VEZSNXhTOUlDSnhYSm9Od3BtdEd6RG1PaFpFNllid250Z2thCjd5bkJ4eUNoRmlTLzloNDFDeXd6dFlUK0s0d2ROeTczUnk0TEd5eEl2ZkIySS96L2dkQ0cvTTljOFVEWUplQmcKSmMwdlVGalVCMzBHTTR2MjdOV0VjeFhHb21KWHFRKzQKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
  usages:
  - digital signature
  - key encipherment
  - client auth
 
controlplane ~ ➜  k apply -f csr.yaml 
certificatesigningrequest.certificates.k8s.io/john-developer created
 
controlplane ~ ➜  k get csr
NAME             AGE   SIGNERNAME                                    REQUESTOR                  REQUESTEDDURATION   CONDITION
csr-f79dh        38m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pr9dl6    <none>              Approved,Issued
csr-wgd7n        39m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   <none>              Approved,Issued
john-developer   4s    kubernetes.io/kube-apiserver-client           kubernetes-admin           <none>              Pending
 
controlplane ~ ➜  k certificate approve john-developer
certificatesigningrequest.certificates.k8s.io/john-developer approved
 
controlplane ~ ➜  k get csr
NAME             AGE   SIGNERNAME                                    REQUESTOR                  REQUESTEDDURATION   CONDITION
csr-f79dh        39m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pr9dl6    <none>              Approved,Issued
csr-wgd7n        40m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   <none>              Approved,Issued
john-developer   28s   kubernetes.io/kube-apiserver-client           kubernetes-admin           <none>              Approved,Issued
 
controlplane ~ ➜  vi rbac.yaml 
 
controlplane ~ ➜  cat rbac.yaml 
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: development
  name: developer
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["create", "get", "update", "list", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: john-dev-role-binding
  namespace: development
subjects:
- kind: User
  name: john
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: developer 
  apiGroup: rbac.authorization.k8s.io
 
 
controlplane ~ ➜  k apply -f rbac.yaml 
role.rbac.authorization.k8s.io/developer created
rolebinding.rbac.authorization.k8s.io/john-dev-role-binding created
 
controlplane ~ ➜  k auth can-i create pods --as=john -n development
yes
 
controlplane ~ ➜  k auth can-i create pods --as=john
no

Create ClusterIP Service and test dns lookup (10)

controlplane ~ ➜  k run nginx-resolver --image=nginx
pod/nginx-resolver created
 
controlplane ~ ➜  k expose pod nginx-resolver --name=nginx-resolver-svc --port=80 --target-port=80 --type=ClusterIP
service/nginx-resolver-svc exposed
 
controlplane ~ ➜  k get svc
NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
kubernetes           ClusterIP   172.20.0.1      <none>        443/TCP   51m
nginx-resolver-svc   ClusterIP   172.20.137.21   <none>        80/TCP    4s
 
controlplane ~ ➜  k describe svc nginx-resolver-svc 
Name:                     nginx-resolver-svc
Namespace:                default
Labels:                   run=nginx-resolver
Annotations:              <none>
Selector:                 run=nginx-resolver
Type:                     ClusterIP
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       172.20.137.21
IPs:                      172.20.137.21
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
Endpoints:                172.17.1.15:80
Session Affinity:         None
Internal Traffic Policy:  Cluster
Events:                   <none>
 
controlplane ~ ➜  k get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
nginx-deploy-5846bc77f5-2szm4   1/1     Running   0          20m   172.17.1.14   node01   <none>           <none>
nginx-resolver                  1/1     Running   0          85s   172.17.1.15   node01   <none>           <none>
 
controlplane ~ ➜  k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup nginx-resolver-svc > /root/CKA/nginx.svc
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.
 
controlplane ~ ➜  cat /root/CKA/nginx.svc 
Address 1: 172.20.0.10 kube-dns.kube-system.svc.cluster.local
 
Name:      nginx-resolver-svc
Address 1: 172.20.137.21 nginx-resolver-svc.default.svc.cluster.local
pod "test-nslookup" deleted from default namespace
 
controlplane ~ ➜  k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup 172-17-1-15.default.pod > /root/CKA/nginx.pod
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.
 
controlplane ~ ➜  cat /root/CKA/nginx.pod
Address 1: 172.20.0.10 kube-dns.kube-system.svc.cluster.local
 
Name:      172-17-1-15.default.pod
Address 1: 172.17.1.15 172-17-1-15.nginx-resolver-svc.default.svc.cluster.local
pod "test-nslookup" deleted from default namespace

Create StaticPod on node01 (10)

controlplane ~ ➜  k get nodes
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   59m   v1.34.0
node01         Ready    <none>          59m   v1.34.0
 
controlplane ~ ➜  k run nginx-critical --image=nginx --dry-run=client -o=yaml > static.yaml
 
controlplane ~ ➜  cat static.yaml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx-critical
  name: nginx-critical
spec:
  containers:
  - image: nginx
    name: nginx-critical
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
 
controlplane ~ ➜  ssh node01
Welcome to Ubuntu 22.04.5 LTS (GNU/Linux 5.15.0-1083-gcp x86_64)
 
 * Documentation:  https://help.ubuntu.com
 * Management:     https://landscape.canonical.com
 * Support:        https://ubuntu.com/pro
 
This system has been minimized by removing packages and content that are
not required on a system that users do not log into.
 
To restore this content, you can run the 'unminimize' command.
 
node01 ~ ➜  cd /etc/kubernetes/manifests/
 
node01 /etc/kubernetes/manifests ➜  ls
 
node01 /etc/kubernetes/manifests ➜  vi static.yaml
 
node01 /etc/kubernetes/manifests ➜  cat static.yaml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx-critical
  name: nginx-critical
spec:
  containers:
  - image: nginx
    name: nginx-critical
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
 
node01 /etc/kubernetes/manifests ➜  exit
logout
Connection to node01 closed.
 
controlplane ~ ➜  k get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
nginx-critical-node01           1/1     Running   0          23s   172.17.1.23   node01   <none>           <none>
nginx-deploy-5846bc77f5-2szm4   1/1     Running   0          31m   172.17.1.14   node01   <none>           <none>
nginx-resolver                  1/1     Running   0          12m   172.17.1.15   node01   <none>           <none>

Create HPA (10)

controlplane ~ ➜  ls
CKA  csr.yaml  local-sc.yaml  logging-deploy.yaml  rbac.yaml  static.yaml  webapp-hpa.yaml  webapp-ingress.yaml
 
controlplane ~ ➜  vi webapp-hpa.yaml 
 
controlplane ~ ➜  cat webapp-hpa.yaml 
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: backend-hpa
  namespace: backend
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: backend-deployment
  minReplicas: 3
  maxReplicas: 15
  metrics:
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 65
 
controlplane ~ ➜  k apply -f webapp-hpa.yaml 
horizontalpodautoscaler.autoscaling/backend-hpa unchanged
 
controlplane ~ ➜  k -n backend describe hpa
Name:                                                     backend-hpa
Namespace:                                                backend
Labels:                                                   <none>
Annotations:                                              <none>
CreationTimestamp:                                        Sat, 08 Nov 2025 11:57:11 +0000
Reference:                                                Deployment/backend-deployment
Metrics:                                                  ( current / target )
  resource memory on pods  (as a percentage of request):  <unknown> / 65%
Min replicas:                                             3
Max replicas:                                             15
Deployment pods:                                          3 current / 0 desired
Conditions:
  Type           Status  Reason                   Message
  ----           ------  ------                   -------
  AbleToScale    True    SucceededGetScale        the HPA controller was able to get the target's current scale
  ScalingActive  False   FailedGetResourceMetric  the HPA was unable to compute the replica count: failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
Events:
  Type     Reason                        Age               From                       Message
  ----     ------                        ----              ----                       -------
  Warning  FailedGetResourceMetric       4s (x6 over 79s)  horizontal-pod-autoscaler  failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
  Warning  FailedComputeMetricsReplicas  4s (x6 over 79s)  horizontal-pod-autoscaler  invalid metrics (1 invalid out of 1), first error is: failed to get memory resource metric value: failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
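 
The <unknown> targets and FailedGetResourceMetric events just mean the metrics API is not available in this lab; if working metrics were required, metrics-server would typically be installed first, e.g.:
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
kubectl top pods -n backend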

Configure HTTPS Gateway (10)

controlplane ~ ➜  k -n cka5673 get gateway
NAME          CLASS       ADDRESS   PROGRAMMED   AGE
web-gateway   kodekloud             Unknown      8s
 
controlplane ~ ➜  k -n cka5673 get gateway -o yaml
apiVersion: v1
items:
- apiVersion: gateway.networking.k8s.io/v1
  kind: Gateway
  metadata:
    annotations:
      kubectl.kubernetes.io/last-applied-configuration: |
        {"apiVersion":"gateway.networking.k8s.io/v1","kind":"Gateway","metadata":{"annotations":{},"name":"web-gateway","namespace":"cka5673"},"spec":{"gatewayClassName":"kodekloud","listeners":[{"name":"http","port":80,"protocol":"HTTP"}]}}
    creationTimestamp: "2025-11-08T12:05:44Z"
    generation: 1
    name: web-gateway
    namespace: cka5673
    resourceVersion: "10291"
    uid: d2ffe126-d82f-4213-94a9-86970e31b0a2
  spec:
    gatewayClassName: kodekloud
    listeners:
    - allowedRoutes:
        namespaces:
          from: Same
      name: http
      port: 80
      protocol: HTTP
  status:
    conditions:
    - lastTransitionTime: "1970-01-01T00:00:00Z"
      message: Waiting for controller
      reason: Pending
      status: Unknown
      type: Accepted
    - lastTransitionTime: "1970-01-01T00:00:00Z"
      message: Waiting for controller
      reason: Pending
      status: Unknown
      type: Programmed
kind: List
metadata:
  resourceVersion: ""
 
controlplane ~ ➜  k -n cka5673 get gateway -o yaml > web-gateway.yaml
 
controlplane ~ ➜  k -n cka5673 get secret
NAME            TYPE                DATA   AGE
kodekloud-tls   kubernetes.io/tls   2      2m18s
 
controlplane ~ ➜  cat web-gateway.yaml 
apiVersion: v1
items:
- apiVersion: gateway.networking.k8s.io/v1
  kind: Gateway
  metadata:
    annotations:
      kubectl.kubernetes.io/last-applied-configuration: |
        {"apiVersion":"gateway.networking.k8s.io/v1","kind":"Gateway","metadata":{"annotations":{},"name":"web-gateway","namespace":"cka5673"},"spec":{"gatewayClassName":"kodekloud","listeners":[{"name":"http","port":80,"protocol":"HTTP"}]}}
    creationTimestamp: "2025-11-08T12:05:44Z"
    generation: 1
    name: web-gateway
    namespace: cka5673
    resourceVersion: "10291"
    uid: d2ffe126-d82f-4213-94a9-86970e31b0a2
  spec:
    gatewayClassName: kodekloud
    listeners:
    - allowedRoutes:
        namespaces:
          from: Same
      name: https
      port: 443
      protocol: HTTPS
      hostname: kodekloud.com
      tls:
        certificateRefs:
        - name: kodekloud-tls
  status:
    conditions:
    - lastTransitionTime: "1970-01-01T00:00:00Z"
      message: Waiting for controller
      reason: Pending
      status: Unknown
      type: Accepted
    - lastTransitionTime: "1970-01-01T00:00:00Z"
      message: Waiting for controller
      reason: Pending
      status: Unknown
      type: Programmed
kind: List
metadata:
  resourceVersion: ""
 
controlplane ~ ➜  k apply -f web-gateway.yaml 
gateway.gateway.networking.k8s.io/web-gateway configured

Helm (10)

controlplane ~ ➜  helm list -A
NAME                    NAMESPACE               REVISION        UPDATED                                 STATUS          CHART                       APP VERSION
atlanta-page-apd        atlanta-page-04         1               2025-11-08 10:57:43.405721672 +0000 UTC deployed        atlanta-page-apd-0.1.0      1.16.0     
digi-locker-apd         digi-locker-02          1               2025-11-08 10:57:40.988036054 +0000 UTC deployed        digi-locker-apd-0.1.0       1.16.0     
security-alpha-apd      security-alpha-01       1               2025-11-08 10:57:40.109579755 +0000 UTC deployed        security-alpha-apd-0.1.0    1.16.0     
web-dashboard-apd       web-dashboard-03        1               2025-11-08 10:57:41.989424936 +0000 UTC deployed        web-dashboard-apd-0.1.0     1.16.0     
 
controlplane ~ ➜  helm get manifest atlanta-page-apd -n atlanta-page-04
---
# Source: atlanta-page-apd/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: atlanta-page-sa
  labels:
    app: atlanta-page-apd
---
# Source: atlanta-page-apd/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: atlanta-page-svc
  labels:
    app: atlanta-page-apd-svc
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: atlanta-page-apd
---
# Source: atlanta-page-apd/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: atlanta-page-apd
  labels:
    app: atlanta-page-apd
spec:
  replicas: 4
  selector:
    matchLabels:
      app: atlanta-page-apd
  template:
    metadata:
      labels:
        app: atlanta-page-apd
    spec:
      serviceAccountName: atlanta-page-sa
      containers:
        - name: atlanta-page-apd
          image: "kodekloud/webapp-color:v1"
          imagePullPolicy: IfNotPresent
 
 
controlplane ~ ➜  helm get manifest atlanta-page-apd -n atlanta-page-04 | grep -i webapp-color:v1
          image: "kodekloud/webapp-color:v1"
 
controlplane ~ ➜  helm uninstall atlanta-page-apd -n atlanta-page-04
release "atlanta-page-apd" uninstalled

NetworkPolicy (6)

controlplane ~ ➜  ls
CKA       kodekloud.crt  local-sc.yaml        net-pol-1.yaml  net-pol-3.yaml  static.yaml      webapp-ingress.yaml
csr.yaml  kodekloud.key  logging-deploy.yaml  net-pol-2.yaml  rbac.yaml       webapp-hpa.yaml  web-gateway.yaml
 
controlplane ~ ➜  cat net-pol-1.yaml 
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: net-policy-1
  namespace: backend
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          access: allowed
    ports:
    - protocol: TCP
      port: 80
 
controlplane ~ ➜  cat net-pol-2.yaml 
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: net-policy-2
  namespace: backend
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: frontend
    - namespaceSelector:
        matchLabels:
          name: databases
    ports:
    - protocol: TCP
      port: 80
 
controlplane ~ ➜  cat net-pol-3.yaml 
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: net-policy-3
  namespace: backend
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: frontend
    ports:
    - protocol: TCP
      port: 80
 
controlplane ~ ➜  k get ns --show-labels
NAME                STATUS   AGE     LABELS
atlanta-page-04     Active   75m     kubernetes.io/metadata.name=atlanta-page-04
backend             Active   18m     kubernetes.io/metadata.name=backend,name=backend
cka5673             Active   7m30s   kubernetes.io/metadata.name=cka5673
default             Active   81m     kubernetes.io/metadata.name=default
development         Active   51m     kubernetes.io/metadata.name=development
digi-locker-02      Active   75m     kubernetes.io/metadata.name=digi-locker-02
frontend            Active   93s     kubernetes.io/metadata.name=frontend,name=frontend
ingress-nginx       Active   66m     app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx,kubernetes.io/metadata.name=ingress-nginx
ingress-ns          Active   66m     kubernetes.io/metadata.name=ingress-ns
kube-node-lease     Active   81m     kubernetes.io/metadata.name=kube-node-lease
kube-public         Active   81m     kubernetes.io/metadata.name=kube-public
kube-system         Active   81m     kubernetes.io/metadata.name=kube-system
logging-ns          Active   72m     kubernetes.io/metadata.name=logging-ns
nginx-gateway       Active   75m     kubernetes.io/metadata.name=nginx-gateway
security-alpha-01   Active   75m     kubernetes.io/metadata.name=security-alpha-01
web-dashboard-03    Active   75m     kubernetes.io/metadata.name=web-dashboard-03
 
controlplane ~ ➜  k apply -f net-pol-3.yaml 
networkpolicy.networking.k8s.io/net-policy-3 created

References