K8s Cluster Upgrade


# Edit the k8s apt repository list file
echo -e "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
 
# After apt update, check the latest available kubeadm version
apt update
apt-cache madison kubeadm
 
# Install kubeadm 1.34.0-1.1
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.34.0-1.1 && \
apt-mark hold kubeadm
 
# Confirm kubeadm 1.34.0-1.1 is installed
kubeadm version
 
# Upgrade the Kubernetes control plane components
kubeadm upgrade plan v1.34.0
kubeadm upgrade apply v1.34.0
 
# Install kubelet and kubectl 1.34.0-1.1
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.34.0-1.1 kubectl=1.34.0-1.1 && \
apt-mark hold kubelet kubectl
 
# Restart kubelet
sudo systemctl daemon-reload
sudo systemctl restart kubelet
 
# Confirm the kubelet 1.34.0 upgrade
kubectl get nodes
# First, drain the Pods running on node01
kubectl drain node01 --ignore-daemonsets
 
# Then SSH into node01
ssh node01
 
# Edit the k8s apt repository list file
echo -e "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
 
# After apt update, check the latest available kubeadm version
apt update
apt-cache madison kubeadm
 
# Install kubeadm 1.34.0-1.1
apt-mark unhold kubeadm && \
apt-get update && apt-get install -y kubeadm=1.34.0-1.1 && \
apt-mark hold kubeadm
 
# Upgrade the local kubelet configuration on the worker node
sudo kubeadm upgrade node
 
# Install kubelet and kubectl 1.34.0-1.1
apt-mark unhold kubelet kubectl && \
apt-get update && apt-get install -y kubelet=1.34.0-1.1 kubectl=1.34.0-1.1 && \
apt-mark hold kubelet kubectl
 
# Restart kubelet
sudo systemctl daemon-reload
sudo systemctl restart kubelet
 
# Return to the controlplane
exit
 
# Uncordon the worker node
kubectl uncordon node01
 
# Confirm the worker node's kubelet 1.34.0 upgrade
kubectl get nodes

Extracting JSON information


k -n=admin2406 get deployments.apps -o=json
k -n=admin2406 get deployments.apps -o=custom-columns=DEPLOYMENT:.metadata.name,CONTAINER_IMAGE:.spec.template.spec.containers[0].image,READY_REPLICAS:.status.readyReplicas,NAMESPACE:.metadata.namespace > /opt/admin2406_data
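
# Verify the generated file; add --sort-by=.metadata.name to the command above if the
# task asks for the rows ordered by deployment name (assumption - not required here)
cat /opt/admin2406_data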

Fix kubeconfig


# Check the kubeconfig file
kubectl get pods --kubeconfig /root/CKA/admin.kubeconfig
 
# Compare with the default config
cat ~/.kube/config
 
# Fix the incorrect field
vi /root/CKA/admin.kubeconfig
 
# Check the kubeconfig file again
kubectl get pods --kubeconfig /root/CKA/admin.kubeconfig
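
# A useful cross-check (not part of the original notes): compare the API server endpoints
# of the broken kubeconfig and the working default one
kubectl config view --kubeconfig /root/CKA/admin.kubeconfig -o jsonpath='{.clusters[0].cluster.server}'
kubectl config view -o jsonpath='{.clusters[0].cluster.server}'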

Rolling Update


kubectl create deployment nginx-deploy --image=nginx:1.16
kubectl set image deployment/nginx-deploy nginx=nginx:1.17
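
# Optional rollout verification (not part of the original notes)
kubectl rollout status deployment/nginx-deploy
kubectl rollout history deployment/nginx-deploy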

Fix PVC


# Check the Deployment
kubectl get deployment -n alpha alpha-mysql  -o yaml | yq e .spec.template.spec.containers -
 
# Check the Pod error
kubectl get pods -n alpha
kubectl describe pod -n alpha alpha-mysql-xxxxxxxx-xxxxx
 
# Check the PV
kubectl get pv alpha-pv
# Fix the PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-alpha-pvc
  namespace: alpha
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: slow
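
# Recreate the PVC with the corrected spec and confirm it binds
# (assumption: the old PVC has to be deleted first since most PVC fields are immutable;
#  the manifest file name mysql-alpha-pvc.yaml is also assumed)
kubectl -n alpha delete pvc mysql-alpha-pvc
kubectl apply -f mysql-alpha-pvc.yaml
kubectl -n alpha get pvc,pods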

Question Types

  1. Control Plane Upgrade, kubeadm, kubelet, kubectl
  2. Assign a label to a Pod and deploy it
  3. Deploy a Pod onto a specific Node using a Node Selector (see the sketch after this list)
  4. Extract the log lines containing a specific word from a Pod's logs (see the sketch after this list)
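
A minimal sketch for items 3 and 4 above (the pod name, the disktype=ssd label, and the search word ERROR are placeholders, not from a real task):

apiVersion: v1
kind: Pod
metadata:
  name: frontend-pod
  labels:
    tier: frontend
spec:
  nodeSelector:
    disktype: ssd
  containers:
  - name: nginx
    image: nginx

kubectl logs frontend-pod | grep ERROR > /opt/pod-errors.log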

Question Types since 2025-02-28

  1. Deploy a Deployment
  2. Priority Class
  3. (Troubleshooting) Pod
  4. Worker node NotReady-to-Ready troubleshooting (see the kubelet sketch after this list)
    1. Usually a kubelet issue
    2. etcd certificate-related issues
  5. Service Expose
    1. Expose the containerPort of an already deployed Deployment as a NodePort
  6. Storage Class, PV, PVC
    1. Create a PVC (PV and SC are given) + change the capacity with kubectl edit or kubectl patch
    2. Mount PV - PVC - Pod
    3. StorageClass is usually simple: copy from the docs and adjust the fields
  7. Gateway API and TLS integration
    1. Apply an existing Ingress setup as Gateway API/HTTPRoute
    2. Gateway YAML with TLS
  8. Helm commands
  9. CNI and NetworkPolicy deployment
    1. Install Calico, which supports NetworkPolicy
  10. CRI
    1. Install the .deb file with dpkg -i
    2. After installation, set network parameters such as net.ipv4.ip_forward = 1 (sysctl -p /etc/sysctl.d/k8s.conf)
  11. Multi Container
    1. Add a sidecar container for log collection
  12. HPA
    1. Set CPU max/min and a stabilization window
    2. Usually simple: copy from the docs and adjust the fields
  13. Taint, Toleration, NodeAffinity
  14. RBAC
    1. ServiceAccount
    2. ClusterRole/Role
    3. ClusterRoleBinding
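
For item 4 (NotReady worker node), the usual kubelet checks - a generic sketch, not tied to a specific task:

ssh node01
sudo systemctl status kubelet
sudo journalctl -u kubelet | tail -n 50
# common causes: kubelet stopped/disabled, or a wrong path or port in
# /etc/kubernetes/kubelet.conf or /var/lib/kubelet/config.yaml
sudo systemctl enable --now kubelet
sudo systemctl restart kubelet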

Create Pod with multiple containers (8)

k -n mc-namespace run mc-pod --image=nginx:1-alpine -o=yaml --dry-run=client > mc-pod.yaml

vi mc-pod.yaml
# mc-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  namespace: mc-namespace
  name: mc-pod
spec:
  containers:
  - name: mc-pod-1
    image: nginx:1-alpine
    env:
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
  - name: mc-pod-2
    image: busybox:1
    command:
    - "sh"
    - "-c"
    - "while true; do date >> /var/log/shared/date.log; sleep 1; done"
    volumeMounts:
    - name: shared-volume
      mountPath: /var/log/shared
  - name: mc-pod-3
    image: busybox:1
    command:
    - "sh"
    - "-c"
    - "tail -f /var/log/shared/date.log"
    volumeMounts:
    - name: shared-volume
      mountPath: /var/log/shared
  volumes:
  - name: shared-volume
    emptyDir: {}
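
# Create the pod from the manifest (step assumed; not shown in the original notes)
k apply -f mc-pod.yaml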
k -n mc-namespace get pod
k -n mc-namespace logs mc-pod -c mc-pod-3 -f

Install container runtime (7)

~ ssh bob@node01
bob@node01's password:

bob@node01 ~ sudo su

root@node01 /home/bob cd /root

root@node01 ~ ls
cri-docker_0.3.16.3-0.debian.deb

root@node01 ~ dpkg -i ./cri-docker_0.3.16.3-0.debian.deb

root@node01 ~ systemctl start cri-docker

root@node01 ~ systemctl enable cri-docker

root@node01 ~ systemctl status cri-docker

root@node01 ~ systemctl is-enabled cri-docker
enabled

Expose pod with Service (8)

controlplane ~ k get pods
NAME        READY   STATUS    RESTARTS   AGE
messaging   1/1     Running   0          4m2s
 
controlplane ~ k expose pod messaging --port=6379 --name=messaging-service
service/messaging-service exposed
 
controlplane ~ k get svc
NAME                TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
kubernetes          ClusterIP   172.20.0.1     <none>        443/TCP    48m
messaging-service   ClusterIP   172.20.26.32   <none>        6379/TCP   6s

Create Deployment (10)

controlplane ~ k create deployment hr-web-app --image=kodekloud/webapp-color --replicas=2

Expose Deployment with Service (8)

controlplane ~ k expose deployment hr-web-app --type=NodePort --port=8080 --name=hr-web-app-service --dry-run=client -o=yaml > hr-svc.yaml
 
controlplane ~ vi hr-svc.yaml 
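
# Presumed edit to hr-svc.yaml: pin the nodePort that appears in the describe output below
#   ports:
#   - port: 8080
#     targetPort: 8080
#     nodePort: 30082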
 
controlplane ~ k apply -f hr-svc.yaml 
service/hr-web-app-service created
 
controlplane ~ k describe svc hr-web-app-service 
Name:                     hr-web-app-service
Namespace:                default
Labels:                   app=hr-web-app
Annotations:              <none>
Selector:                 app=hr-web-app
Type:                     NodePort
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       172.20.13.84
IPs:                      172.20.13.84
Port:                     <unset>  8080/TCP
TargetPort:               8080/TCP
NodePort:                 <unset>  30082/TCP
Endpoints:                172.17.0.11:8080,172.17.0.10:8080
Session Affinity:         None
External Traffic Policy:  Cluster
Internal Traffic Policy:  Cluster
Events:                   <none>

Create PV (8)

controlplane ~ vi pv-analytics.yaml
 
controlplane ~ cat pv-analytics.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-analytics
spec:
  capacity:
    storage: 100Mi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  hostPath:
    path: /pv/data-analytics
 
controlplane ~ k apply -f pv-analytics.yaml 
persistentvolume/pv-analytics created
 
controlplane ~ k get pv
NAME           CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
pv-analytics   100Mi      RWX            Retain           Available                          <unset>                          2s

Create HPA (10)

controlplane ~ ➜  vi webapp-hpa.yaml 
 
controlplane ~ ➜  cat webapp-hpa.yaml 
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: webapp-hpa
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: kkapp-deploy
  minReplicas: 2
  maxReplicas: 10
  metrics:
   - type: Resource
     resource:
       name: cpu
       target:
         type: Utilization
         averageUtilization: 50
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300
 
controlplane ~ ➜  k apply  -f webapp-hpa.yaml 
horizontalpodautoscaler.autoscaling/webapp-hpa created
 
controlplane ~ ➜  k get hpa
NAME         REFERENCE                 TARGETS              MINPODS   MAXPODS   REPLICAS   AGE
webapp-hpa   Deployment/kkapp-deploy   cpu: <unknown>/50%   2         10        0          5s

Create VPA (9)

controlplane ~ ➜  vi analytics-vpa.yaml 
 
controlplane ~ ➜  cat analytics-vpa.yaml 
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: analytics-vpa
  namespace: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: analytics-deployment
  updatePolicy:
    updateMode: Auto
 
controlplane ~ ➜  k apply -f analytics-vpa.yaml 
verticalpodautoscaler.autoscaling.k8s.io/analytics-vpa created
 
controlplane ~ ➜  k get vpa
NAME            MODE   CPU   MEM   PROVIDED   AGE
analytics-vpa   Auto               False      38s

Create Gateway (6)

controlplane ~ ➜  vi web-gateway.yaml
 
controlplane ~ ➜  cat web-gateway.yaml 
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: web-gateway
  namespace: nginx-gateway
spec:
  gatewayClassName: nginx
  listeners:
  - name: http
    protocol: HTTP
    port: 80
 
controlplane ~ ➜  k apply -f web-gateway.yaml 
gateway.gateway.networking.k8s.io/web-gateway created
 
controlplane ~ ➜  k -n nginx-gateway get gateway
NAME          CLASS   ADDRESS   PROGRAMMED   AGE
web-gateway   nginx             True         13s

Helm Chart upgrade (8)

controlplane ~ ➜  helm list -n kk-ns
NAME            NAMESPACE       REVISION        UPDATED                                 STATUS          CHART           APP VERSION
kk-mock1        kk-ns           1               2025-11-06 14:28:24.255194099 +0000 UTC deployed        nginx-18.1.0    1.27.0     
 
controlplane ~ ➜  helm repo list
NAME            URL                               
kk-mock1        https://charts.bitnami.com/bitnami
 
controlplane ~ ➜  helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "kk-mock1" chart repository
Update Complete. ⎈Happy Helming!⎈
 
controlplane ~ ➜  helm repo list
NAME            URL                               
kk-mock1        https://charts.bitnami.com/bitnami
 
controlplane ~ ➜  helm search repo nginx
NAME                                    CHART VERSION   APP VERSION   DESCRIPTION                                       
kk-mock1/nginx                          22.2.4          1.29.3        NGINX Open Source is a web server that can be a...
kk-mock1/nginx-ingress-controller       12.0.7          1.13.1        NGINX Ingress Controller is an Ingress controll...
kk-mock1/nginx-intel                    2.1.15          0.4.9         DEPRECATED NGINX Open Source for Intel is a lig...
 
controlplane ~ ➜  helm search repo nginx --versions | grep 18.1.15
kk-mock1/nginx                          18.1.15         1.27.1        NGINX Open Source is a web server that can be a...
 
controlplane ~ ➜  helm upgrade kk-mock1 kk-mock1/nginx --version=18.1.5 -n kk-ns
Release "kk-mock1" has been upgraded. Happy Helming!
NAME: kk-mock1
LAST DEPLOYED: Thu Nov  6 14:31:52 2025
NAMESPACE: kk-ns
STATUS: deployed
REVISION: 2
TEST SUITE: None
NOTES:
CHART NAME: nginx
CHART VERSION: 18.1.5
APP VERSION: 1.27.0
 
** Please be patient while the chart is being deployed **
NGINX can be accessed through the following DNS name from within your cluster:
 
    kk-mock1-nginx.kk-ns.svc.cluster.local (port 80)
 
To access NGINX from outside the cluster, follow the steps below:
 
1. Get the NGINX URL by running these commands:
 
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        Watch the status with: 'kubectl get svc --namespace kk-ns -w kk-mock1-nginx'
 
    export SERVICE_PORT=$(kubectl get --namespace kk-ns -o jsonpath="{.spec.ports[0].port}" services kk-mock1-nginx)
    export SERVICE_IP=$(kubectl get svc --namespace kk-ns kk-mock1-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
    echo "http://${SERVICE_IP}:${SERVICE_PORT}"
 
WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs:
  - cloneStaticSiteFromGit.gitSync.resources
  - resources
+info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
 
⚠ SECURITY WARNING: Original containers have been substituted. This Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables.
 
Substituted images detected:
  - %!s(<nil>)/:%!s(<nil>)
 
controlplane ~ ➜  helm list -n kk-ns
NAME            NAMESPACE       REVISION        UPDATED                                        STATUS          CHART           APP VERSION
kk-mock1        kk-ns           2               2025-11-06 14:31:52.339403678 +0000 UTC        deployed        nginx-18.1.5    1.27.0

Create StorageClass (6)

controlplane ~ ➜  vi local-sc.yaml
 
controlplane ~ ➜  k apply -f local-sc.yaml 
storageclass.storage.k8s.io/local-sc created
 
controlplane ~ ➜  k get sc
NAME                 PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
local-sc (default)   kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   true                   4s
 
controlplane ~ ➜  cat local-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-sc
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/no-provisioner
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer

Create Deployment (10)

controlplane ~ ➜  k create deployment logging-deployment --image=busybox --namespace=logging-ns --replicas=1 --dry-run=client -o=yaml > logging-deployment.yaml
 
controlplane ~ ➜  vi logging-deployment.yaml 
 
controlplane ~ ➜  cat logging-deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logging-deployment
  namespace: logging-ns
spec:
  replicas: 1
  selector:
    matchLabels:
      app: logging-deployment
  template:
    metadata:
      labels:
        app: logging-deployment
    spec:
      volumes:
      - name: shared-volume
        emptyDir: {}
      containers:
      - image: busybox
        name: app-container
        command:
        - "sh"
        - "-c"
        - "while true; do echo 'Log entry' >> /var/log/app/app.log; sleep 5; done"
        resources: {}
        volumeMounts:
         - mountPath: /var/log/app
           name: shared-volume
      - image: busybox
        name: log-agent
        command:
        - "sh"
        - "-c"
        - "tail -f /var/log/app/app.log"
        volumeMounts:
         - mountPath: /var/log/app
           name: shared-volume
status: {}
 
controlplane ~ ➜  k create -f logging-deployment.yaml 
deployment.apps/logging-deployment created

Create Ingress (10)

controlplane ~ ➜  k -n ingress-ns get deploy
NAME            READY   UP-TO-DATE   AVAILABLE   AGE
webapp-deploy   1/1     1            1           10m
 
controlplane ~ ➜  k -n ingress-ns get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
webapp-svc   ClusterIP   172.20.133.154   <none>        80/TCP    10m
 
controlplane ~ ➜  vi webapp-ingress.yaml 
 
controlplane ~ ➜  cat webapp-ingress.yaml 
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: webapp-ingress
  namespace: ingress-ns
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - host: "kodekloud-ingress.app"
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: webapp-svc
            port:
              number: 80
 
 
controlplane ~ ➜  k create -f webapp-ingress.yaml 
ingress.networking.k8s.io/webapp-ingress created
 
controlplane ~ ➜  k -n ingress-ns get ingress
NAME             CLASS   HOSTS                   ADDRESS   PORTS   AGE
webapp-ingress   nginx   kodekloud-ingress.app             80      9s
 
controlplane ~ ➜  curl -s http://kodekloud-ingress.app/
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
 
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
 
<p><em>Thank you for using nginx.</em></p>
</body>
</html>

Create Role and RoleBinding (10)

controlplane ~ ➜  cat /root/CKA/john.csr | base64 | tr -d '\n'
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUp6Nm51VE5XK3NjenMrdk5veWMzT2ZhOFhCMUZWYVhsbS9SUGtmMFdnWjhHbG0yCnkyU1paQ0YzUmtJKzJFamQ0V1RlYVN3dnFiNUdPU0o3N2ZiaUx6aUd2SS80VTdQM1JvMnNWVG5Ra0RCb2VQczIKQm5SK2FzVjRnbmZuWDUrZklWRFJaMmt2eFRoeXFFZStWQ3p0eDkyYTNSVWszWk9xa0J0Y24vOFd5TURjaFFSagpteXZ6MmtEZTBWbFc4eC9yUHpPZGpNSCtia3N6YjRxcVczUVllTkRKUklMMHVMOXdXUy9PRTl6eklKeXhDbFQ1Cm5UWTRWam5VaGE5MjFYSld5a3dvMkVaMW8vbnRBUG5uWHlJL3lJQ3htSW5QY3RLRFJLMWhPVWg1QlRwMXl1dFYKOG1oa1F2RWNkTW1FU0FWOTJIQXpub2VQMjRlaitwbkt5a1lFdlZrQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlJlSGhBWDBMT21ab3U1dEtPcDRkZ0tSRmNoV3FpRkpaaWptWTNaTkVJcDIzSGErZ1crWHp5CkU3a2h5VUt1QzBIRkFBYURVQ1I1SE9qSlBPL3owV21aRGpDc3gxM1BnVmxlZmJINkIwQkI4RVpWVXBzWnFraGgKQ1l5Y05VaHdycU5BcWxPU3ZPdmkvUEdldXp1NUZxaE0vK3JXdFRrbWdYSDlyZTlLNXhCWVM5UXR0TDVBTlY1SgpldkFYY3B2UDZRS2dkYWJHbDEzc3F5bGdsdWg1VEZSNXhTOUlDSnhYSm9Od3BtdEd6RG1PaFpFNllid250Z2thCjd5bkJ4eUNoRmlTLzloNDFDeXd6dFlUK0s0d2ROeTczUnk0TEd5eEl2ZkIySS96L2dkQ0cvTTljOFVEWUplQmcKSmMwdlVGalVCMzBHTTR2MjdOV0VjeFhHb21KWHFRKzQKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
controlplane ~ ➜  vi csr.yaml 
 
controlplane ~ ➜  cat csr.yaml 
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: john-developer
spec:
  signerName: kubernetes.io/kube-apiserver-client
  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUp6Nm51VE5XK3NjenMrdk5veWMzT2ZhOFhCMUZWYVhsbS9SUGtmMFdnWjhHbG0yCnkyU1paQ0YzUmtJKzJFamQ0V1RlYVN3dnFiNUdPU0o3N2ZiaUx6aUd2SS80VTdQM1JvMnNWVG5Ra0RCb2VQczIKQm5SK2FzVjRnbmZuWDUrZklWRFJaMmt2eFRoeXFFZStWQ3p0eDkyYTNSVWszWk9xa0J0Y24vOFd5TURjaFFSagpteXZ6MmtEZTBWbFc4eC9yUHpPZGpNSCtia3N6YjRxcVczUVllTkRKUklMMHVMOXdXUy9PRTl6eklKeXhDbFQ1Cm5UWTRWam5VaGE5MjFYSld5a3dvMkVaMW8vbnRBUG5uWHlJL3lJQ3htSW5QY3RLRFJLMWhPVWg1QlRwMXl1dFYKOG1oa1F2RWNkTW1FU0FWOTJIQXpub2VQMjRlaitwbkt5a1lFdlZrQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlJlSGhBWDBMT21ab3U1dEtPcDRkZ0tSRmNoV3FpRkpaaWptWTNaTkVJcDIzSGErZ1crWHp5CkU3a2h5VUt1QzBIRkFBYURVQ1I1SE9qSlBPL3owV21aRGpDc3gxM1BnVmxlZmJINkIwQkI4RVpWVXBzWnFraGgKQ1l5Y05VaHdycU5BcWxPU3ZPdmkvUEdldXp1NUZxaE0vK3JXdFRrbWdYSDlyZTlLNXhCWVM5UXR0TDVBTlY1SgpldkFYY3B2UDZRS2dkYWJHbDEzc3F5bGdsdWg1VEZSNXhTOUlDSnhYSm9Od3BtdEd6RG1PaFpFNllid250Z2thCjd5bkJ4eUNoRmlTLzloNDFDeXd6dFlUK0s0d2ROeTczUnk0TEd5eEl2ZkIySS96L2dkQ0cvTTljOFVEWUplQmcKSmMwdlVGalVCMzBHTTR2MjdOV0VjeFhHb21KWHFRKzQKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
  usages:
  - digital signature
  - key encipherment
  - client auth
 
controlplane ~ ➜  k apply -f csr.yaml 
certificatesigningrequest.certificates.k8s.io/john-developer created
 
controlplane ~ ➜  k get csr
NAME             AGE   SIGNERNAME                                    REQUESTOR                  REQUESTEDDURATION   CONDITION
csr-f79dh        38m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pr9dl6    <none>              Approved,Issued
csr-wgd7n        39m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   <none>              Approved,Issued
john-developer   4s    kubernetes.io/kube-apiserver-client           kubernetes-admin           <none>              Pending
 
controlplane ~ ➜  k certificate approve john-developer
certificatesigningrequest.certificates.k8s.io/john-developer approved
 
controlplane ~ ➜  k get csr
NAME             AGE   SIGNERNAME                                    REQUESTOR                  REQUESTEDDURATION   CONDITION
csr-f79dh        39m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pr9dl6    <none>              Approved,Issued
csr-wgd7n        40m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   <none>              Approved,Issued
john-developer   28s   kubernetes.io/kube-apiserver-client           kubernetes-admin           <none>              Approved,Issued
 
controlplane ~ ➜  vi rbac.yaml 
 
controlplane ~ ➜  cat rbac.yaml 
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: development
  name: developer
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["create", "get", "update", "list", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: john-dev-role-binding
  namespace: development
subjects:
- kind: User
  name: john
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: developer 
  apiGroup: rbac.authorization.k8s.io
 
 
controlplane ~ ➜  k apply -f rbac.yaml 
role.rbac.authorization.k8s.io/developer created
rolebinding.rbac.authorization.k8s.io/john-dev-role-binding created
 
controlplane ~ ➜  k auth can-i create pods --as=john -n development
yes
 
controlplane ~ ➜  k auth can-i create pods --as=john
no

Create ClusterIP Service and test dns lookup (10)

controlplane ~ ➜  k run nginx-resolver --image=nginx
pod/nginx-resolver created
 
controlplane ~ ➜  k expose pod nginx-resolver --name=nginx-resolver-svc --port=80 --target-port=80 --type=ClusterIP
service/nginx-resolver-svc exposed
 
controlplane ~ ➜  k get svc
NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
kubernetes           ClusterIP   172.20.0.1      <none>        443/TCP   51m
nginx-resolver-svc   ClusterIP   172.20.137.21   <none>        80/TCP    4s
 
controlplane ~ ➜  k describe svc nginx-resolver-svc 
Name:                     nginx-resolver-svc
Namespace:                default
Labels:                   run=nginx-resolver
Annotations:              <none>
Selector:                 run=nginx-resolver
Type:                     ClusterIP
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       172.20.137.21
IPs:                      172.20.137.21
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
Endpoints:                172.17.1.15:80
Session Affinity:         None
Internal Traffic Policy:  Cluster
Events:                   <none>
 
controlplane ~ ➜  k get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
nginx-deploy-5846bc77f5-2szm4   1/1     Running   0          20m   172.17.1.14   node01   <none>           <none>
nginx-resolver                  1/1     Running   0          85s   172.17.1.15   node01   <none>           <none>
 
controlplane ~ ➜  k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup nginx-resolver-svc > /root/CKA/nginx.svc
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.
 
controlplane ~ ➜  cat /root/CKA/nginx.svc 
Address 1: 172.20.0.10 kube-dns.kube-system.svc.cluster.local
 
Name:      nginx-resolver-svc
Address 1: 172.20.137.21 nginx-resolver-svc.default.svc.cluster.local
pod "test-nslookup" deleted from default namespace
 
controlplane ~ ➜  k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup 172-17-1-15.default.pod > /root/CKA/nginx.pod
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.
 
controlplane ~ ➜  cat /root/CKA/nginx.pod
Address 1: 172.20.0.10 kube-dns.kube-system.svc.cluster.local
 
Name:      172-17-1-15.default.pod
Address 1: 172.17.1.15 172-17-1-15.nginx-resolver-svc.default.svc.cluster.local
pod "test-nslookup" deleted from default namespace

Create StaticPod on node01 (10)

controlplane ~ ➜  k get nodes
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   59m   v1.34.0
node01         Ready    <none>          59m   v1.34.0
 
controlplane ~ ➜  k run nginx-critical --image=nginx --dry-run=client -o=yaml > static.yaml
 
controlplane ~ ➜  cat static.yaml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx-critical
  name: nginx-critical
spec:
  containers:
  - image: nginx
    name: nginx-critical
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
 
controlplane ~ ➜  ssh node01
Welcome to Ubuntu 22.04.5 LTS (GNU/Linux 5.15.0-1083-gcp x86_64)
 
 * Documentation:  https://help.ubuntu.com
 * Management:     https://landscape.canonical.com
 * Support:        https://ubuntu.com/pro
 
This system has been minimized by removing packages and content that are
not required on a system that users do not log into.
 
To restore this content, you can run the 'unminimize' command.
 
node01 ~ ➜  cd /etc/kubernetes/manifests/
 
node01 /etc/kubernetes/manifests ➜  ls
 
node01 /etc/kubernetes/manifests ➜  vi static.yaml
 
node01 /etc/kubernetes/manifests ➜  cat static.yaml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx-critical
  name: nginx-critical
spec:
  containers:
  - image: nginx
    name: nginx-critical
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
 
node01 /etc/kubernetes/manifests ➜  exit
logout
Connection to node01 closed.
 
controlplane ~ ➜  k get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
nginx-critical-node01           1/1     Running   0          23s   172.17.1.23   node01   <none>           <none>
nginx-deploy-5846bc77f5-2szm4   1/1     Running   0          31m   172.17.1.14   node01   <none>           <none>
nginx-resolver                  1/1     Running   0          12m   172.17.1.15   node01   <none>           <none>

Create HPA (10)

controlplane ~ ➜  ls
CKA  csr.yaml  local-sc.yaml  logging-deploy.yaml  rbac.yaml  static.yaml  webapp-hpa.yaml  webapp-ingress.yaml
 
controlplane ~ ➜  vi webapp-hpa.yaml 
 
controlplane ~ ➜  cat webapp-hpa.yaml 
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: backend-hpa
  namespace: backend
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: backend-deployment
  minReplicas: 3
  maxReplicas: 15
  metrics:
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 65
 
controlplane ~ ➜  k apply -f webapp-hpa.yaml 
horizontalpodautoscaler.autoscaling/backend-hpa unchanged
 
controlplane ~ ➜  k -n backend describe hpa
Name:                                                     backend-hpa
Namespace:                                                backend
Labels:                                                   <none>
Annotations:                                              <none>
CreationTimestamp:                                        Sat, 08 Nov 2025 11:57:11 +0000
Reference:                                                Deployment/backend-deployment
Metrics:                                                  ( current / target )
  resource memory on pods  (as a percentage of request):  <unknown> / 65%
Min replicas:                                             3
Max replicas:                                             15
Deployment pods:                                          3 current / 0 desired
Conditions:
  Type           Status  Reason                   Message
  ----           ------  ------                   -------
  AbleToScale    True    SucceededGetScale        the HPA controller was able to get the target's current scale
  ScalingActive  False   FailedGetResourceMetric  the HPA was unable to compute the replica count: failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
Events:
  Type     Reason                        Age               From                       Message
  ----     ------                        ----              ----                       -------
  Warning  FailedGetResourceMetric       4s (x6 over 79s)  horizontal-pod-autoscaler  failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
  Warning  FailedComputeMetricsReplicas  4s (x6 over 79s)  horizontal-pod-autoscaler  invalid metrics (1 invalid out of 1), first error is: failed to get memory resource metric value: failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)

Configure HTTPS Gateway (10)

controlplane ~ ➜  k -n cka5673 get gateway
NAME          CLASS       ADDRESS   PROGRAMMED   AGE
web-gateway   kodekloud             Unknown      8s
 
controlplane ~ ➜  k -n cka5673 get gateway -o yaml
apiVersion: v1
items:
- apiVersion: gateway.networking.k8s.io/v1
  kind: Gateway
  metadata:
    annotations:
      kubectl.kubernetes.io/last-applied-configuration: |
        {"apiVersion":"gateway.networking.k8s.io/v1","kind":"Gateway","metadata":{"annotations":{},"name":"web-gateway","namespace":"cka5673"},"spec":{"gatewayClassName":"kodekloud","listeners":[{"name":"http","port":80,"protocol":"HTTP"}]}}
    creationTimestamp: "2025-11-08T12:05:44Z"
    generation: 1
    name: web-gateway
    namespace: cka5673
    resourceVersion: "10291"
    uid: d2ffe126-d82f-4213-94a9-86970e31b0a2
  spec:
    gatewayClassName: kodekloud
    listeners:
    - allowedRoutes:
        namespaces:
          from: Same
      name: http
      port: 80
      protocol: HTTP
  status:
    conditions:
    - lastTransitionTime: "1970-01-01T00:00:00Z"
      message: Waiting for controller
      reason: Pending
      status: Unknown
      type: Accepted
    - lastTransitionTime: "1970-01-01T00:00:00Z"
      message: Waiting for controller
      reason: Pending
      status: Unknown
      type: Programmed
kind: List
metadata:
  resourceVersion: ""
 
controlplane ~ ➜  k -n cka5673 get gateway -o yaml > web-gateway.yaml
 
controlplane ~ ➜  k -n cka5673 get secret
NAME            TYPE                DATA   AGE
kodekloud-tls   kubernetes.io/tls   2      2m18s
 
controlplane ~ ➜  cat web-gateway.yaml 
apiVersion: v1
items:
- apiVersion: gateway.networking.k8s.io/v1
  kind: Gateway
  metadata:
    annotations:
      kubectl.kubernetes.io/last-applied-configuration: |
        {"apiVersion":"gateway.networking.k8s.io/v1","kind":"Gateway","metadata":{"annotations":{},"name":"web-gateway","namespace":"cka5673"},"spec":{"gatewayClassName":"kodekloud","listeners":[{"name":"http","port":80,"protocol":"HTTP"}]}}
    creationTimestamp: "2025-11-08T12:05:44Z"
    generation: 1
    name: web-gateway
    namespace: cka5673
    resourceVersion: "10291"
    uid: d2ffe126-d82f-4213-94a9-86970e31b0a2
  spec:
    gatewayClassName: kodekloud
    listeners:
    - allowedRoutes:
        namespaces:
          from: Same
      name: https
      port: 443
      protocol: HTTPS
      hostname: kodekloud.com
      tls:
        certificateRefs:
        - name: kodekloud-tls
  status:
    conditions:
    - lastTransitionTime: "1970-01-01T00:00:00Z"
      message: Waiting for controller
      reason: Pending
      status: Unknown
      type: Accepted
    - lastTransitionTime: "1970-01-01T00:00:00Z"
      message: Waiting for controller
      reason: Pending
      status: Unknown
      type: Programmed
kind: List
metadata:
  resourceVersion: ""
 
controlplane ~ ➜  k apply -f web-gateway.yaml 
gateway.gateway.networking.k8s.io/web-gateway configured

Helm (10)

controlplane ~ ➜  helm list -A
NAME                    NAMESPACE               REVISION        UPDATED                                 STATUS          CHART                       APP VERSION
atlanta-page-apd        atlanta-page-04         1               2025-11-08 10:57:43.405721672 +0000 UTC deployed        atlanta-page-apd-0.1.0      1.16.0     
digi-locker-apd         digi-locker-02          1               2025-11-08 10:57:40.988036054 +0000 UTC deployed        digi-locker-apd-0.1.0       1.16.0     
security-alpha-apd      security-alpha-01       1               2025-11-08 10:57:40.109579755 +0000 UTC deployed        security-alpha-apd-0.1.0    1.16.0     
web-dashboard-apd       web-dashboard-03        1               2025-11-08 10:57:41.989424936 +0000 UTC deployed        web-dashboard-apd-0.1.0     1.16.0     
 
controlplane ~ ➜  helm get manifest atlanta-page-apd -n atlanta-page-04
---
# Source: atlanta-page-apd/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: atlanta-page-sa
  labels:
    app: atlanta-page-apd
---
# Source: atlanta-page-apd/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: atlanta-page-svc
  labels:
    app: atlanta-page-apd-svc
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: atlanta-page-apd
---
# Source: atlanta-page-apd/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: atlanta-page-apd
  labels:
    app: atlanta-page-apd
spec:
  replicas: 4
  selector:
    matchLabels:
      app: atlanta-page-apd
  template:
    metadata:
      labels:
        app: atlanta-page-apd
    spec:
      serviceAccountName: atlanta-page-sa
      containers:
        - name: atlanta-page-apd
          image: "kodekloud/webapp-color:v1"
          imagePullPolicy: IfNotPresent
 
 
controlplane ~ ➜  helm get manifest atlanta-page-apd -n atlanta-page-04 | grep -i webapp-color:v1
          image: "kodekloud/webapp-color:v1"
 
controlplane ~ ➜  helm uninstall atlanta-page-apd -n atlanta-page-04
release "atlanta-page-apd" uninstalled

NetworkPolicy (6)

controlplane ~ ➜  ls
CKA       kodekloud.crt  local-sc.yaml        net-pol-1.yaml  net-pol-3.yaml  static.yaml      webapp-ingress.yaml
csr.yaml  kodekloud.key  logging-deploy.yaml  net-pol-2.yaml  rbac.yaml       webapp-hpa.yaml  web-gateway.yaml
 
controlplane ~ ➜  cat net-pol-1.yaml 
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: net-policy-1
  namespace: backend
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          access: allowed
    ports:
    - protocol: TCP
      port: 80
 
controlplane ~ ➜  cat net-pol-2.yaml 
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: net-policy-2
  namespace: backend
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: frontend
    - namespaceSelector:
        matchLabels:
          name: databases
    ports:
    - protocol: TCP
      port: 80
 
controlplane ~ ➜  cat net-pol-3.yaml 
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: net-policy-3
  namespace: backend
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: frontend
    ports:
    - protocol: TCP
      port: 80
 
controlplane ~ ➜  k get ns --show-labels
NAME                STATUS   AGE     LABELS
atlanta-page-04     Active   75m     kubernetes.io/metadata.name=atlanta-page-04
backend             Active   18m     kubernetes.io/metadata.name=backend,name=backend
cka5673             Active   7m30s   kubernetes.io/metadata.name=cka5673
default             Active   81m     kubernetes.io/metadata.name=default
development         Active   51m     kubernetes.io/metadata.name=development
digi-locker-02      Active   75m     kubernetes.io/metadata.name=digi-locker-02
frontend            Active   93s     kubernetes.io/metadata.name=frontend,name=frontend
ingress-nginx       Active   66m     app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx,kubernetes.io/metadata.name=ingress-nginx
ingress-ns          Active   66m     kubernetes.io/metadata.name=ingress-ns
kube-node-lease     Active   81m     kubernetes.io/metadata.name=kube-node-lease
kube-public         Active   81m     kubernetes.io/metadata.name=kube-public
kube-system         Active   81m     kubernetes.io/metadata.name=kube-system
logging-ns          Active   72m     kubernetes.io/metadata.name=logging-ns
nginx-gateway       Active   75m     kubernetes.io/metadata.name=nginx-gateway
security-alpha-01   Active   75m     kubernetes.io/metadata.name=security-alpha-01
web-dashboard-03    Active   75m     kubernetes.io/metadata.name=web-dashboard-03
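
# net-policy-3 is applied: only the frontend namespace carries the name=frontend label above,
# while the selectors in net-pol-1 (access=allowed) and net-pol-2 (name=databases) reference
# labels that no namespace has (assumption about the task requirement: allow ingress from
# the frontend namespace only, on TCP 80)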
 
controlplane ~ ➜  k apply -f net-pol-3.yaml 
networkpolicy.networking.k8s.io/net-policy-3 created

Adjusting network parameters with kubeadm (6)

# sysctl params required by setup, params persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
 
# Apply sysctl params without reboot
sudo sysctl --system

Create ServiceAccount, ClusterRole, ClusterRoleBinding (8)

controlplane ~ ➜  k create serviceaccount pvviewer
serviceaccount/pvviewer created

controlplane ~ ➜  k get sa
NAME       SECRETS   AGE
default    0         19m
pvviewer   0         4s

controlplane ~ ➜  k create clusterrole pvviewer-role --resource=persistentvolumes --verb=list
clusterrole.rbac.authorization.k8s.io/pvviewer-role created

controlplane ~ ➜  k describe clusterrole pvviewer-role 
Name:         pvviewer-role
Labels:       <none>
Annotations:  <none>
PolicyRule:
  Resources          Non-Resource URLs  Resource Names  Verbs
  ---------          -----------------  --------------  -----
  persistentvolumes  []                 []              [list]

controlplane ~ ➜  k create clusterrolebinding pvviewer-role-binding --clusterrole=pvviewer-role --serviceaccount=default:pvviewer
clusterrolebinding.rbac.authorization.k8s.io/pvviewer-role-binding created

controlplane ~ ➜  k describe clusterrolebindings pvviewer-role-binding
Name:         pvviewer-role-binding
Labels:       <none>
Annotations:  <none>
Role:
  Kind:  ClusterRole
  Name:  pvviewer-role
Subjects:
  Kind            Name      Namespace
  ----            ----      ---------
  ServiceAccount  pvviewer  default

controlplane ~ ➜  k run pvviewer --image=redis --dry-run=client -o yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: pvviewer
  name: pvviewer
spec:
  containers:
  - image: redis
    name: pvviewer
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}

controlplane ~ ➜  vi redis.yaml
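
# Presumed content of redis.yaml: the dry-run output above plus the ServiceAccount
# (assumption - only serviceAccountName was added)
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: pvviewer
  name: pvviewer
spec:
  serviceAccountName: pvviewer
  containers:
  - image: redis
    name: pvviewer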

controlplane ~ ➜  k apply -f redis.yaml 
pod/pvviewer created

controlplane ~ ➜  k get pod
NAME       READY   STATUS    RESTARTS   AGE
pvviewer   1/1     Running   0          6s

Create StorageClass (6)

controlplane ~ ➜  vi sc.yaml

controlplane ~ ➜  cat sc.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rancher-sc
provisioner: rancher.io/local-path
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer

controlplane ~ ➜  k apply -f sc.yaml 
storageclass.storage.k8s.io/rancher-sc created

Create ConfigMap (8)

controlplane ~ ➜  k create configmap app-config -n cm-namespace --from-literal=ENV=production --from-literal=LOG_LEVEL=info
configmap/app-config created

controlplane ~ ➜  k -n cm-namespace get cm
NAME               DATA   AGE
app-config         2      13s
kube-root-ca.crt   1      58s

controlplane ~ ➜  k -n cm-namespace get deployment
NAME        READY   UP-TO-DATE   AVAILABLE   AGE
cm-webapp   1/1     1            1           2m26s

controlplane ~ ➜  k edit -n cm-namespace deployment cm-webapp 
...
    spec:
      containers:
      - image: nginx
        imagePullPolicy: Always
        name: nginx
        # Add the ConfigMap as environment variables
        envFrom:
          - configMapRef:
              name: app-config
...
deployment.apps/cm-webapp edited

Create PriorityClass (8)

controlplane ~ ➜  k create priorityclass low-priority --value=50000
priorityclass.scheduling.k8s.io/low-priority created

controlplane ~ ➜  k -n low-priority get pod
NAME     READY   STATUS    RESTARTS   AGE
lp-pod   1/1     Running   0          41s

controlplane ~ ➜  k -n low-priority edit pod lp-pod 
error: pods "lp-pod" is invalid
A copy of your changes has been stored to "/tmp/kubectl-edit-1609190342.yaml"
error: Edit cancelled, no valid changes were saved.

controlplane ~ ✖ k replace -f /tmp/kubectl-edit-1609190342.yaml --force 
pod "lp-pod" deleted from low-priority namespace
Error from server (Forbidden): pods "lp-pod" is forbidden: the integer value of priority (0) must not be provided in pod spec; priority admission controller computed 50000 from the given PriorityClass name

# Remove spec.priority
controlplane ~ ✖ vi /tmp/kubectl-edit-1609190342.yaml 
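
# Presumed change in the saved copy: the hard-coded priority value is removed and the
# class is referenced by name instead
#   spec:
#     priorityClassName: low-priority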

controlplane ~ ➜  k replace -f /tmp/kubectl-edit-1609190342.yaml --force 
pod/lp-pod replaced

Create NetworkPolicy (8)

controlplane ~ ➜  k get pod --show-labels 
NAME        READY   STATUS    RESTARTS   AGE     LABELS
np-test-1   1/1     Running   0          4m27s   run=np-test-1
pvviewer    1/1     Running   0          16m     <none>

controlplane ~ ➜  cat np.yaml 
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: ingress-to-nptest
  namespace: default
spec:
  podSelector:
    matchLabels:
      run: np-test-1
  policyTypes:
  - Ingress
  ingress:
    - ports:
      - protocol: TCP
        port: 80

controlplane ~ ➜  k apply -f np.yaml 
networkpolicy.networking.k8s.io/ingress-to-nptest created
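
# Optional check (not in the original notes): TCP 80 on np-test-1 should be reachable once
# the policy is applied; np-test-service is assumed to be the matching ClusterIP service
k run np-probe --image=busybox:1.28 --rm -it --restart=Never -- wget -qO- -T 2 np-test-service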

Taint and Toleration (12)

controlplane ~ ➜  k get nodes
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   36m   v1.34.0
node01         Ready    <none>          35m   v1.34.0

controlplane ~ ➜  k taint node node01 env_type=production:NoSchedule
node/node01 tainted

controlplane ~ ➜  k describe node node01 | grep -i taint
Taints:             env_type=production:NoSchedule

controlplane ~ ➜  k run dev-redis --image=redis:alpine
pod/dev-redis created

controlplane ~ ➜ vi prod-redis.yaml

controlplane ~ ➜  cat prod-redis.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: prod-redis
spec:
  containers:
  - name: prod-redis
    image: redis:alpine
  tolerations:
  - key: "env_type"
    operator: "Equal"
    value: "production"
    effect: "NoSchedule"


controlplane ~ ➜  k apply -f prod-redis.yaml 
pod/prod-redis created

controlplane ~ ➜  k get pods -o wide
NAME         READY   STATUS    RESTARTS   AGE    IP           NODE           NOMINATED NODE   READINESS GATES
dev-redis    1/1     Running   0          3m5s   172.17.0.5   controlplane   <none>           <none>
np-test-1    1/1     Running   0          11m    172.17.1.8   node01         <none>           <none>
prod-redis   1/1     Running   0          7s     172.17.1.9   node01         <none>           <none>
pvviewer     1/1     Running   0          23m    172.17.1.3   node01         <none>           <none>

Inspect PVC and PV (6)

controlplane ~ ➜  k get pv
NAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
app-pv   1Gi        RWO            Retain           Available                          <unset>                          40s

controlplane ~ ➜  k get pvc -n storage-ns 
NAME      STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
app-pvc   Pending                                                     <unset>                 59s

controlplane ~ ➜  k get pvc -n storage-ns -o yaml
apiVersion: v1
items:
- apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    annotations:
      kubectl.kubernetes.io/last-applied-configuration: |
        {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"app-pvc","namespace":"storage-ns"},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"1Gi"}}}}
    creationTimestamp: "2025-11-26T14:04:47Z"
    finalizers:
    - kubernetes.io/pvc-protection
    name: app-pvc
    namespace: storage-ns
    resourceVersion: "5401"
    uid: e0092a4c-9d4d-47ad-b091-f23c1a8dfae4
  spec:
    accessModes:
    - ReadWriteMany
    resources:
      requests:
        storage: 1Gi
    volumeMode: Filesystem
  status:
    phase: Pending
kind: List
metadata:
  resourceVersion: ""

# Fix accessModes
controlplane ~ ➜  k edit pvc -n storage-ns app-pvc 
error: persistentvolumeclaims "app-pvc" is invalid
A copy of your changes has been stored to "/tmp/kubectl-edit-1829119349.yaml"
error: Edit cancelled, no valid changes were saved.
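
# Presumed change in the saved copy: accessModes switched to match the PV (app-pv above is RWO)
#   accessModes:
#   - ReadWriteOnce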

controlplane ~ ✖ k replace -f /tmp/kubectl-edit-1829119349.yaml --force 
persistentvolumeclaim "app-pvc" deleted from storage-ns namespace
persistentvolumeclaim/app-pvc replaced

controlplane ~ ➜  k get pvc -n storage-ns 
NAME      STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
app-pvc   Bound    app-pv   1Gi        RWO                           <unset>                 7s

Fix kubeconfig (8)

controlplane ~ ➜  cat CKA/super.kubeconfig 
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: ...
    server: https://controlplane:9999
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
users:
- name: kubernetes-admin
  user:
    client-certificate-data: ...
    client-key-data: ...

controlplane ~ ➜  k get node --kubeconfig=/root/CKA/super.kubeconfig
E1126 14:09:25.390277   46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.390610   46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.392730   46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.393061   46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.394498   46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
The connection to the server controlplane:9999 was refused - did you specify the right host or port?

controlplane ~ ✖ sudo netstat -tulnp | grep kube-apiserver
tcp6       0      0 :::6443                 :::*                    LISTEN      2847/kube-apiserver 

# Fix the port
controlplane ~ ➜  vi /root/CKA/super.kubeconfig 
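
# Presumed fix: the cluster server entry now points at the real apiserver port found above
#   server: https://controlplane:6443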

controlplane ~ ➜  k get node --kubeconfig=/root/CKA/super.kubeconfig
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   49m   v1.34.0
node01         Ready    <none>          48m   v1.34.0

Fix controller-manager (10)

controlplane ~ ➜  k get deploy nginx-deploy 
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deploy   1/1     1            1           5m36s

controlplane ~ ➜  k scale deployment nginx-deploy --replicas=3
deployment.apps/nginx-deploy scaled

controlplane ~ ➜  k get deploy nginx-deploy 
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deploy   1/3     1            1           6m16s

# No new events are being emitted by the deployment-controller
controlplane ~ ➜  k describe deploy nginx-deploy | grep -A5 Events
Events:
  Type    Reason             Age    From                   Message
  ----    ------             ----   ----                   -------
  Normal  ScalingReplicaSet  8m36s  deployment-controller  Scaled up replica set nginx-deploy-59874dbc6b from 0 to 1

# The kube-controller-manager pod is missing
controlplane ~ ➜  k get pod -n kube-system 
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-587f6db6c5-bkn26   1/1     Running   0          55m
canal-jg7mj                                2/2     Running   0          54m
canal-lmhj8                                2/2     Running   0          55m
coredns-6678bcd974-jkff2                   1/1     Running   0          55m
coredns-6678bcd974-wjcgs                   1/1     Running   0          55m
etcd-controlplane                          1/1     Running   0          55m
kube-apiserver-controlplane                1/1     Running   0          55m
kube-proxy-mqwb8                           1/1     Running   0          55m
kube-proxy-vcc8z                           1/1     Running   0          54m
kube-scheduler-controlplane                1/1     Running   0          55m

# Fix the manifest (typically a typo in the command or an incorrect file path)
controlplane ~ ➜  vi /etc/kubernetes/manifests/kube-controller-manager.yaml 
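
# If the static pod does not come back up, generic diagnostics (not from the original notes):
crictl ps -a | grep kube-controller-manager
journalctl -u kubelet | grep -i controller-manager | tail -n 20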

controlplane ~ ➜  k get pod -n kube-system 
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-587f6db6c5-bkn26   1/1     Running   0          58m
canal-jg7mj                                2/2     Running   0          57m
canal-lmhj8                                2/2     Running   0          58m
coredns-6678bcd974-jkff2                   1/1     Running   0          58m
coredns-6678bcd974-wjcgs                   1/1     Running   0          58m
etcd-controlplane                          1/1     Running   0          58m
kube-apiserver-controlplane                1/1     Running   0          58m
kube-controller-manager-controlplane       1/1     Running   0          21s
kube-proxy-mqwb8                           1/1     Running   0          58m
kube-proxy-vcc8z                           1/1     Running   0          57m
kube-scheduler-controlplane                1/1     Running   0          58m

controlplane ~ ➜  k get deploy nginx-deploy 
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deploy   3/3     3            3           12m

Create HPA (6)

controlplane ~ ➜  vi hpa.yaml 

controlplane ~ ➜  cat hpa.yaml 
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: api-hpa
  namespace: api
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: api-deployment
  minReplicas: 1
  maxReplicas: 20
  metrics:
  - type: Pods
    pods:
      metric:
        name: requests_per_second
      target:
        type: AverageValue
        averageValue: "1000"

controlplane ~ ➜  k apply -f hpa.yaml 
horizontalpodautoscaler.autoscaling/api-hpa created

controlplane ~ ➜  k describe hpa -n api 
Name:                             api-hpa
Namespace:                        api
Labels:                           <none>
Annotations:                      <none>
CreationTimestamp:                Wed, 26 Nov 2025 14:29:06 +0000
Reference:                        Deployment/api-deployment
Metrics:                          ( current / target )
  "requests_per_second" on pods:  <unknown> / 1k
Min replicas:                     1
Max replicas:                     20
Deployment pods:                  0 current / 0 desired
Events:                           <none>

Create HTTPRoute (6)

controlplane ~ ➜  k get service
NAME              TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
kubernetes        ClusterIP   172.20.0.1       <none>        443/TCP   71m
np-test-service   ClusterIP   172.20.59.134    <none>        80/TCP    42m
web-service       ClusterIP   172.20.154.119   <none>        80/TCP    4m34s
web-service-v2    ClusterIP   172.20.191.4     <none>        80/TCP    4m34s

controlplane ~ ➜  k get gateway
NAME          CLASS   ADDRESS   PROGRAMMED   AGE
web-gateway   nginx             True         4m38s

controlplane ~ ➜  vi hr.yaml

controlplane ~ ➜  cat hr.yaml 
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: web-route
spec:
  parentRefs:
  - name: web-gateway
  rules:
  - matches:
    - path:
        type: PathPrefix
        value: /
    backendRefs:
    - name: web-service
      port: 80
      weight: 80
    - name: web-service-v2
      port: 80
      weight: 20

controlplane ~ ➜  k apply -f hr.yaml 
httproute.gateway.networking.k8s.io/web-route created

Helm install (4)

controlplane ~ ➜  helm lint /root/new-version/
==> Linting /root/new-version/
[INFO] Chart.yaml: icon is recommended

1 chart(s) linted, 0 chart(s) failed

controlplane ~ ➜ helm install --generate-name /root/new-version/
NAME: new-version-1764167838
LAST DEPLOYED: Wed Nov 26 14:37:18 2025
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

controlplane ~ ➜  helm list
NAME                    NAMESPACE       REVISION        UPDATED                                        STATUS          CHART                  APP VERSION
new-version-1764167838  default         1               2025-11-26 14:37:18.205659093 +0000 UTC        deployed        webpage-server-02-0.1.1v2         
webpage-server-01       default         1               2025-11-26 14:35:32.19008219 +0000 UTC         deployed        webpage-server-01-0.1.0v1         

controlplane ~ ➜  helm uninstall webpage-server-01
release "webpage-server-01" uninstalled

controlplane ~ ➜  helm list
NAME                    NAMESPACE       REVISION        UPDATED                                        STATUS          CHART                  APP VERSION
new-version-1764167838  default         1               2025-11-26 14:37:18.205659093 +0000 UTC        deployed        webpage-server-02-0.1.1v2

Identify pod CIDR (4)

controlplane ~ ➜  k get node
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   77m   v1.34.0
node01         Ready    <none>          76m   v1.34.0

controlplane ~ ➜  k get node controlplane -o yaml | grep -A5 spec
spec:
  podCIDR: 172.17.0.0/24
  podCIDRs:
  - 172.17.0.0/24
status:
  addresses:

controlplane ~ ➜  k get node -o jsonpath='{.items[0].spec.podCIDR}' > /root/pod-cidr.txt

controlplane ~ ➜  cat /root/pod-cidr.txt 
172.17.0.0/24

Identify podSubnet (4)

controlplane ~ ✖ k -n kube-system get configmap kubeadm-config -o yaml
apiVersion: v1
data:
  ClusterConfiguration: |
    apiServer:
      certSANs:
      - controlplane
    apiVersion: kubeadm.k8s.io/v1beta4
    caCertificateValidityPeriod: 87600h0m0s
    certificateValidityPeriod: 8760h0m0s
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: controlplane:6443
    controllerManager: {}
    dns: {}
    encryptionAlgorithm: RSA-2048
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.k8s.io
    kind: ClusterConfiguration
    kubernetesVersion: v1.34.0
    networking:
      dnsDomain: cluster.local
      podSubnet: 172.17.0.0/16
      serviceSubnet: 172.20.0.0/16
    proxy: {}
    scheduler: {}
kind: ConfigMap
metadata:
  creationTimestamp: "2025-11-26T13:23:01Z"
  name: kubeadm-config
  namespace: kube-system
  resourceVersion: "247"
  uid: b191c302-a5a6-4a8e-b233-f53354780077

controlplane ~ ➜  kubectl -n kube-system get configmap kubeadm-config -o yaml | awk '/podSubnet:/{print $2}' > /root/pod-cidr.txt

controlplane ~ ➜  cat /root/pod-cidr.txt 
172.17.0.0/16
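
An alternative that avoids awk is to pull only the ClusterConfiguration document and grep it; this prints the whole "podSubnet: ..." line, so the value still has to be cut out before writing it to the answer file:

# alternative extraction of the podSubnet
kubectl -n kube-system get configmap kubeadm-config -o jsonpath='{.data.ClusterConfiguration}' | grep podSubnet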

minikube practice environment setup


Install minikube

brew install minikube
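
brew covers macOS; on a Linux host the usual route is downloading the release binary, roughly as in the upstream install docs (amd64 assumed):

# Linux (amd64) install sketch
curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
sudo install minikube-linux-amd64 /usr/local/bin/minikube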

Start minikube

meatsby ξ‚° ~ ξ‚° minikube start
πŸ˜„  Darwin 15.6.1 (arm64) 의 minikube v1.37.0
✨  μžλ™μ μœΌλ‘œ docker λ“œλΌμ΄λ²„κ°€ μ„ νƒλ˜μ—ˆμŠ΅λ‹ˆλ‹€. λ‹€λ₯Έ λ“œλΌμ΄λ²„ λͺ©λ‘: virtualbox, ssh
πŸ“Œ  Docker Desktop λ“œλΌμ΄λ²„λ₯Ό 루트 κΆŒν•œμœΌλ‘œ μ‚¬μš© 쀑
πŸ‘  "minikube" ν΄λŸ¬μŠ€ν„°μ˜ "minikube" primary control-plane λ…Έλ“œλ₯Ό μ‹œμž‘ν•˜λŠ” 쀑
🚜  κΈ°λ³Έ 이미지 v0.0.48λ₯Ό κ°€μ Έμ˜€λŠ” 쀑 ...
πŸ’Ύ  μΏ λ²„λ„€ν‹°μŠ€ v1.34.0 을 λ‹€μš΄λ‘œλ“œ 쀑 ...
    > preloaded-images-k8s-v18-v1...:  332.38 MiB / 332.38 MiB  100.00% 22.68 M
πŸ”₯  docker container (CPUs=2, λ©”λͺ¨λ¦¬=4000MB) λ₯Ό μƒμ„±ν•˜λŠ” 쀑 ...
🐳  μΏ λ²„λ„€ν‹°μŠ€ v1.34.0 을 Docker 28.4.0 λŸ°νƒ€μž„μœΌλ‘œ μ„€μΉ˜ν•˜λŠ” 쀑
πŸ”—  bridge CNI (Container Networking Interface) λ₯Ό κ΅¬μ„±ν•˜λŠ” 쀑 ...
πŸ”Ž  Kubernetes ꡬ성 μš”μ†Œλ₯Ό 확인...
    β–ͺ 이미지 gcr.io/k8s-minikube/storage-provisioner:v5 μ‚¬μš© 쀑
🌟  μ• λ“œμ˜¨ ν™œμ„±ν™” : storage-provisioner, default-storageclass
πŸ„  λλ‚¬μŠ΅λ‹ˆλ‹€! kubectl이 "minikube" ν΄λŸ¬μŠ€ν„°μ™€ "default" λ„€μž„μŠ€νŽ˜μ΄μŠ€λ₯Ό 기본적으둜 μ‚¬μš©ν•˜λ„λ‘ κ΅¬μ„±λ˜μ—ˆμŠ΅λ‹ˆλ‹€
 
meatsby ξ‚° ~ ξ‚° k get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE   ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   ok
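
minikube start picks sensible defaults, but the driver, resources, and Kubernetes version can be pinned explicitly when needed; the flag values below are only examples:

# start with an explicit driver, resources, and Kubernetes version
minikube start --driver=docker --cpus=4 --memory=8192 --kubernetes-version=v1.34.0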

Run the dashboard

meatsby ξ‚° ~ ξ‚° minikube dashboard
πŸ”Œ  λŒ€μ‹œλ³΄λ“œλ₯Ό ν™œμ„±ν™”ν•˜λŠ” 쀑 ...
    β–ͺ 이미지 docker.io/kubernetesui/metrics-scraper:v1.0.8 μ‚¬μš© 쀑
    β–ͺ 이미지 docker.io/kubernetesui/dashboard:v2.7.0 μ‚¬μš© 쀑
πŸ’‘  Some dashboard features require the metrics-server addon. To enable all features please run:
 
	minikube addons enable metrics-server
 
πŸ€”  Dashboard 의 μƒνƒœλ₯Ό 확인 μ€‘μž…λ‹ˆλ‹€ ...
πŸš€  ν”„λ‘μ‹œλ₯Ό μ‹œμž‘ν•˜λŠ” 쀑 ...
πŸ€”  Proxy 의 μƒνƒœλ₯Ό 확인 μ€‘μž…λ‹ˆλ‹€ ...
πŸŽ‰  Opening http://127.0.0.1:57530/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser...
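
On a headless or remote shell where opening a browser is not possible, the dashboard proxy URL can simply be printed instead:

# print the dashboard URL without launching a browser
minikube dashboard --url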

Ingress practice

# install the ingress addon
meatsby ξ‚° ~ ξ‚° minikube addons enable ingress
 
πŸ’‘  ingress is an addon maintained by Kubernetes. For any concerns contact minikube on GitHub.
You can view the list of minikube maintainers at: https://github.com/kubernetes/minikube/blob/master/OWNERS
πŸ’‘  μ• λ“œμ˜¨μ΄ ν™œμ„±ν™”λœ ν›„ "minikube tunnel"을 μ‹€ν–‰ν•˜λ©΄ 인그레슀 λ¦¬μ†ŒμŠ€λ₯Ό "127.0.0.1"μ—μ„œ μ‚¬μš©ν•  수 μžˆμŠ΅λ‹ˆλ‹€
    β–ͺ 이미지 registry.k8s.io/ingress-nginx/controller:v1.13.2 μ‚¬μš© 쀑
    β–ͺ 이미지 registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.2 μ‚¬μš© 쀑
    β–ͺ 이미지 registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.6.2 μ‚¬μš© 쀑
πŸ”Ž  ingress μ• λ“œμ˜¨μ„ 확인 μ€‘μž…λ‹ˆλ‹€ ...
🌟  'ingress' μ• λ“œμ˜¨μ΄ ν™œμ„±ν™”λ˜μ—ˆμŠ΅λ‹ˆλ‹€
 
# an nginx IngressClass is created.
meatsby ξ‚° ~ ξ‚° k get ingressclass
NAME    CONTROLLER             PARAMETERS   AGE
nginx   k8s.io/ingress-nginx   <none>       9s
 
# an nginx pod is running as the ingress controller.
meatsby ξ‚° ~ ξ‚° k get pods -n ingress-nginx
NAME                                       READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-77dsb       0/1     Completed   0          14s
ingress-nginx-admission-patch-wgw4h        0/1     Completed   1          14s
ingress-nginx-controller-9cc49f96f-gf9mt   1/1     Running     0          14s
 
# deploy the example ingress.
meatsby ξ‚° ~ ξ‚° kubectl apply -f https://storage.googleapis.com/minikube-site-examples/ingress-example.yaml
pod/foo-app created
service/foo-service created
pod/bar-app created
service/bar-service created
ingress.networking.k8s.io/example-ingress created
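
The applied manifest itself is not shown above; in essence it routes /foo and /bar to the two services. A rough equivalent Ingress for reference (the service port 8080 is an assumption, check the actual example manifest):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress
spec:
  ingressClassName: nginx
  rules:
  - http:
      paths:
      - path: /foo
        pathType: Prefix
        backend:
          service:
            name: foo-service
            port:
              number: 8080
      - path: /bar
        pathType: Prefix
        backend:
          service:
            name: bar-service
            port:
              number: 8080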
 
# confirm the ingress was created.
meatsby ξ‚° ~ ξ‚° k get ingress -w
NAME              CLASS   HOSTS   ADDRESS   PORTS   AGE
example-ingress   nginx   *                 80      6s
example-ingress   nginx   *       192.168.49.2   80      59s
 
# on macOS (docker driver), minikube tunnel must be running to reach the ingress from the host.
meatsby ξ‚° ~ ξ‚° minikube tunnel
βœ…  Tunnel successfully started
 
πŸ“Œ  NOTE: Please do not close this terminal as this process must stay alive for the tunnel to be accessible ...
 
❗  The service/ingress example-ingress requires privileged ports to be exposed: [80 443]
πŸ”‘  sudo permission will be asked for it.
πŸƒ  example-ingress μ„œλΉ„μŠ€μ˜ 터널을 μ‹œμž‘ν•˜λŠ” 쀑/
# after starting minikube tunnel, send a curl request from a new terminal.
# traffic is routed to bar-app.
meatsby ξ‚° ~ ξ‚° curl 127.0.0.1/bar
Request served by bar-app
 
HTTP/1.1 GET /bar
 
Host: 127.0.0.1
Accept: */*
User-Agent: curl/8.7.1
X-Forwarded-For: 10.244.0.1
X-Forwarded-Host: 127.0.0.1
X-Forwarded-Port: 80
X-Forwarded-Proto: http
X-Forwarded-Scheme: http
X-Real-Ip: 10.244.0.1
X-Request-Id: 8fcbd14bfef458a7f88a9cef50b6698d
X-Scheme: http
 
# traffic is routed to foo-app.
meatsby ξ‚° ~ ξ‚° curl 127.0.0.1/foo
Request served by foo-app
 
HTTP/1.1 GET /foo
 
Host: 127.0.0.1
Accept: */*
User-Agent: curl/8.7.1
X-Forwarded-For: 10.244.0.1
X-Forwarded-Host: 127.0.0.1
X-Forwarded-Port: 80
X-Forwarded-Proto: http
X-Forwarded-Scheme: http
X-Real-Ip: 10.244.0.1
X-Request-Id: d1903d0f75a0cb744c3b5d01f08bd419
X-Scheme: http
 
# check what X-Forwarded-For: 10.244.0.1 actually is.
meatsby ξ‚° ~ ξ‚° minikube node list
minikube	192.168.49.2
 
# traffic reaches the Pod from the bridge interface at 10.244.0.1.
meatsby ξ‚° ~ ξ‚° minikube ssh
docker@minikube:~$ ip addr show | grep bridge: -A5
13: bridge: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether 66:bc:1e:16:98:64 brd ff:ff:ff:ff:ff:ff
    inet 10.244.0.1/16 brd 10.244.255.255 scope global bridge
       valid_lft forever preferred_lft forever
    inet6 fe80::64bc:1eff:fe16:9864/64 scope link
       valid_lft forever preferred_lft forever
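
The pod IPs tell the same story: listing them with -o wide shows addresses inside 10.244.0.0/16, i.e. behind that bridge interface:

# confirm the pod IPs sit inside the bridge subnet
kubectl get pods -o wide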

Install metrics-server

# metrics-server must be running for the top commands to work.
meatsby ξ‚° ~ ξ‚° k top no
error: Metrics API not available
 
meatsby ξ‚° ~ ξ‚° k top po
error: Metrics API not available
 
# install the metrics-server addon
meatsby ξ‚° ~ ξ‚° minikube addons enable metrics-server
πŸ’‘  metrics-server is an addon maintained by Kubernetes. For any concerns contact minikube on GitHub.
You can view the list of minikube maintainers at: https://github.com/kubernetes/minikube/blob/master/OWNERS
    β–ͺ 이미지 registry.k8s.io/metrics-server/metrics-server:v0.8.0 μ‚¬μš© 쀑
🌟  'metrics-server' μ• λ“œμ˜¨μ΄ ν™œμ„±ν™”λ˜μ—ˆμŠ΅λ‹ˆλ‹€
 
# check the addon list
meatsby ξ‚° ~ ξ‚° minikube addons list
β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
β”‚         ADDON NAME          β”‚ PROFILE  β”‚   STATUS   β”‚               MAINTAINER               β”‚
β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€
β”‚ ambassador                  β”‚ minikube β”‚ disabled   β”‚ 3rd party (Ambassador)                 β”‚
β”‚ amd-gpu-device-plugin       β”‚ minikube β”‚ disabled   β”‚ 3rd party (AMD)                        β”‚
β”‚ auto-pause                  β”‚ minikube β”‚ disabled   β”‚ minikube                               β”‚
β”‚ cloud-spanner               β”‚ minikube β”‚ disabled   β”‚ Google                                 β”‚
β”‚ csi-hostpath-driver         β”‚ minikube β”‚ disabled   β”‚ Kubernetes                             β”‚
β”‚ dashboard                   β”‚ minikube β”‚ enabled βœ… β”‚ Kubernetes                             β”‚
β”‚ default-storageclass        β”‚ minikube β”‚ enabled βœ… β”‚ Kubernetes                             β”‚
β”‚ efk                         β”‚ minikube β”‚ disabled   β”‚ 3rd party (Elastic)                    β”‚
β”‚ freshpod                    β”‚ minikube β”‚ disabled   β”‚ Google                                 β”‚
β”‚ gcp-auth                    β”‚ minikube β”‚ disabled   β”‚ Google                                 β”‚
β”‚ gvisor                      β”‚ minikube β”‚ disabled   β”‚ minikube                               β”‚
β”‚ headlamp                    β”‚ minikube β”‚ disabled   β”‚ 3rd party (kinvolk.io)                 β”‚
β”‚ inaccel                     β”‚ minikube β”‚ disabled   β”‚ 3rd party (InAccel [info@inaccel.com]) β”‚
β”‚ ingress                     β”‚ minikube β”‚ enabled βœ… β”‚ Kubernetes                             β”‚
β”‚ ingress-dns                 β”‚ minikube β”‚ disabled   β”‚ minikube                               β”‚
β”‚ inspektor-gadget            β”‚ minikube β”‚ disabled   β”‚ 3rd party (inspektor-gadget.io)        β”‚
β”‚ istio                       β”‚ minikube β”‚ disabled   β”‚ 3rd party (Istio)                      β”‚
β”‚ istio-provisioner           β”‚ minikube β”‚ disabled   β”‚ 3rd party (Istio)                      β”‚
β”‚ kong                        β”‚ minikube β”‚ disabled   β”‚ 3rd party (Kong HQ)                    β”‚
β”‚ kubeflow                    β”‚ minikube β”‚ disabled   β”‚ 3rd party                              β”‚
β”‚ kubetail                    β”‚ minikube β”‚ disabled   β”‚ 3rd party (kubetail.com)               β”‚
β”‚ kubevirt                    β”‚ minikube β”‚ disabled   β”‚ 3rd party (KubeVirt)                   β”‚
β”‚ logviewer                   β”‚ minikube β”‚ disabled   β”‚ 3rd party (unknown)                    β”‚
β”‚ metallb                     β”‚ minikube β”‚ disabled   β”‚ 3rd party (MetalLB)                    β”‚
β”‚ metrics-server              β”‚ minikube β”‚ enabled βœ… β”‚ Kubernetes                             β”‚
β”‚ nvidia-device-plugin        β”‚ minikube β”‚ disabled   β”‚ 3rd party (NVIDIA)                     β”‚
β”‚ nvidia-driver-installer     β”‚ minikube β”‚ disabled   β”‚ 3rd party (NVIDIA)                     β”‚
β”‚ nvidia-gpu-device-plugin    β”‚ minikube β”‚ disabled   β”‚ 3rd party (NVIDIA)                     β”‚
β”‚ olm                         β”‚ minikube β”‚ disabled   β”‚ 3rd party (Operator Framework)         β”‚
β”‚ pod-security-policy         β”‚ minikube β”‚ disabled   β”‚ 3rd party (unknown)                    β”‚
β”‚ portainer                   β”‚ minikube β”‚ disabled   β”‚ 3rd party (Portainer.io)               β”‚
β”‚ registry                    β”‚ minikube β”‚ disabled   β”‚ minikube                               β”‚
β”‚ registry-aliases            β”‚ minikube β”‚ disabled   β”‚ 3rd party (unknown)                    β”‚
β”‚ registry-creds              β”‚ minikube β”‚ disabled   β”‚ 3rd party (UPMC Enterprises)           β”‚
β”‚ storage-provisioner         β”‚ minikube β”‚ enabled βœ… β”‚ minikube                               β”‚
β”‚ storage-provisioner-gluster β”‚ minikube β”‚ disabled   β”‚ 3rd party (Gluster)                    β”‚
β”‚ storage-provisioner-rancher β”‚ minikube β”‚ disabled   β”‚ 3rd party (Rancher)                    β”‚
β”‚ volcano                     β”‚ minikube β”‚ disabled   β”‚ third-party (volcano)                  β”‚
β”‚ volumesnapshots             β”‚ minikube β”‚ disabled   β”‚ Kubernetes                             β”‚
β”‚ yakd                        β”‚ minikube β”‚ disabled   β”‚ 3rd party (marcnuri.com)               β”‚
└────────────── β”΄β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
 
# metrics are now available.
meatsby ξ‚° ~ ξ‚° k top no
NAME       CPU(cores)   CPU(%)   MEMORY(bytes)   MEMORY(%)
minikube   309m         3%       1110Mi          14%
 
meatsby ξ‚° ~ ξ‚° k top po
NAME      CPU(cores)   MEMORY(bytes)
bar-app   1m           8Mi
foo-app   1m           6Mi
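
kubectl top also accepts --sort-by, which becomes useful once there are many pods:

# sort usage before reading it off
kubectl top pods -A --sort-by=cpu
kubectl top pods -A --sort-by=memory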

Build a multi-node cluster

meatsby ξ‚° ~ ξ‚° minikube node list
minikube	192.168.49.2
 
# add 3 worker nodes
meatsby ξ‚° ~ ξ‚° minikube node add --worker
πŸ˜„  λ…Έλ“œ m02 λ₯Ό ν΄λŸ¬μŠ€ν„° minikube 에 [worker] 둜 μΆ”κ°€ν•©λ‹ˆλ‹€
❗  CNI 없이 ν΄λŸ¬μŠ€ν„°κ°€ μƒμ„±λ˜μ—ˆμœΌλ―€λ‘œ, ν΄λŸ¬μŠ€ν„°μ— λ…Έλ“œλ₯Ό μΆ”κ°€ν•˜λ©΄ λ„€νŠΈμ›Œν‚Ήμ΄ 쀑단될 수 μžˆμŠ΅λ‹ˆλ‹€.
πŸ‘  "minikube" ν΄λŸ¬μŠ€ν„°μ˜ "minikube-m02" worker λ…Έλ“œλ₯Ό μ‹œμž‘ν•˜λŠ” 쀑
🚜  κΈ°λ³Έ 이미지 v0.0.48λ₯Ό κ°€μ Έμ˜€λŠ” 쀑 ...
πŸ”₯  docker container (CPUs=2, λ©”λͺ¨λ¦¬=2200MB) λ₯Ό μƒμ„±ν•˜λŠ” 쀑 ...
🐳  μΏ λ²„λ„€ν‹°μŠ€ v1.34.0 을 Docker 28.4.0 λŸ°νƒ€μž„μœΌλ‘œ μ„€μΉ˜ν•˜λŠ” 쀑
πŸ”Ž  Kubernetes ꡬ성 μš”μ†Œλ₯Ό 확인...
πŸ„  m02 λ₯Ό minikube 에 μ„±κ³΅μ μœΌλ‘œ μΆ”κ°€ν•˜μ˜€μŠ΅λ‹ˆλ‹€!
 
meatsby ξ‚° ~ ξ‚° minikube node add --worker
πŸ˜„  λ…Έλ“œ m03 λ₯Ό ν΄λŸ¬μŠ€ν„° minikube 에 [worker] 둜 μΆ”κ°€ν•©λ‹ˆλ‹€
πŸ‘  "minikube" ν΄λŸ¬μŠ€ν„°μ˜ "minikube-m03" worker λ…Έλ“œλ₯Ό μ‹œμž‘ν•˜λŠ” 쀑
🚜  κΈ°λ³Έ 이미지 v0.0.48λ₯Ό κ°€μ Έμ˜€λŠ” 쀑 ...
πŸ”₯  docker container (CPUs=2, λ©”λͺ¨λ¦¬=2200MB) λ₯Ό μƒμ„±ν•˜λŠ” 쀑 ...
🐳  μΏ λ²„λ„€ν‹°μŠ€ v1.34.0 을 Docker 28.4.0 λŸ°νƒ€μž„μœΌλ‘œ μ„€μΉ˜ν•˜λŠ” 쀑
πŸ”Ž  Kubernetes ꡬ성 μš”μ†Œλ₯Ό 확인...
πŸ„  m03 λ₯Ό minikube 에 μ„±κ³΅μ μœΌλ‘œ μΆ”κ°€ν•˜μ˜€μŠ΅λ‹ˆλ‹€!
 
meatsby ξ‚° ~ ξ‚° minikube node add --worker
πŸ˜„  λ…Έλ“œ m04 λ₯Ό ν΄λŸ¬μŠ€ν„° minikube 에 [worker] 둜 μΆ”κ°€ν•©λ‹ˆλ‹€
πŸ‘  "minikube" ν΄λŸ¬μŠ€ν„°μ˜ "minikube-m04" worker λ…Έλ“œλ₯Ό μ‹œμž‘ν•˜λŠ” 쀑
🚜  κΈ°λ³Έ 이미지 v0.0.48λ₯Ό κ°€μ Έμ˜€λŠ” 쀑 ...
πŸ”₯  docker container (CPUs=2, λ©”λͺ¨λ¦¬=2200MB) λ₯Ό μƒμ„±ν•˜λŠ” 쀑 ...
🐳  μΏ λ²„λ„€ν‹°μŠ€ v1.34.0 을 Docker 28.4.0 λŸ°νƒ€μž„μœΌλ‘œ μ„€μΉ˜ν•˜λŠ” 쀑
πŸ”Ž  Kubernetes ꡬ성 μš”μ†Œλ₯Ό 확인...
πŸ„  m04 λ₯Ό minikube 에 μ„±κ³΅μ μœΌλ‘œ μΆ”κ°€ν•˜μ˜€μŠ΅λ‹ˆλ‹€!
 
meatsby ξ‚° ~ ξ‚° minikube node list
minikube	192.168.49.2
minikube-m02	192.168.49.3
minikube-m03	192.168.49.4
minikube-m04	192.168.49.5
 
meatsby ξ‚° ~ ξ‚° k get nodes
NAME           STATUS   ROLES           AGE     VERSION
minikube       Ready    control-plane   3h39m   v1.34.0
minikube-m02   Ready    <none>          2m8s    v1.34.0
minikube-m03   Ready    <none>          114s    v1.34.0
minikube-m04   Ready    <none>          99s     v1.34.0
 
# worker node labeling
meatsby ξ‚° ~ ξ‚° k label node minikube-m02 node-role.kubernetes.io/worker=worker
node/minikube-m02 labeled
 
meatsby ξ‚° ~ ξ‚° k label node minikube-m03 node-role.kubernetes.io/worker=worker
node/minikube-m03 labeled
 
meatsby ξ‚° ~ ξ‚° k label node minikube-m04 node-role.kubernetes.io/worker=worker
node/minikube-m04 labeled
 
meatsby ξ‚° ~ ξ‚° k get nodes
NAME           STATUS   ROLES           AGE     VERSION
minikube       Ready    control-plane   3h40m   v1.34.0
minikube-m02   Ready    worker          2m52s   v1.34.0
minikube-m03   Ready    worker          2m38s   v1.34.0
minikube-m04   Ready    worker          2m23s   v1.34.0
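
The worker label added above can double as a scheduling constraint; a minimal sketch of pinning a pod to the labeled workers (pod name and image are arbitrary):

apiVersion: v1
kind: Pod
metadata:
  name: nginx-on-worker
spec:
  nodeSelector:
    node-role.kubernetes.io/worker: worker
  containers:
  - name: nginx
    image: nginx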

References