k -n=admin2406 get deployments.apps -o=json
k -n=admin2406 get deployments.apps -o=custom-columns=DEPLOYMENT:.metadata.name,CONTAINER_IMAGE:.spec.template.spec.containers[0].image,READY_REPLICAS:.status.readyReplicas,NAMESPACE:.metadata.namespace > /opt/admin2406_data
# check the kubeconfig file
kubectl get pods --kubeconfig /root/CKA/admin.kubeconfig
# check the default config
cat ~/.kube/config
# fix the incorrect part
vi /root/CKA/admin.kubeconfig
# check the kubeconfig file again
kubectl get pods --kubeconfig /root/CKA/admin.kubeconfig
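The mismatch is usually in the cluster entry's server host or port; a quick, hedged way to compare the broken file against the working default config:
grep -n 'server:' ~/.kube/config /root/CKA/admin.kubeconfig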
Fix kubeconfig (8)
controlplane ~ ➜ cat CKA/super.kubeconfig
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: ...
server: https://controlplane:9999
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
users:
- name: kubernetes-admin
user:
client-certificate-data: ...
client-key-data: ...
controlplane ~ ➜ k get node --kubeconfig=/root/CKA/super.kubeconfig
E1126 14:09:25.390277 46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.390610 46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.392730 46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.393061 46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
E1126 14:09:25.394498 46273 memcache.go:265] "Unhandled Error" err="couldn't get current server API group list: Get \"https://controlplane:9999/api?timeout=32s\": dial tcp 192.168.0.191:9999: connect: connection refused"
The connection to the server controlplane:9999 was refused - did you specify the right host or port?
controlplane ~ ✖ sudo netstat -tulnp | grep kube-apiserver
tcp6 0 0 :::6443 :::* LISTEN 2847/kube-apiserver
# fix the port
controlplane ~ ➜ vi /root/CKA/super.kubeconfig
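# after the edit, the cluster entry should point at the port kube-apiserver actually listens on (6443, per the netstat output above):
#   server: https://controlplane:6443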
controlplane ~ ➜ k get node --kubeconfig=/root/CKA/super.kubeconfig
NAME STATUS ROLES AGE VERSION
controlplane Ready control-plane 49m v1.34.0
node01 Ready <none> 48m v1.34.0
Node Troubleshooting
# find the node in NotReady state in the cluster
# identify the cause and bring the node back to Ready
# 1. containerd must be running
# 2. kubelet must be running
# 3. the CNI must be working
kubectl get nodes
ssh hk8s-worker2
systemctl status containerd
systemctl status kubelet   # kubelet is usually inactive here
systemctl enable --now kubelet
systemctl status kubelet
exit
kubectl get nodes
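If containerd and kubelet are both active but the node stays NotReady, check the CNI next; a hedged sketch using the usual default paths (not part of the original checklist):
ls /etc/cni/net.d/
ls /opt/cni/bin/
journalctl -u kubelet --no-pager | grep -i cni | tail -n 5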
Fix controller-manager (10)
controlplane ~ ➜ k get deploy nginx-deploy
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deploy 1/1 1 1 5m36s
controlplane ~ ➜ k scale deployment nginx-deploy --replicas=3
deployment.apps/nginx-deploy scaled
controlplane ~ ➜ k get deploy nginx-deploy
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deploy 1/3 1 1 6m16s
# no new scaling events are coming from the deployment-controller
controlplane ~ ➜ k describe deploy nginx-deploy | grep -A5 Events
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 8m36s deployment-controller Scaled up replica set nginx-deploy-59874dbc6b from 0 to 1
# kube-controller-manager is missing
controlplane ~ ➜ k get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-587f6db6c5-bkn26 1/1 Running 0 55m
canal-jg7mj 2/2 Running 0 54m
canal-lmhj8 2/2 Running 0 55m
coredns-6678bcd974-jkff2 1/1 Running 0 55m
coredns-6678bcd974-wjcgs 1/1 Running 0 55m
etcd-controlplane 1/1 Running 0 55m
kube-apiserver-controlplane 1/1 Running 0 55m
kube-proxy-mqwb8 1/1 Running 0 55m
kube-proxy-vcc8z 1/1 Running 0 54m
kube-scheduler-controlplane 1/1 Running 0 55m
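# to locate the fault before editing (a hedged diagnostic, not from this session; the exact error differs per lab):
#   grep -nE 'image:|command' /etc/kubernetes/manifests/kube-controller-manager.yaml
#   journalctl -u kubelet --no-pager | grep -i controller-manager | tail -n 5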
# fix the manifest
controlplane ~ ➜ vi /etc/kubernetes/manifests/kube-controller-manager.yaml
controlplane ~ ➜ k get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-587f6db6c5-bkn26 1/1 Running 0 58m
canal-jg7mj 2/2 Running 0 57m
canal-lmhj8 2/2 Running 0 58m
coredns-6678bcd974-jkff2 1/1 Running 0 58m
coredns-6678bcd974-wjcgs 1/1 Running 0 58m
etcd-controlplane 1/1 Running 0 58m
kube-apiserver-controlplane 1/1 Running 0 58m
kube-controller-manager-controlplane 1/1 Running 0 21s
kube-proxy-mqwb8 1/1 Running 0 58m
kube-proxy-vcc8z 1/1 Running 0 57m
kube-scheduler-controlplane 1/1 Running 0 58m
controlplane ~ ➜ k get deploy nginx-deploy
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deploy 3/3 3 3 12m
controlplane ~ ➜ k get pods
NAME READY STATUS RESTARTS AGE
messaging 1/1 Running 0 4m2s
controlplane ~ ➜ k expose pod messaging --port=6379 --name=messaging-service
service/messaging-service exposed
controlplane ~ ➜ k get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 172.20.0.1 <none> 443/TCP 48m
messaging-service ClusterIP 172.20.26.32 <none> 6379/TCP 6s
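A quick way to confirm the new service actually selects the messaging pod (a hedged check, not part of the recorded session):
k describe svc messaging-service | grep -i endpoints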
controlplane ~ ➜ k run nginx-resolver --image=nginx
pod/nginx-resolver created
controlplane ~ ➜ k expose pod nginx-resolver --name=nginx-resolver-svc --port=80 --target-port=80 --type=ClusterIP
service/nginx-resolver-svc exposed
controlplane ~ ➜ k get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 172.20.0.1 <none> 443/TCP 51m
nginx-resolver-svc ClusterIP 172.20.137.21 <none> 80/TCP 4s
controlplane ~ ➜ k describe svc nginx-resolver-svc
Name: nginx-resolver-svc
Namespace: default
Labels: run=nginx-resolver
Annotations: <none>
Selector: run=nginx-resolver
Type: ClusterIP
IP Family Policy: SingleStack
IP Families: IPv4
IP: 172.20.137.21
IPs: 172.20.137.21
Port: <unset> 80/TCP
TargetPort: 80/TCP
Endpoints: 172.17.1.15:80
Session Affinity: None
Internal Traffic Policy: Cluster
Events: <none>
controlplane ~ ➜ k get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deploy-5846bc77f5-2szm4 1/1 Running 0 20m 172.17.1.14 node01 <none> <none>
nginx-resolver 1/1 Running 0 85s 172.17.1.15 node01 <none> <none>
controlplane ~ ➜ k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup nginx-resolver-svc > /root/CKA/nginx.svc
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.
controlplane ~ ➜ cat /root/CKA/nginx.svc
Address 1: 172.20.0.10 kube-dns.kube-system.svc.cluster.local
Name: nginx-resolver-svc
Address 1: 172.20.137.21 nginx-resolver-svc.default.svc.cluster.local
pod "test-nslookup" deleted from default namespace
controlplane ~ ➜ k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup 172-17-1-15.default.pod > /root/CKA/nginx.pod
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.
controlplane ~ ➜ cat /root/CKA/nginx.pod
Address 1: 172.20.0.10 kube-dns.kube-system.svc.cluster.local
Name: 172-17-1-15.default.pod
Address 1: 172.17.1.15 172-17-1-15.nginx-resolver-svc.default.svc.cluster.local
pod "test-nslookup" deleted from default namespace
Storage Class, PV, PVC
Fix PVC
# check the Deployment
kubectl get deployment -n alpha alpha-mysql -o yaml | yq e .spec.template.spec.containers -
# check the Pod error
kubectl get pods -n alpha
kubectl describe pod -n alpha alpha-mysql-xxxxxxxx-xxxxx
# check the PV
kubectl get pv alpha-pv
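The usual fix is to make the PVC's storageClassName, accessModes, and requested size match what alpha-pv offers, then recreate it; a minimal sketch in which every value is an assumption to be copied from kubectl get pv alpha-pv -o yaml (the claim name alpha-claim is hypothetical):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: alpha-claim          # hypothetical name; use the one required by the task
  namespace: alpha
spec:
  storageClassName: slow     # assumption: copy from the PV
  accessModes:
  - ReadWriteOnce            # assumption: copy from the PV
  resources:
    requests:
      storage: 1Gi           # assumption: must not exceed the PV capacity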
controlplane ~ ➜ vi analytics-vpa.yaml
controlplane ~ ➜ cat analytics-vpa.yaml
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: analytics-vpa
  namespace: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: analytics-deployment
  updatePolicy:
    updateMode: Auto
controlplane ~ ➜ k apply -f analytics-vpa.yaml
verticalpodautoscaler.autoscaling.k8s.io/analytics-vpa created
controlplane ~ ➜ k get vpa
NAME MODE CPU MEM PROVIDED AGE
analytics-vpa Auto False 38s
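The empty CPU/MEM columns and PROVIDED=False simply mean no recommendation has been produced yet; the VPA recommender must be running and needs a short while to observe the target. A hedged check (component names assume the standard VPA install):
k get pods -n kube-system | grep -i vpa
k describe vpa analytics-vpa | grep -A10 -i recommendation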
Create HPA (10)
controlplane ~ ➜ ls
CKA csr.yaml local-sc.yaml logging-deploy.yaml rbac.yaml static.yaml webapp-hpa.yaml webapp-ingress.yaml
controlplane ~ ➜ vi webapp-hpa.yaml
controlplane ~ ➜ cat webapp-hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: backend-hpa
  namespace: backend
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: backend-deployment
  minReplicas: 3
  maxReplicas: 15
  metrics:
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 65
controlplane ~ ➜ k apply -f webapp-hpa.yaml
horizontalpodautoscaler.autoscaling/backend-hpa unchanged
controlplane ~ ➜ k -n backend describe hpa
Name: backend-hpa
Namespace: backend
Labels: <none>
Annotations: <none>
CreationTimestamp: Sat, 08 Nov 2025 11:57:11 +0000
Reference: Deployment/backend-deployment
Metrics: ( current / target )
  resource memory on pods (as a percentage of request): <unknown> / 65%
Min replicas: 3
Max replicas: 15
Deployment pods: 3 current / 0 desired
Conditions:
  Type Status Reason Message
  ---- ------ ------ -------
  AbleToScale True SucceededGetScale the HPA controller was able to get the target's current scale
  ScalingActive False FailedGetResourceMetric the HPA was unable to compute the replica count: failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
Events:
  Type Reason Age From Message
  ---- ------ ---- ---- -------
  Warning FailedGetResourceMetric 4s (x6 over 79s) horizontal-pod-autoscaler failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
  Warning FailedComputeMetricsReplicas 4s (x6 over 79s) horizontal-pod-autoscaler invalid metrics (1 invalid out of 1), first error is: failed to get memory resource metric value: failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
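The FailedGetResourceMetric errors mean the resource metrics API (metrics.k8s.io) is not being served, which in practice means metrics-server is missing or unhealthy; a hedged check (the deployment name assumes the standard metrics-server install):
k -n kube-system get deploy metrics-server
k get apiservices | grep metrics.k8s.io
k top pods -n backend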
Create HPA (6)
controlplane ~ ➜ vi hpa.yaml
controlplane ~ ➜ cat hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: api-hpa
namespace: api
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: api-deployment
minReplicas: 1
maxReplicas: 20
metrics:
- type: Pods
pods:
metric:
name: requests_per_second
target:
type: AverageValue
averageValue: "1000"
controlplane ~ ➜ k apply -f hpa.yaml
horizontalpodautoscaler.autoscaling/api-hpa created
controlplane ~ ➜ k describe hpa -n api
Name: api-hpa
Namespace: api
Labels: <none>
Annotations: <none>
CreationTimestamp: Wed, 26 Nov 2025 14:29:06 +0000
Reference: Deployment/api-deployment
Metrics: ( current / target )
"requests_per_second" on pods: <unknown> / 1k
Min replicas: 1
Max replicas: 20
Deployment pods: 0 current / 0 desired
Events: <none>
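requests_per_second is a Pods-type custom metric, so it is served by the custom metrics API rather than metrics-server; the value stays <unknown> until a custom-metrics adapter (for example prometheus-adapter) is installed and registered. A hedged check:
k get apiservices | grep custom.metrics.k8s.io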
Gateway API, TLS Integration, and Ingress
Create Gateway (6)
controlplane ~ ➜ vi web-gateway.yaml
controlplane ~ ➜ cat web-gateway.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: web-gateway
  namespace: nginx-gateway
spec:
  gatewayClassName: nginx
  listeners:
  - name: http
    protocol: HTTP
    port: 80
controlplane ~ ➜ k apply -f web-gateway.yaml
gateway.gateway.networking.k8s.io/web-gateway created
controlplane ~ ➜ k -n nginx-gateway get gateway
NAME CLASS ADDRESS PROGRAMMED AGE
web-gateway nginx True 13s
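The section title also covers TLS; a minimal sketch of an HTTPS listener on the same Gateway, assuming a TLS Secret (here named kodekloud-tls, a hypothetical name) already exists in the nginx-gateway namespace:
  listeners:
  - name: https
    protocol: HTTPS
    port: 443
    hostname: "kodekloud.com"       # hypothetical hostname
    tls:
      mode: Terminate
      certificateRefs:
      - kind: Secret
        name: kodekloud-tls         # hypothetical Secret holding tls.crt/tls.key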
Create Ingress (10)
controlplane ~ ➜ k -n ingress-ns get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
webapp-deploy 1/1 1 1 10m
controlplane ~ ➜ k -n ingress-ns get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
webapp-svc ClusterIP 172.20.133.154 <none> 80/TCP 10m
controlplane ~ ➜ vi webapp-ingress.yaml
controlplane ~ ➜ cat webapp-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: webapp-ingress
  namespace: ingress-ns
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - host: "kodekloud-ingress.app"
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: webapp-svc
            port:
              number: 80
controlplane ~ ➜ k create -f webapp-ingress.yaml
ingress.networking.k8s.io/webapp-ingress created
controlplane ~ ➜ k -n ingress-ns get ingress
NAME CLASS HOSTS ADDRESS PORTS AGE
webapp-ingress nginx kodekloud-ingress.app 80 9s
controlplane ~ ➜ curl -s http://kodekloud-ingress.app/
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
controlplane ~ ➜ k get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 172.20.0.1 <none> 443/TCP 71m
np-test-service ClusterIP 172.20.59.134 <none> 80/TCP 42m
web-service ClusterIP 172.20.154.119 <none> 80/TCP 4m34s
web-service-v2 ClusterIP 172.20.191.4 <none> 80/TCP 4m34s
controlplane ~ ➜ k get gateway
NAME CLASS ADDRESS PROGRAMMED AGE
web-gateway nginx True 4m38s
controlplane ~ ➜ vi hr.yaml
controlplane ~ ➜ cat hr.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: web-route
spec:
parentRefs:
- name: web-gateway
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- name: web-service
port: 80
weight: 80
- name: web-service-v2
port: 80
weight: 20
controlplane ~ ➜ k apply -f hr.yaml
httproute.gateway.networking.k8s.io/web-route created
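To confirm the Gateway accepted the route and the 80/20 split is in place (a hedged check, not from the recorded session):
k describe httproute web-route
k get httproute web-route -o jsonpath='{.status.parents[0].conditions[*].type}{"\n"}'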
Helm
Helm Chart upgrade (8)
controlplane ~ ➜ helm list -n kk-ns
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
kk-mock1 kk-ns 1 2025-11-06 14:28:24.255194099 +0000 UTC deployed nginx-18.1.0 1.27.0
controlplane ~ ➜ helm repo list
NAME URL
kk-mock1 https://charts.bitnami.com/bitnami
controlplane ~ ➜ helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "kk-mock1" chart repository
Update Complete. ⎈Happy Helming!⎈
controlplane ~ ➜ helm repo list
NAME URL
kk-mock1 https://charts.bitnami.com/bitnami
controlplane ~ ➜ helm search repo nginx
NAME CHART VERSION APP VERSION DESCRIPTION
kk-mock1/nginx 22.2.4 1.29.3 NGINX Open Source is a web server that can be a...
kk-mock1/nginx-ingress-controller 12.0.7 1.13.1 NGINX Ingress Controller is an Ingress controll...
kk-mock1/nginx-intel 2.1.15 0.4.9 DEPRECATED NGINX Open Source for Intel is a lig...
controlplane ~ ➜ helm search repo nginx --versions | grep 18.1.15
kk-mock1/nginx 18.1.15 1.27.1 NGINX Open Source is a web server that can be a...
controlplane ~ ➜ helm upgrade kk-mock1 kk-mock1/nginx --version=18.1.5 -n kk-ns
Release "kk-mock1" has been upgraded. Happy Helming!
NAME: kk-mock1
LAST DEPLOYED: Thu Nov 6 14:31:52 2025
NAMESPACE: kk-ns
STATUS: deployed
REVISION: 2
TEST SUITE: None
NOTES:
CHART NAME: nginx
CHART VERSION: 18.1.5
APP VERSION: 1.27.0
** Please be patient while the chart is being deployed **
NGINX can be accessed through the following DNS name from within your cluster:
  kk-mock1-nginx.kk-ns.svc.cluster.local (port 80)
To access NGINX from outside the cluster, follow the steps below:
1. Get the NGINX URL by running these commands:
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  Watch the status with: 'kubectl get svc --namespace kk-ns -w kk-mock1-nginx'
  export SERVICE_PORT=$(kubectl get --namespace kk-ns -o jsonpath="{.spec.ports[0].port}" services kk-mock1-nginx)
  export SERVICE_IP=$(kubectl get svc --namespace kk-ns kk-mock1-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo "http://${SERVICE_IP}:${SERVICE_PORT}"
WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs:
  - cloneStaticSiteFromGit.gitSync.resources
  - resources
+info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
⚠ SECURITY WARNING: Original containers have been substituted. This Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables.
Substituted images detected:
  - %!s(<nil>)/:%!s(<nil>)
controlplane ~ ➜ helm list -n kk-ns
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
kk-mock1 kk-ns 2 2025-11-06 14:31:52.339403678 +0000 UTC deployed nginx-18.1.5 1.27.0
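helm history shows, per revision, which chart version was actually deployed (a quick, hedged double-check, not part of the recorded session):
helm history kk-mock1 -n kk-ns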
controlplane ~ ➜ k create serviceaccount pvviewer
serviceaccount/pvviewer created
controlplane ~ ➜ k get sa
NAME SECRETS AGE
default 0 19m
pvviewer 0 4s
controlplane ~ ➜ k create clusterrole pvviewer-role --resource=persistentvolumes --verb=list
clusterrole.rbac.authorization.k8s.io/pvviewer-role created
controlplane ~ ➜ k describe clusterrole pvviewer-role
Name: pvviewer-role
Labels: <none>
Annotations: <none>
PolicyRule:
Resources Non-Resource URLs Resource Names Verbs
--------- ----------------- -------------- -----
persistentvolumes [] [] [list]
controlplane ~ ➜ k create clusterrolebinding pvviewer-role-binding --clusterrole=pvviewer-role --serviceaccount=default:pvviewer
clusterrolebinding.rbac.authorization.k8s.io/pvviewer-role-binding created
controlplane ~ ➜ k describe clusterrolebindings pvviewer-role-binding
Name: pvviewer-role-binding
Labels: <none>
Annotations: <none>
Role:
Kind: ClusterRole
Name: pvviewer-role
Subjects:
Kind Name Namespace
---- ---- ---------
ServiceAccount pvviewer default
controlplane ~ ➜ k run pvviewer --image=redis --dry-run=client -o yaml
apiVersion: v1
kind: Pod
metadata:
labels:
run: pvviewer
name: pvviewer
spec:
containers:
- image: redis
name: pvviewer
resources: {}
dnsPolicy: ClusterFirst
restartPolicy: Always
status: {}
controlplane ~ ➜ vi redis.yaml
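# the edit presumably adds the ServiceAccount to the generated pod spec (an assumption based on the task wording; the rest of the dry-run output stays as-is):
#   spec:
#     serviceAccountName: pvviewer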
controlplane ~ ➜ k apply -f redis.yaml
pod/pvviewer created
controlplane ~ ➜ k get pod
NAME READY STATUS RESTARTS AGE
pvviewer 1/1 Running 0 6s
CNI and Network Policy Deployment
NetworkPolicy (6)
controlplane ~ ➜ ls
CKA kodekloud.crt local-sc.yaml net-pol-1.yaml net-pol-3.yaml static.yaml webapp-ingress.yaml
csr.yaml kodekloud.key logging-deploy.yaml net-pol-2.yaml rbac.yaml webapp-hpa.yaml web-gateway.yaml
controlplane ~ ➜ cat net-pol-1.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: net-policy-1
  namespace: backend
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          access: allowed
    ports:
    - protocol: TCP
      port: 80
controlplane ~ ➜ cat net-pol-2.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: net-policy-2
  namespace: backend
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: frontend
    - namespaceSelector:
        matchLabels:
          name: databases
    ports:
    - protocol: TCP
      port: 80
controlplane ~ ➜ cat net-pol-3.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: net-policy-3
  namespace: backend
spec:
  podSelector: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: frontend
    ports:
    - protocol: TCP
      port: 80
controlplane ~ ➜ k get ns --show-labels
NAME STATUS AGE LABELS
atlanta-page-04 Active 75m kubernetes.io/metadata.name=atlanta-page-04
backend Active 18m kubernetes.io/metadata.name=backend,name=backend
cka5673 Active 7m30s kubernetes.io/metadata.name=cka5673
default Active 81m kubernetes.io/metadata.name=default
development Active 51m kubernetes.io/metadata.name=development
digi-locker-02 Active 75m kubernetes.io/metadata.name=digi-locker-02
frontend Active 93s kubernetes.io/metadata.name=frontend,name=frontend
ingress-nginx Active 66m app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx,kubernetes.io/metadata.name=ingress-nginx
ingress-ns Active 66m kubernetes.io/metadata.name=ingress-ns
kube-node-lease Active 81m kubernetes.io/metadata.name=kube-node-lease
kube-public Active 81m kubernetes.io/metadata.name=kube-public
kube-system Active 81m kubernetes.io/metadata.name=kube-system
logging-ns Active 72m kubernetes.io/metadata.name=logging-ns
nginx-gateway Active 75m kubernetes.io/metadata.name=nginx-gateway
security-alpha-01 Active 75m kubernetes.io/metadata.name=security-alpha-01
web-dashboard-03 Active 75m kubernetes.io/metadata.name=web-dashboard-03
# only the frontend namespace carries a matching label (name=frontend); no namespace is labelled access=allowed or name=databases, so net-pol-3 is the policy to apply
controlplane ~ ➜ k apply -f net-pol-3.yaml
networkpolicy.networking.k8s.io/net-policy-3 created
Create NetworkPolicy (8)
controlplane ~ ➜ k get pod --show-labels
NAME READY STATUS RESTARTS AGE LABELS
np-test-1 1/1 Running 0 4m27s run=np-test-1
pvviewer 1/1 Running 0 16m <none>
controlplane ~ ➜ cat np.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: ingress-to-nptest
namespace: default
spec:
podSelector:
matchLabels:
run: np-test-1
policyTypes:
- Ingress
ingress:
- ports:
- protocol: TCP
port: 80
controlplane ~ ➜ k apply -f np.yaml
networkpolicy.networking.k8s.io/ingress-to-nptest created
Create ConfigMap (8)
controlplane ~ ➜ k create configmap app-config -n cm-namespace --from-literal=ENV=production --from-literal=LOG_LEVEL=info
configmap/app-config created
controlplane ~ ➜ k -n cm-namespace get cm
NAME DATA AGE
app-config 2 13s
kube-root-ca.crt 1 58s
controlplane ~ ➜ k -n cm-namespace get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
cm-webapp 1/1 1 1 2m26s
controlplane ~ ➜ k edit -n cm-namespace deployment cm-webapp
...
spec:
containers:
- image: nginx
imagePullPolicy: Always
name: nginx
# add the ConfigMap reference
envFrom:
- configMapRef:
name: app-config
...
deployment.apps/cm-webapp edited
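To verify the two keys were injected once the Deployment rolled out (a hedged check, not part of the recorded session):
k -n cm-namespace exec deploy/cm-webapp -- env | grep -E 'ENV|LOG_LEVEL'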
Create PriorityClass (8)
controlplane ~ ➜ k create priorityclass low-priority --value=50000
priorityclass.scheduling.k8s.io/low-priority created
controlplane ~ ➜ k -n low-priority get pod
NAME READY STATUS RESTARTS AGE
lp-pod 1/1 Running 0 41s
controlplane ~ ➜ k -n low-priority edit pod lp-pod
error: pods "lp-pod" is invalid
A copy of your changes has been stored to "/tmp/kubectl-edit-1609190342.yaml"
error: Edit cancelled, no valid changes were saved.
controlplane ~ ✖ k replace -f /tmp/kubectl-edit-1609190342.yaml --force
pod "lp-pod" deleted from low-priority namespace
Error from server (Forbidden): pods "lp-pod" is forbidden: the integer value of priority (0) must not be provided in pod spec; priority admission controller computed 50000 from the given PriorityClass name
# remove spec.priority
controlplane ~ ✖ vi /tmp/kubectl-edit-1609190342.yaml
controlplane ~ ➜ k replace -f /tmp/kubectl-edit-1609190342.yaml --force
pod/lp-pod replaced
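To confirm the pod now carries the class and the admission-computed priority (a hedged check, not from the recorded session):
k -n low-priority get pod lp-pod -o jsonpath='{.spec.priorityClassName}{" "}{.spec.priority}{"\n"}'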
Taint, Toleration, NodeAffinity
Taint and Toleration (12)
controlplane ~ ➜ k get nodes
NAME STATUS ROLES AGE VERSION
controlplane Ready control-plane 36m v1.34.0
node01 Ready <none> 35m v1.34.0
controlplane ~ ➜ k taint node node01 env_type=production:NoSchedule
node/node01 tainted
controlplane ~ ➜ k describe node node01 | grep -i taint
Taints: env_type=production:NoSchedule
controlplane ~ ➜ k run dev-redis --image=redis:alpine
pod/dev-redis created
controlplane ~ ➜ vi prod-redis.yaml
controlplane ~ ➜ cat prod-redis.yaml
apiVersion: v1
kind: Pod
metadata:
name: prod-redis
spec:
containers:
- name: prod-redis
image: redis:alpine
tolerations:
- key: "env_type"
operator: "Equal"
value: "production"
effect: "NoSchedule"
controlplane ~ ➜ k apply -f prod-redis.yaml
pod/prod-redis created
controlplane ~ ➜ k get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
dev-redis 1/1 Running 0 3m5s 172.17.0.5 controlplane <none> <none>
np-test-1 1/1 Running 0 11m 172.17.1.8 node01 <none> <none>
prod-redis 1/1 Running 0 7s 172.17.1.9 node01 <none> <none>
pvviewer 1/1 Running 0 23m 172.17.1.3 node01 <none> <none>