k -n=admin2406 get deployments.apps -o=json

k -n=admin2406 get deployments.apps -o=custom-columns=DEPLOYMENT:.metadata.name,CONTAINER_IMAGE:.spec.template.spec.containers[0].image,READY_REPLICAS:.status.readyReplicas,NAMESPACE:.metadata.namespace > /opt/admin2406_data
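If the question also expects the rows sorted by deployment name (an assumption; the exact wording varies between mock exams), the same custom-columns command takes kubectl's --sort-by flag:

k -n=admin2406 get deployments.apps --sort-by=.metadata.name -o=custom-columns=DEPLOYMENT:.metadata.name,CONTAINER_IMAGE:.spec.template.spec.containers[0].image,READY_REPLICAS:.status.readyReplicas,NAMESPACE:.metadata.namespace > /opt/admin2406_data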
controlplane ~ k get pods
NAME        READY   STATUS    RESTARTS   AGE
messaging   1/1     Running   0          4m2s

controlplane ~ k expose pod messaging --port=6379 --name=messaging-service
service/messaging-service exposed

controlplane ~ k get svc
NAME                TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
kubernetes          ClusterIP   172.20.0.1     <none>        443/TCP    48m
messaging-service   ClusterIP   172.20.26.32   <none>        6379/TCP   6s
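As a quick sanity check (not part of the original transcript), describing the service should show the pod's IP under Endpoints, confirming the selector generated by kubectl expose matched the messaging pod:

# expect Endpoints: <pod-ip>:6379 if the selector matched
k describe svc messaging-service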
Create Deployment (10)
controlplane ~ k create deployment hr-web-app --image=kodekloud/webapp-color --replicas=2
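To confirm the rollout (a verification step, not shown in the transcript), both replicas should report Ready; kubectl create deployment labels the pods app=hr-web-app:

k get deployment hr-web-app
k get pods -l app=hr-web-app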
controlplane ~ ➜ vi analytics-vpa.yaml

controlplane ~ ➜ cat analytics-vpa.yaml
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: analytics-vpa
  namespace: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: analytics-deployment
  updatePolicy:
    updateMode: Auto

controlplane ~ ➜ k apply -f analytics-vpa.yaml
verticalpodautoscaler.autoscaling.k8s.io/analytics-vpa created

controlplane ~ ➜ k get vpa
NAME            MODE   CPU   MEM   PROVIDED   AGE
analytics-vpa   Auto                False      38s
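PROVIDED is still False because the recommender has not produced values yet; once it has sampled the deployment for a while, the CPU and memory targets show up in the VPA status and can be inspected with:

# recommendations appear under Status > Recommendation > Container Recommendations
k describe vpa analytics-vpa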
Create Gateway (6)
controlplane ~ ➜ vi web-gateway.yaml

controlplane ~ ➜ cat web-gateway.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: web-gateway
  namespace: nginx-gateway
spec:
  gatewayClassName: nginx
  listeners:
    - name: http
      protocol: HTTP
      port: 80

controlplane ~ ➜ k apply -f web-gateway.yaml
gateway.gateway.networking.k8s.io/web-gateway created

controlplane ~ ➜ k -n nginx-gateway get gateway
NAME          CLASS   ADDRESS   PROGRAMMED   AGE
web-gateway   nginx             True         13s
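If PROGRAMMED were not True, the listener conditions would explain why; describing the Gateway surfaces them:

k -n nginx-gateway describe gateway web-gateway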
Helm Chart upgrade (8)
controlplane ~ ➜ helm list -n kk-ns
NAME       NAMESPACE   REVISION   UPDATED                                   STATUS     CHART          APP VERSION
kk-mock1   kk-ns       1          2025-11-06 14:28:24.255194099 +0000 UTC   deployed   nginx-18.1.0   1.27.0

controlplane ~ ➜ helm repo list
NAME       URL
kk-mock1   https://charts.bitnami.com/bitnami

controlplane ~ ➜ helm repo update
Hang tight while we grab the latest from your chart repositories...
...Successfully got an update from the "kk-mock1" chart repository
Update Complete. ⎈Happy Helming!⎈

controlplane ~ ➜ helm repo list
NAME       URL
kk-mock1   https://charts.bitnami.com/bitnami

controlplane ~ ➜ helm search repo nginx
NAME                                CHART VERSION   APP VERSION   DESCRIPTION
kk-mock1/nginx                      22.2.4          1.29.3        NGINX Open Source is a web server that can be a...
kk-mock1/nginx-ingress-controller   12.0.7          1.13.1        NGINX Ingress Controller is an Ingress controll...
kk-mock1/nginx-intel                2.1.15          0.4.9         DEPRECATED NGINX Open Source for Intel is a lig...

controlplane ~ ➜ helm search repo nginx --versions | grep 18.1.15
kk-mock1/nginx   18.1.15   1.27.1   NGINX Open Source is a web server that can be a...

controlplane ~ ➜ helm upgrade kk-mock1 kk-mock1/nginx --version=18.1.5 -n kk-ns
Release "kk-mock1" has been upgraded. Happy Helming!
NAME: kk-mock1
LAST DEPLOYED: Thu Nov  6 14:31:52 2025
NAMESPACE: kk-ns
STATUS: deployed
REVISION: 2
TEST SUITE: None
NOTES:
CHART NAME: nginx
CHART VERSION: 18.1.5
APP VERSION: 1.27.0

** Please be patient while the chart is being deployed **

NGINX can be accessed through the following DNS name from within your cluster:

    kk-mock1-nginx.kk-ns.svc.cluster.local (port 80)

To access NGINX from outside the cluster, follow the steps below:

1. Get the NGINX URL by running these commands:

   NOTE: It may take a few minutes for the LoadBalancer IP to be available.
         Watch the status with: 'kubectl get svc --namespace kk-ns -w kk-mock1-nginx'

    export SERVICE_PORT=$(kubectl get --namespace kk-ns -o jsonpath="{.spec.ports[0].port}" services kk-mock1-nginx)
    export SERVICE_IP=$(kubectl get svc --namespace kk-ns kk-mock1-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
    echo "http://${SERVICE_IP}:${SERVICE_PORT}"

WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs:
  - cloneStaticSiteFromGit.gitSync.resources
  - resources

+info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/

⚠ SECURITY WARNING: Original containers have been substituted. This Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables.

Substituted images detected:
  - %!s(<nil>)/:%!s(<nil>)

controlplane ~ ➜ helm list -n kk-ns
NAME       NAMESPACE   REVISION   UPDATED                                   STATUS     CHART          APP VERSION
kk-mock1   kk-ns       2          2025-11-06 14:31:52.339403678 +0000 UTC   deployed   nginx-18.1.5   1.27.0
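Helm keeps revision 1 around, so if the upgrade ever had to be undone, a rollback is one command (hypothetical follow-up, not required by the task):

helm history kk-mock1 -n kk-ns      # lists revisions 1 and 2
helm rollback kk-mock1 1 -n kk-ns   # would restore nginx-18.1.0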
controlplane ~ ➜ k -n ingress-ns get deploy
NAME            READY   UP-TO-DATE   AVAILABLE   AGE
webapp-deploy   1/1     1            1           10m

controlplane ~ ➜ k -n ingress-ns get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
webapp-svc   ClusterIP   172.20.133.154   <none>        80/TCP    10m

controlplane ~ ➜ vi webapp-ingress.yaml

controlplane ~ ➜ cat webapp-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: webapp-ingress
  namespace: ingress-ns
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
    - host: "kodekloud-ingress.app"
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: webapp-svc
                port:
                  number: 80

controlplane ~ ➜ k create -f webapp-ingress.yaml
ingress.networking.k8s.io/webapp-ingress created

controlplane ~ ➜ k -n ingress-ns get ingress
NAME             CLASS   HOSTS                   ADDRESS   PORTS   AGE
webapp-ingress   nginx   kodekloud-ingress.app             80      9s

controlplane ~ ➜ curl -s http://kodekloud-ingress.app/
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
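If the curl had returned a 404 or the default backend instead of the nginx welcome page, describing the Ingress is the quickest way to confirm the host/path rule is wired to webapp-svc:80:

k -n ingress-ns describe ingress webapp-ingress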
Create Role and RoleBinding (10)
controlplane ~ ➜ cat /root/CKA/john.csr | base64 | tr -d '\n'
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUp6Nm51VE5XK3NjenMrdk5veWMzT2ZhOFhCMUZWYVhsbS9SUGtmMFdnWjhHbG0yCnkyU1paQ0YzUmtJKzJFamQ0V1RlYVN3dnFiNUdPU0o3N2ZiaUx6aUd2SS80VTdQM1JvMnNWVG5Ra0RCb2VQczIKQm5SK2FzVjRnbmZuWDUrZklWRFJaMmt2eFRoeXFFZStWQ3p0eDkyYTNSVWszWk9xa0J0Y24vOFd5TURjaFFSagpteXZ6MmtEZTBWbFc4eC9yUHpPZGpNSCtia3N6YjRxcVczUVllTkRKUklMMHVMOXdXUy9PRTl6eklKeXhDbFQ1Cm5UWTRWam5VaGE5MjFYSld5a3dvMkVaMW8vbnRBUG5uWHlJL3lJQ3htSW5QY3RLRFJLMWhPVWg1QlRwMXl1dFYKOG1oa1F2RWNkTW1FU0FWOTJIQXpub2VQMjRlaitwbkt5a1lFdlZrQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlJlSGhBWDBMT21ab3U1dEtPcDRkZ0tSRmNoV3FpRkpaaWptWTNaTkVJcDIzSGErZ1crWHp5CkU3a2h5VUt1QzBIRkFBYURVQ1I1SE9qSlBPL3owV21aRGpDc3gxM1BnVmxlZmJINkIwQkI4RVpWVXBzWnFraGgKQ1l5Y05VaHdycU5BcWxPU3ZPdmkvUEdldXp1NUZxaE0vK3JXdFRrbWdYSDlyZTlLNXhCWVM5UXR0TDVBTlY1SgpldkFYY3B2UDZRS2dkYWJHbDEzc3F5bGdsdWg1VEZSNXhTOUlDSnhYSm9Od3BtdEd6RG1PaFpFNllid250Z2thCjd5bkJ4eUNoRmlTLzloNDFDeXd6dFlUK0s0d2ROeTczUnk0TEd5eEl2ZkIySS96L2dkQ0cvTTljOFVEWUplQmcKSmMwdlVGalVCMzBHTTR2MjdOV0VjeFhHb21KWHFRKzQKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==

controlplane ~ ➜ vi csr.yaml

controlplane ~ ➜ cat csr.yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: john-developer
spec:
  signerName: kubernetes.io/kube-apiserver-client
  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUp6Nm51VE5XK3NjenMrdk5veWMzT2ZhOFhCMUZWYVhsbS9SUGtmMFdnWjhHbG0yCnkyU1paQ0YzUmtJKzJFamQ0V1RlYVN3dnFiNUdPU0o3N2ZiaUx6aUd2SS80VTdQM1JvMnNWVG5Ra0RCb2VQczIKQm5SK2FzVjRnbmZuWDUrZklWRFJaMmt2eFRoeXFFZStWQ3p0eDkyYTNSVWszWk9xa0J0Y24vOFd5TURjaFFSagpteXZ6MmtEZTBWbFc4eC9yUHpPZGpNSCtia3N6YjRxcVczUVllTkRKUklMMHVMOXdXUy9PRTl6eklKeXhDbFQ1Cm5UWTRWam5VaGE5MjFYSld5a3dvMkVaMW8vbnRBUG5uWHlJL3lJQ3htSW5QY3RLRFJLMWhPVWg1QlRwMXl1dFYKOG1oa1F2RWNkTW1FU0FWOTJIQXpub2VQMjRlaitwbkt5a1lFdlZrQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQlJlSGhBWDBMT21ab3U1dEtPcDRkZ0tSRmNoV3FpRkpaaWptWTNaTkVJcDIzSGErZ1crWHp5CkU3a2h5VUt1QzBIRkFBYURVQ1I1SE9qSlBPL3owV21aRGpDc3gxM1BnVmxlZmJINkIwQkI4RVpWVXBzWnFraGgKQ1l5Y05VaHdycU5BcWxPU3ZPdmkvUEdldXp1NUZxaE0vK3JXdFRrbWdYSDlyZTlLNXhCWVM5UXR0TDVBTlY1SgpldkFYY3B2UDZRS2dkYWJHbDEzc3F5bGdsdWg1VEZSNXhTOUlDSnhYSm9Od3BtdEd6RG1PaFpFNllid250Z2thCjd5bkJ4eUNoRmlTLzloNDFDeXd6dFlUK0s0d2ROeTczUnk0TEd5eEl2ZkIySS96L2dkQ0cvTTljOFVEWUplQmcKSmMwdlVGalVCMzBHTTR2MjdOV0VjeFhHb21KWHFRKzQKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
  usages:
  - digital signature
  - key encipherment
  - client auth

controlplane ~ ➜ k apply -f csr.yaml
certificatesigningrequest.certificates.k8s.io/john-developer created

controlplane ~ ➜ k get csr
NAME             AGE   SIGNERNAME                                    REQUESTOR                  REQUESTEDDURATION   CONDITION
csr-f79dh        38m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pr9dl6    <none>              Approved,Issued
csr-wgd7n        39m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   <none>              Approved,Issued
john-developer   4s    kubernetes.io/kube-apiserver-client           kubernetes-admin           <none>              Pending

controlplane ~ ➜ k certificate approve john-developer
certificatesigningrequest.certificates.k8s.io/john-developer approved

controlplane ~ ➜ k get csr
NAME             AGE   SIGNERNAME                                    REQUESTOR                  REQUESTEDDURATION   CONDITION
csr-f79dh        39m   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pr9dl6    <none>              Approved,Issued
csr-wgd7n        40m   kubernetes.io/kube-apiserver-client-kubelet   system:node:controlplane   <none>              Approved,Issued
john-developer   28s   kubernetes.io/kube-apiserver-client           kubernetes-admin           <none>              Approved,Issued

controlplane ~ ➜ vi rbac.yaml

controlplane ~ ➜ cat rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: development
  name: developer
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["create", "get", "update", "list", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: john-dev-role-binding
  namespace: development
subjects:
- kind: User
  name: john
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: developer
  apiGroup: rbac.authorization.k8s.io

controlplane ~ ➜ k apply -f rbac.yaml
role.rbac.authorization.k8s.io/developer created
rolebinding.rbac.authorization.k8s.io/john-dev-role-binding created

controlplane ~ ➜ k auth can-i create pods --as=john -n development
yes

controlplane ~ ➜ k auth can-i create pods --as=john
no
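A couple of extra probes (optional, same pattern as above) make the scoping explicit: john can list pods in development but cannot touch deployments, since the Role only grants pod verbs:

k auth can-i list pods --as=john -n development            # yes
k auth can-i create deployments --as=john -n development   # no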
Create ClusterIP Service and test DNS lookup (10)
controlplane ~ ➜ k run nginx-resolver --image=nginx
pod/nginx-resolver created

controlplane ~ ➜ k expose pod nginx-resolver --name=nginx-resolver-svc --port=80 --target-port=80 --type=ClusterIP
service/nginx-resolver-svc exposed

controlplane ~ ➜ k get svc
NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
kubernetes           ClusterIP   172.20.0.1      <none>        443/TCP   51m
nginx-resolver-svc   ClusterIP   172.20.137.21   <none>        80/TCP    4s

controlplane ~ ➜ k describe svc nginx-resolver-svc
Name:                     nginx-resolver-svc
Namespace:                default
Labels:                   run=nginx-resolver
Annotations:              <none>
Selector:                 run=nginx-resolver
Type:                     ClusterIP
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       172.20.137.21
IPs:                      172.20.137.21
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
Endpoints:                172.17.1.15:80
Session Affinity:         None
Internal Traffic Policy:  Cluster
Events:                   <none>

controlplane ~ ➜ k get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
nginx-deploy-5846bc77f5-2szm4   1/1     Running   0          20m   172.17.1.14   node01   <none>           <none>
nginx-resolver                  1/1     Running   0          85s   172.17.1.15   node01   <none>           <none>

controlplane ~ ➜ k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup nginx-resolver-svc > /root/CKA/nginx.svc
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.

controlplane ~ ➜ cat /root/CKA/nginx.svc
Address 1: 172.20.0.10 kube-dns.kube-system.svc.cluster.local

Name:      nginx-resolver-svc
Address 1: 172.20.137.21 nginx-resolver-svc.default.svc.cluster.local
pod "test-nslookup" deleted from default namespace

controlplane ~ ➜ k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup 172-17-1-15.default.pod > /root/CKA/nginx.pod
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.

controlplane ~ ➜ cat /root/CKA/nginx.pod
Address 1: 172.20.0.10 kube-dns.kube-system.svc.cluster.local

Name:      172-17-1-15.default.pod
Address 1: 172.17.1.15 172-17-1-15.nginx-resolver-svc.default.svc.cluster.local
pod "test-nslookup" deleted from default namespace
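The pod lookup works because cluster DNS exposes pods as <ip-with-dashes>.<namespace>.pod.<cluster-domain>; the fully qualified form resolves the same way (illustrative, assuming the default cluster.local domain):

k run test-nslookup --image=busybox:1.28 --rm -it --restart=Never -- nslookup 172-17-1-15.default.pod.cluster.local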
Create StaticPod on node01 (10)
controlplane ~ ➜ k get nodes
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   59m   v1.34.0
node01         Ready    <none>          59m   v1.34.0

controlplane ~ ➜ k run nginx-critical --image=nginx --dry-run=client -o=yaml > static.yaml

controlplane ~ ➜ cat static.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx-critical
  name: nginx-critical
spec:
  containers:
  - image: nginx
    name: nginx-critical
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}

controlplane ~ ➜ ssh node01
Welcome to Ubuntu 22.04.5 LTS (GNU/Linux 5.15.0-1083-gcp x86_64)

 * Documentation:  https://help.ubuntu.com
 * Management:     https://landscape.canonical.com
 * Support:        https://ubuntu.com/pro

This system has been minimized by removing packages and content that are
not required on a system that users do not log into.

To restore this content, you can run the 'unminimize' command.

node01 ~ ➜ cd /etc/kubernetes/manifests/

node01 /etc/kubernetes/manifests ➜ ls

node01 /etc/kubernetes/manifests ➜ vi static.yaml

node01 /etc/kubernetes/manifests ➜ cat static.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: nginx-critical
  name: nginx-critical
spec:
  containers:
  - image: nginx
    name: nginx-critical
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}

node01 /etc/kubernetes/manifests ➜ exit
logout
Connection to node01 closed.

controlplane ~ ➜ k get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
nginx-critical-node01           1/1     Running   0          23s   172.17.1.23   node01   <none>           <none>
nginx-deploy-5846bc77f5-2szm4   1/1     Running   0          31m   172.17.1.14   node01   <none>           <none>
nginx-resolver                  1/1     Running   0          12m   172.17.1.15   node01   <none>           <none>
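The kubelet only picks the manifest up because /etc/kubernetes/manifests is its configured static pod directory; on a kubeadm-provisioned node this can be confirmed (on node01) with:

grep staticPodPath /var/lib/kubelet/config.yaml   # usually /etc/kubernetes/manifests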
Create HPA (10)
controlplane ~ ➜ ls
CKA  csr.yaml  local-sc.yaml  logging-deploy.yaml  rbac.yaml  static.yaml  webapp-hpa.yaml  webapp-ingress.yaml

controlplane ~ ➜ vi webapp-hpa.yaml

controlplane ~ ➜ cat webapp-hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: backend-hpa
  namespace: backend
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: backend-deployment
  minReplicas: 3
  maxReplicas: 15
  metrics:
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 65

controlplane ~ ➜ k apply -f webapp-hpa.yaml
horizontalpodautoscaler.autoscaling/backend-hpa unchanged

controlplane ~ ➜ k -n backend describe hpa
Name:                                                     backend-hpa
Namespace:                                                backend
Labels:                                                   <none>
Annotations:                                              <none>
CreationTimestamp:                                        Sat, 08 Nov 2025 11:57:11 +0000
Reference:                                                Deployment/backend-deployment
Metrics:                                                  ( current / target )
  resource memory on pods  (as a percentage of request):  <unknown> / 65%
Min replicas:                                             3
Max replicas:                                             15
Deployment pods:                                          3 current / 0 desired
Conditions:
  Type           Status  Reason                   Message
  ----           ------  ------                   -------
  AbleToScale    True    SucceededGetScale        the HPA controller was able to get the target's current scale
  ScalingActive  False   FailedGetResourceMetric  the HPA was unable to compute the replica count: failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
Events:
  Type     Reason                        Age               From                       Message
  ----     ------                        ----              ----                       -------
  Warning  FailedGetResourceMetric       4s (x6 over 79s)  horizontal-pod-autoscaler  failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
  Warning  FailedComputeMetricsReplicas  4s (x6 over 79s)  horizontal-pod-autoscaler  invalid metrics (1 invalid out of 1), first error is: failed to get memory resource metric value: failed to get memory utilization: unable to get metrics for resource memory: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)
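The <unknown> metric and the FailedGetResourceMetric events mean the resource metrics API is not being served, which in these labs usually comes down to the metrics-server add-on being absent or not ready; a quick check, and (if it really is missing) the upstream install, would look like this:

k -n kube-system get deploy metrics-server
k top pod -n backend   # only works once metrics-server is serving metrics
# if absent (assumption: the cluster has internet access):
k apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml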