CKA Practice Reminder - 38. Mock Exam - 2
Take an ETCD backup and save it to /opt/etcd-backup.db
controlplane ~ ➜ cat /etc/kubernetes/manifests/etcd.yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/etcd.advertise-client-urls: https://192.9.32.6:2379
  creationTimestamp: null
  labels:
    component: etcd
    tier: control-plane
  name: etcd
  namespace: kube-system
spec:
  containers:
  - command:
    - etcd
    - --advertise-client-urls=https://192.9.32.6:2379
    - --cert-file=/etc/kubernetes/pki/etcd/server.crt
    - --client-cert-auth=true
    - --data-dir=/var/lib/etcd
    - --experimental-initial-corrupt-check=true
    - --experimental-watch-progress-notify-interval=5s
    - --initial-advertise-peer-urls=https://192.9.32.6:2380
    - --initial-cluster=controlplane=https://192.9.32.6:2380
    - --key-file=/etc/kubernetes/pki/etcd/server.key
    - --listen-client-urls=https://127.0.0.1:2379,https://192.9.32.6:2379
    - --listen-metrics-urls=http://127.0.0.1:2381
    - --listen-peer-urls=https://192.9.32.6:2380
    - --name=controlplane
    - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
    - --peer-client-cert-auth=true
    - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    - --snapshot-count=10000
    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    image: registry.k8s.io/etcd:3.5.15-0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /livez
        port: 2381
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: etcd
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 127.0.0.1
        path: /readyz
        port: 2381
        scheme: HTTP
      periodSeconds: 1
      timeoutSeconds: 15
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 127.0.0.1
        path: /readyz
        port: 2381
        scheme: HTTP
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /var/lib/etcd
      name: etcd-data
    - mountPath: /etc/kubernetes/pki/etcd
      name: etcd-certs
  hostNetwork: true
  priority: 2000001000
  priorityClassName: system-node-critical
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  volumes:
  - hostPath:
      path: /etc/kubernetes/pki/etcd
      type: DirectoryOrCreate
    name: etcd-certs
  - hostPath:
      path: /var/lib/etcd
      type: DirectoryOrCreate
    name: etcd-data
status: {}
Find the required connection details in etcd.yaml (grepping for "file" and "2379" makes this quick).
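For example, something along these lines (a rough sketch; any pattern that surfaces the certificate paths and the client URL works):
grep file /etc/kubernetes/manifests/etcd.yaml
grep 2379 /etc/kubernetes/manifests/etcd.yaml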
controlplane ~ ✖ export ETCDCTL_API=3
controlplane ~ ➜ ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
> --cacert=/etc/kubernetes/pki/etcd/ca.crt \
> --cert=/etc/kubernetes/pki/etcd/server.crt \
> --key=/etc/kubernetes/pki/etcd/server.key \
> snapshot save /opt/etcd-backup.db
Snapshot saved at /opt/etcd-backup.db
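Optionally, the snapshot can be sanity-checked before moving on (an extra step not required by the task; newer etcd releases prefer etcdutl for this):
ETCDCTL_API=3 etcdctl snapshot status /opt/etcd-backup.db -w table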
Create a Pod
Pod named 'redis-storage' created
Pod 'redis-storage' uses Volume type of emptyDir
Pod 'redis-storage' uses volumeMount with mountPath = /data/redis
controlplane ~ ➜ kubectl run redis-storage --image=redis:alpine --dry-run=client -o yaml > ./redis-pod.yaml
controlplane ~ ➜ vim redis-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: redis-storage
  name: redis-storage
spec:
  containers:
  - image: redis:alpine
    name: redis-storage
    resources: {}
    volumeMounts:
    - mountPath: /data/redis
      name: redis-storage
  volumes:
  - name: redis-storage
    emptyDir: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
controlplane ~ ➜ kubectl apply -f redis-pod.yaml
pod/redis-storage created
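To double-check the emptyDir volume and mount afterwards (optional verification, not part of the original transcript):
kubectl get po redis-storage
kubectl describe po redis-storage | grep -A3 Mounts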
Create a Pod
Create a new pod called super-user-pod with image busybox:1.28. Allow the pod to be able to set system_time.
The container should sleep for 4800 seconds.
Pod: super-user-pod
Container Image: busybox:1.28
Is SYS_TIME capability set for the container?
controlplane ~ ➜ kubectl run super-user-pod --image=busybox:1.28 --dry-run=client -o yaml --command -- sleep 4800 > ./super-user-pod.yaml
Add the SYS_TIME capability.
See the following link for reference:
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
controlplane ~ ➜ vim super-user-pod.yaml
controlplane ~ ➜ cat super-user-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: super-user-pod
  name: super-user-pod
spec:
  containers:
  - command:
    - sleep
    - "4800"
    image: busybox:1.28
    name: super-user-pod
    securityContext:
      capabilities:
        add: ["SYS_TIME"]
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
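The pod still has to be created from this file (the apply step is not shown in the transcript above); roughly:
kubectl apply -f super-user-pod.yaml
kubectl get po super-user-pod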
Create a pod definition file at /root/CKA/use-pv.yaml. Create a PVC named my-pvc and bind it to the pod.
mountPath: /data
persistentVolumeClaim Name: my-pvc
persistentVolume Claim configured correctly
pod using the correct mountPath
pod using the persistent volume claim?
Check the PV details with kubectl get pv:
controlplane ~ ➜ kubectl get pv
NAME   CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
pv-1   10Mi       RWO            Retain           Available                          <unset>                          89s
Capacity is 10Mi, the access mode is RWO, and no storage class is set.
controlplane ~ ➜ vim my-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Mi
controlplane ~ ➜ kubectl apply -f my-pvc.yaml
persistentvolumeclaim/my-pvc created
controlplane ~ ➜ kubectl get pvc
NAME     STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
my-pvc   Bound    pv-1     10Mi       RWO                           <unset>                 5s
controlplane ~ ➜ vim /root/CKA/use-pv.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: use-pv
  name: use-pv
spec:
  containers:
  - image: nginx
    name: use-pv
    resources: {}
    volumeMounts:
    - mountPath: "/data"
      name: mypd
  volumes:
  - name: mypd
    persistentVolumeClaim:
      claimName: my-pvc
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
controlplane ~ ➜ kubectl apply -f /root/CKA/use-pv.yaml
pod/use-pv created
controlplane ~ ✖ kubectl describe po use-pv
Name: use-pv
Namespace: default
Priority: 0
Service Account: default
Node: node01/192.14.160.6
Start Time: Mon, 13 Jan 2025 16:26:14 +0000
Labels: run=use-pv
Annotations: <none>
Status: Running
IP: 10.244.192.3
IPs:
IP: 10.244.192.3
Containers:
use-pv:
Container ID: containerd://63860394eec834cefd3675675a4586a826a51650198295b88e9afbcada6a2de5
Image: nginx
Image ID: docker.io/library/nginx@sha256:42e917aaa1b5bb40dd0f6f7f4f857490ac7747d7ef73b391c774a41a8b994f15
Port: <none>
Host Port: <none>
State: Running
Started: Mon, 13 Jan 2025 16:26:19 +0000
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/data from mypd (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pmws7 (ro)
Conditions:
Type Status
PodReadyToStartContainers True
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
mypd:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: my-pvc
ReadOnly: false
kube-api-access-pmws7:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 11s default-scheduler Successfully assigned default/use-pv to node01
Normal Pulling 10s kubelet Pulling image "nginx"
Normal Pulled 6s kubelet Successfully pulled image "nginx" in 3.903s (3.903s including waiting). Image size: 72099410 bytes.
Normal Created 6s kubelet Created container use-pv
Normal Started 6s kubelet Started container use-pv
Create a new deployment called nginx-deploy, with image nginx:1.16 and 1 replica. Next upgrade the deployment to version 1.17 using rolling update.
Deployment : nginx-deploy. Image: nginx:1.16
Image: nginx:1.16
Task: Upgrade the version of the deployment to 1.17
Task: Record the changes for the image upgrade
controlplane ~ ➜ kubectl create deploy nginx-deploy --image=nginx:1.16 --replicas=1 --dry-run=client -o yaml > nginx-deploy.yaml
controlplane ~ ➜ cat nginx-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: nginx-deploy
  name: nginx-deploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-deploy
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx-deploy
    spec:
      containers:
      - image: nginx:1.16
        name: nginx
        resources: {}
status: {}
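The deployment has to be created before its image can be updated (this apply step is not shown in the transcript):
kubectl apply -f nginx-deploy.yaml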
controlplane ~ ➜ kubectl set image deploy/nginx-deploy nginx=nginx:1.17
deployment.apps/nginx-deploy image updated
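The task also asks to record the image upgrade; a couple of optional follow-up commands (the deprecated --record flag, or a kubernetes.io/change-cause annotation, would populate CHANGE-CAUSE in the history):
kubectl rollout status deploy/nginx-deploy
kubectl rollout history deploy/nginx-deploy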
Create a new user called john. Grant him access to the cluster. John should have permission to create, list, get, update and delete pods in the development namespace . The private key exists in the location: /root/CKA/john.key and csr at /root/CKA/john.csr.
Important Note: As of kubernetes 1.19, the CertificateSigningRequest object expects a signerName.
Please refer to the documentation to see an example. The documentation tab is available at the top right of the terminal.
CSR: john-developer Status:Approved
Role Name: developer, namespace: development, Resource: Pods
Access: User 'john' has appropriate permissions
https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/#create-certificatesigningrequest
controlplane ~ ➜ vim john-develop.yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: john-developer
spec:
request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZqQ0NBVDRDQVFBd0VURVBNQTBHQTFVRUF3d0dZVzVuWld4aE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRgpBQU9DQVE4QU1JSUJDZ0tDQVFFQTByczhJTHRHdTYxakx2dHhWTTJSVlRWMDNHWlJTWWw0dWluVWo4RElaWjBOCnR2MUZtRVFSd3VoaUZsOFEzcWl0Qm0wMUFSMkNJVXBGd2ZzSjZ4MXF3ckJzVkhZbGlBNVhwRVpZM3ExcGswSDQKM3Z3aGJlK1o2MVNrVHF5SVBYUUwrTWM5T1Nsbm0xb0R2N0NtSkZNMUlMRVI3QTVGZnZKOEdFRjJ6dHBoaUlFMwpub1dtdHNZb3JuT2wzc2lHQ2ZGZzR4Zmd4eW8ybmlneFNVekl1bXNnVm9PM2ttT0x1RVF6cXpkakJ3TFJXbWlECklmMXBMWnoyalVnald4UkhCM1gyWnVVV1d1T09PZnpXM01LaE8ybHEvZi9DdS8wYk83c0x0MCt3U2ZMSU91TFcKcW90blZtRmxMMytqTy82WDNDKzBERHk5aUtwbXJjVDBnWGZLemE1dHJRSURBUUFCb0FBd0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBR05WdmVIOGR4ZzNvK21VeVRkbmFjVmQ1N24zSkExdnZEU1JWREkyQTZ1eXN3ZFp1L1BVCkkwZXpZWFV0RVNnSk1IRmQycVVNMjNuNVJsSXJ3R0xuUXFISUh5VStWWHhsdnZsRnpNOVpEWllSTmU3QlJvYXgKQVlEdUI5STZXT3FYbkFvczFqRmxNUG5NbFpqdU5kSGxpT1BjTU1oNndLaTZzZFhpVStHYTJ2RUVLY01jSVUyRgpvU2djUWdMYTk0aEpacGk3ZnNMdm1OQUxoT045UHdNMGM1dVJVejV4T0dGMUtCbWRSeEgvbUNOS2JKYjFRQm1HCkkwYitEUEdaTktXTU0xMzhIQXdoV0tkNjVoVHdYOWl4V3ZHMkh4TG1WQzg0L1BHT0tWQW9FNkpsYWFHdTlQVmkKdjlOSjVaZlZrcXdCd0hKbzZXdk9xVlA3SVFjZmg3d0drWm89Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo=
  signerName: kubernetes.io/kube-apiserver-client
  expirationSeconds: 86400  # one day
  usages:
  - client auth
The request field has to be replaced with john's CSR, base64-encoded:
controlplane ~ ➜ cat /root/CKA/john.csr | base64 | tr -d "\n"
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQU01WHVlTFE3L0lYZXB1SHdwby92NklsTTA5V0JsM0pBNlVUaHFyTUl0ZE8yU3cyCkhPTllmUGF0emdJd3JxVTJqSERCczN6eG5tWWFYaEc5emQyMFFTQ05GRGVYQkdvTzExTW1LcnRTWUZCdktMczMKaUN0STFoS3RVajZpandPMWxlYi9FMk91SlQwTk5BVXpZVkZVcEJFNHJRNVRrekVQRk82OEYxUjFUaHl0cTJFRwpDa2ZRcjVnMmpXbERQeWRudUFqOUdUcnM4MXVGR042VHVDQ1VSa3BJcFltaHlPTDdmQW9qS1RHeUxOSVJZTkJICmxCalJhNlJ0N0ZLOTdoT2NjaDAwcUVNV3VxQlhLNHc2a0NqWmN0Y3N1Q0FhR0R6dXVQRHRvS1VTa1RON0VqOGgKVWRPQ0RZMWhuWkZocitUVW5qY1hJdzJpU0hURE9NbVdYdW1CTURFQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRREdmS1B6WnZnaUZ4aGV0UFdSK1N4Tm5OaThVVWZwa2ZCZVJSTTVudnpyU0FhMU4vbWw3MHlmCnhZekZBbkNvbTAzbXE3TUhwSUYvOXVPNTBOTUNyYXgyeitIc1RVWERoMk5hWG5kK3FibE56U0Y0cDQzZm12RWQKNmMwSDV1Yml1NURYZklhdlEybktrYTlDVldKaStZYTdpaHBxWnhHbmtrSDFlVVBucnpGZ1pxUVJPNEF5ZHZLNAo2NXpXOE9SM2NRUUd3N1hNWEQ4S0k5L3FLRDZOWVFySWxUdHhwcjlOalZ6aTFNM3RDSG5YUWwxQ2k4VUJqNktaCk9Db3Vvb0NvSE0yQnZVNzJmUHNhSCtWZ1ZwRXlsaEJUZks2MS93RlJMNlhaeWEzRjk4ZkN2YTBwbURVVDFaTUYKVWNKWWxWYmhnUXp6WFF5QmVSS1lkU2VrN2lOL1Y3Ni8KLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
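An equivalent one-liner with GNU coreutils (optional alternative):
base64 -w 0 /root/CKA/john.csr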
controlplane ~ ➜ vim john-develop.yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: john-developer
spec:
request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbTlvYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQU01WHVlTFE3L0lYZXB1SHdwby92NklsTTA5V0JsM0pBNlVUaHFyTUl0ZE8yU3cyCkhPTllmUGF0emdJd3JxVTJqSERCczN6eG5tWWFYaEc5emQyMFFTQ05GRGVYQkdvTzExTW1LcnRTWUZCdktMczMKaUN0STFoS3RVajZpandPMWxlYi9FMk91SlQwTk5BVXpZVkZVcEJFNHJRNVRrekVQRk82OEYxUjFUaHl0cTJFRwpDa2ZRcjVnMmpXbERQeWRudUFqOUdUcnM4MXVGR042VHVDQ1VSa3BJcFltaHlPTDdmQW9qS1RHeUxOSVJZTkJICmxCalJhNlJ0N0ZLOTdoT2NjaDAwcUVNV3VxQlhLNHc2a0NqWmN0Y3N1Q0FhR0R6dXVQRHRvS1VTa1RON0VqOGgKVWRPQ0RZMWhuWkZocitUVW5qY1hJdzJpU0hURE9NbVdYdW1CTURFQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRREdmS1B6WnZnaUZ4aGV0UFdSK1N4Tm5OaThVVWZwa2ZCZVJSTTVudnpyU0FhMU4vbWw3MHlmCnhZekZBbkNvbTAzbXE3TUhwSUYvOXVPNTBOTUNyYXgyeitIc1RVWERoMk5hWG5kK3FibE56U0Y0cDQzZm12RWQKNmMwSDV1Yml1NURYZklhdlEybktrYTlDVldKaStZYTdpaHBxWnhHbmtrSDFlVVBucnpGZ1pxUVJPNEF5ZHZLNAo2NXpXOE9SM2NRUUd3N1hNWEQ4S0k5L3FLRDZOWVFySWxUdHhwcjlOalZ6aTFNM3RDSG5YUWwxQ2k4VUJqNktaCk9Db3Vvb0NvSE0yQnZVNzJmUHNhSCtWZ1ZwRXlsaEJUZks2MS93RlJMNlhaeWEzRjk4ZkN2YTBwbURVVDFaTUYKVWNKWWxWYmhnUXp6WFF5QmVSS1lkU2VrN2lOL1Y3Ni8KLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg==
  signerName: kubernetes.io/kube-apiserver-client
  expirationSeconds: 86400  # one day
  usages:
  - client auth
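The CSR object is then created from this manifest (the apply step itself is omitted from the transcript):
kubectl apply -f john-develop.yaml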
controlplane ~ ➜ kubectl get csr
NAME AGE SIGNERNAME REQUESTOR REQUESTEDDURATION CONDITION
csr-ljt6x 22m kubernetes.io/kube-apiserver-client-kubelet system:node:controlplane <none> Approved,Issued
csr-mdswz 21m kubernetes.io/kube-apiserver-client-kubelet system:bootstrap:rs8fap <none> Approved,Issued
john-developer 4s kubernetes.io/kube-apiserver-client kubernetes-admin 24h Pending
Approve the CSR:
controlplane ~ ✖ kubectl certificate approve john-developer
certificatesigningrequest.certificates.k8s.io/john-developer approved
controlplane ~ ➜ kubectl get csr
NAME AGE SIGNERNAME REQUESTOR REQUESTEDDURATION CONDITION
csr-ljt6x 23m kubernetes.io/kube-apiserver-client-kubelet system:node:controlplane <none> Approved,Issued
csr-mdswz 22m kubernetes.io/kube-apiserver-client-kubelet system:bootstrap:rs8fap <none> Approved,Issued
john-developer 36s kubernetes.io/kube-apiserver-client kubernetes-admin 24h Approved,Issued
Create the role in the development namespace:
controlplane ~ ➜ kubectl create role developer --verb=create --verb=get --verb=list --verb=update --verb=delete --resource=pods -n development
role.rbac.authorization.k8s.io/developer created
controlplane ~ ➜ kubectl get role -n development
NAME CREATED AT
developer 2025-01-14T17:25:05Z
To keep the remaining commands short, switch the current context to the development namespace:
controlplane ~ ➜ kubectl config set-context --current --namespace=development
Context "kubernetes-admin@kubernetes" modified.
controlplane ~ ➜ kubectl delete rolebinding developer-binding-myuser
rolebinding.rbac.authorization.k8s.io "developer-binding-myuser" deleted
controlplane ~ ➜ kubectl create rolebinding john-developer --role=developer --user=john -n development
rolebinding.rbac.authorization.k8s.io/john-developer created
The rolebinding for john is created as well.
controlplane ~ ➜ kubectl auth can-i create pods --as john
yes
controlplane ~ ➜ kubectl auth can-i delete pods --as john
yes
controlplane ~ ➜ kubectl auth can-i list pods --as john
yes
controlplane ~ ➜ kubectl auth can-i update pods --as john
yes
controlplane ~ ➜ kubectl auth can-i get pods --as john
yes
Permissions verified with kubectl auth can-i.
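Since the current context was switched to the development namespace earlier, the checks above run against that namespace; to be explicit regardless of the current context, the namespace can be passed directly (optional):
kubectl auth can-i create pods --as john -n development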
Create a nginx pod called nginx-resolver using image nginx, expose it internally with a service called nginx-resolver-service. Test that you are able to look up the service and pod names from within the cluster. Use the image: busybox:1.28 for dns lookup. Record results in /root/CKA/nginx.svc and /root/CKA/nginx.pod
Pod: nginx-resolver created
Service DNS Resolution recorded correctly
Pod DNS resolution recorded correctly
controlplane ~ ➜ kubectl config set-context --current --namespace=default
Context "kubernetes-admin@kubernetes" modified.
Create the pod:
controlplane ~ ➜ kubectl run nginx-resolver --image=nginx
pod/nginx-resolver created
Create the service:
controlplane ~ ➜ kubectl expose po nginx-resolver --name=nginx-resolver-service --port 80
service/nginx-resolver-service exposed
controlplane ~ ➜ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 31m
nginx-resolver-service ClusterIP 10.111.187.111 <none> 80/TCP 3s
Create a pod to run nslookup from:
controlplane ~ ➜ kubectl run busybox --image=busybox:1.28 --command -- sleep 4800
pod/busybox created
Test nslookup via kubectl exec:
controlplane ~ ➜ kubectl exec busybox -- nslookup nginx-resolver-service
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: nginx-resolver-service
Address 1: 10.111.187.111 nginx-resolver-service.default.svc.cluster.local
Save the output to the specified file:
controlplane ~ ➜ kubectl exec busybox -- nslookup nginx-resolver-service > /root/CKA/nginx.svc
controlplane ~ ➜ cat /root/CKA/nginx.svc
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: nginx-resolver-service
Address 1: 10.111.187.111 nginx-resolver-service.default.svc.cluster.local
Check the Pod DNS name format on the following page:
https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
controlplane ~ ➜ kubectl get po -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
busybox 1/1 Running 0 2m20s 10.244.192.6 node01 <none> <none>
nginx-deploy-6dbf8cb9c8-ppcmm 1/1 Running 0 15m 10.244.192.5 node01 <none> <none>
nginx-resolver 1/1 Running 0 3m42s 10.244.192.4 node01 <none> <none>
redis-storage 1/1 Running 0 19m 10.244.192.1 node01 <none> <none>
super-user-pod 1/1 Running 0 18m 10.244.192.2 node01 <none> <none>
use-pv 1/1 Running 0 16m 10.244.192.3 node01 <none> <none>
The IP of nginx-resolver is 10.244.192.4.
Test the lookup with nslookup:
controlplane ~ ➜ kubectl exec busybox -- nslookup 10-244-192-4.default.pod.cluster.local
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: 10-244-192-4.default.pod.cluster.local
Address 1: 10.244.192.4 10-244-192-4.nginx-resolver-service.default.svc.cluster.local
Save the output to the specified file:
controlplane ~ ➜ kubectl exec busybox -- nslookup 10-244-192-4.default.pod.cluster.local > /root/CKA/nginx.pod
controlplane ~ ➜ cat /root/CKA/nginx.pod
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: 10-244-192-4.default.pod.cluster.local
Address 1: 10.244.192.4 10-244-192-4.nginx-resolver-service.default.svc.cluster.local
Create a static pod on node01 called nginx-critical with image nginx and make sure that it is recreated/restarted automatically in case of a failure.
Use /etc/kubernetes/manifests as the Static Pod path for example.
static pod configured under /etc/kubernetes/manifests ?
Pod nginx-critical-node01 is up and running
controlplane ~ ✖ kubectl run nginx-critical --image=nginx --dry-run=client -o yaml > nginx-critical.yaml
controlplane ~ ➜ cat nginx-critical.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx-critical
  name: nginx-critical
spec:
  containers:
  - image: nginx
    name: nginx-critical
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
Generate the yaml file, then copy it to node01 (the required restartPolicy: Always matters here).
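Instead of ssh-ing in and pasting the manifest by hand, the file could also be copied over in one step (an alternative sketch, assuming ssh/scp access to node01):
scp nginx-critical.yaml node01:/etc/kubernetes/manifests/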
controlplane ~ ➜ ssh node01
Welcome to Ubuntu 22.04.4 LTS (GNU/Linux 5.4.0-1106-gcp x86_64)
* Documentation: https://help.ubuntu.com
* Management: https://landscape.canonical.com
* Support: https://ubuntu.com/pro
This system has been minimized by removing packages and content that are
not required on a system that users do not log into.
To restore this content, you can run the 'unminimize' command.
Last login: Tue Jan 14 17:37:18 2025 from 192.15.163.10
node01 ~ ➜ cd /etc/kubernetes/manifests/
node01 /etc/kubernetes/manifests ➜ vim nginx-critical.yaml
node01 /etc/kubernetes/manifests ➜ exit
logout
Connection to node01 closed.
controlplane ~ ➜ kubectl get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
default busybox 1/1 Running 0 5m44s
default nginx-critical-node01 1/1 Running 0 5s
default nginx-deploy-6dbf8cb9c8-ppcmm 1/1 Running 0 18m
default nginx-resolver 1/1 Running 0 7m6s
default redis-storage 1/1 Running 0 23m
default super-user-pod 1/1 Running 0 21m
default use-pv 1/1 Running 0 19m
kube-system coredns-77d6fd4654-jwwnb 1/1 Running 0 38m
kube-system coredns-77d6fd4654-x4htk 1/1 Running 0 38m
kube-system etcd-controlplane 1/1 Running 0 38m
kube-system kube-apiserver-controlplane 1/1 Running 0 38m
kube-system kube-controller-manager-controlplane 1/1 Running 0 38m
kube-system kube-proxy-8d7lg 1/1 Running 0 38m
kube-system kube-proxy-vn8vf 1/1 Running 0 37m
kube-system kube-scheduler-controlplane 1/1 Running 0 38m
kube-system weave-net-prspl 2/2 Running 1 (37m ago) 38m
kube-system weave-net-sgfd8 2/2 Running 0 37m
After placing the yaml file in /etc/kubernetes/manifests on node01, confirm that the static pod nginx-critical-node01 has been created on node01.