From f0ff85e7166f286cf7ef390d5d834db64ac1eb6e Mon Sep 17 00:00:00 2001 From: Shinminjin Date: Sun, 2 Mar 2025 06:22:48 +0900 Subject: [PATCH] =?UTF-8?q?docs:=20=F0=9F=93=B0=20[#4.=20eks]=20aews=204?= =?UTF-8?q?=EC=A3=BC=EC=B0=A8=20=EC=A0=95=EB=A6=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- _posts/aews/2025-03-01-aews04.md | 5349 ++++++++++++++++++++++++++++++ 1 file changed, 5349 insertions(+) create mode 100644 _posts/aews/2025-03-01-aews04.md diff --git a/_posts/aews/2025-03-01-aews04.md b/_posts/aews/2025-03-01-aews04.md new file mode 100644 index 0000000..48bf392 --- /dev/null +++ b/_posts/aews/2025-03-01-aews04.md @@ -0,0 +1,5349 @@ +--- +title: AEWS 4์ฃผ์ฐจ ์ •๋ฆฌ +date: 2025-03-01 18:30:00 +0900 +categories: [EKS] +tags: [AEWS] +--- + +## **๐Ÿš€ ์‹ค์Šต ํ™˜๊ฒฝ ๋ฐฐํฌ** + +![Image](https://github.com/user-attachments/assets/d725edba-3e35-4f55-b1b4-5e1c09fe013c) + +## **๐Ÿ—๏ธ AWS CloudFormation์„ ํ†ตํ•ด ๊ธฐ๋ณธ ์‹ค์Šต ํ™˜๊ฒฝ ๋ฐฐํฌ** + +### **1. yaml ํŒŒ์ผ ๋‹ค์šด๋กœ๋“œ** + +```bash +curl -O https://s3.ap-northeast-2.amazonaws.com/cloudformation.cloudneta.net/K8S/myeks-4week.yaml +# ๊ฒฐ๊ณผ + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:--100 21983 100 21983 0 0 201k 0 --:--:-- --:--:-- --:--:-- 202k +``` + +### **2. ๋ณ€์ˆ˜ ์ง€์ •** + +```bash +CLUSTER_NAME=myeks +SSHKEYNAME=kp-aews # SSH ํ‚คํŽ˜์–ด ์ด๋ฆ„ +MYACCESSKEY=XXXXXXXXXXXXXXXXXX # IAM User ์•ก์„ธ์Šค ํ‚ค +MYSECRETKEY=XXXXXXXXXXXXXXXXXX # IAM User ์‹œํฌ๋ฆฟ ํ‚ค +WorkerNodeInstanceType=t3.medium # ์›Œ์ปค๋…ธ๋“œ ์ธ์Šคํ„ด์Šค ํƒ€์ž… +``` + +### **3. 
CloudFormation ์Šคํƒ ๋ฐฐํฌ** + +```bash +aws cloudformation deploy --template-file myeks-4week.yaml --stack-name $CLUSTER_NAME --parameter-overrides KeyName=$SSHKEYNAME SgIngressSshCidr=$(curl -s ipinfo.io/ip)/32 MyIamUserAccessKeyID=$MYACCESSKEY MyIamUserSecretAccessKey=$MYSECRETKEY ClusterBaseName=$CLUSTER_NAME WorkerNodeInstanceType=$WorkerNodeInstanceType --region ap-northeast-2 + +# ๊ฒฐ๊ณผ +Waiting for changeset to be created.. +Waiting for stack create/update to complete +Successfully created/updated stack - myeks +``` + +### **4. CloudFormation ์Šคํƒ ๋ฐฐํฌ ์™„๋ฃŒ ํ›„ ์ž‘์—…์šฉ EC2 IP ์ถœ๋ ฅ** + +```bash +aws cloudformation describe-stacks --stack-name myeks --query 'Stacks[*].Outputs[0].OutputValue' --output text +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +13.124.11.68 +``` + +### **5. ๋ฐฐํฌ ๊ณผ์ • ์‚ดํŽด๋ณด๊ธฐ** + +**(1) ์šด์˜์„œ๋ฒ„ EC2 SSH ์ ‘์†** + +```bash +ssh -i kp-aews.pem ec2-user@$(aws cloudformation describe-stacks --stack-name myeks --query 'Stacks[*].Outputs[0].OutputValue' --output text) + +The authenticity of host '13.124.11.68 (13.124.11.68)' can't be established. +ED25519 key fingerprint is SHA256:GaT1nuQgtyirycvJg2yQG/bVRT87T7sukVFzeOtkySk. +This key is not known by any other names. +Are you sure you want to continue connecting (yes/no/[fingerprint])? yes +Warning: Permanently added '13.124.11.68' (ED25519) to the list of known hosts. + , #_ + ~\_ ####_ Amazon Linux 2 + ~~ \_#####\ + ~~ \###| AL2 End of Life is 2026-06-30. + ~~ \#/ ___ + ~~ V~' '-> + ~~~ / A newer version of Amazon Linux is available! + ~~._. _/ + _/ _/ Amazon Linux 2023, GA and supported until 2028-03-15. 
+ _/m/' https://aws.amazon.com/linux/amazon-linux-2023/ + +[root@operator-host ~]# +``` + +**(2) AWS CLI ์ž๊ฒฉ์ฆ๋ช… ์„ค์ •** + +```bash +[root@operator-host ~]# aws configure +AWS Access Key ID [None]: XXXXXXXXXXXXXXXXXX +AWS Secret Access Key [None]: XXXXXXXXXXXXXXXXXX +Default region name [None]: ap-northeast-2 +Default output format [None]: json +``` + +**(3) ๋ฃจํŠธ ์‚ฌ์šฉ์ž ๋ฐ ํ™ˆ ๋””๋ ‰ํ† ๋ฆฌ ํ™•์ธ ์ž‘์—…** + +```bash +[root@operator-host ~]# whoami +root +``` + +```bash +[root@operator-host ~]# pwd +/root +``` + +**(4) cloud-init ์‹คํ–‰ ๊ณผ์ • ๋กœ๊ทธ ํ™•์ธ** + +```bash +[root@operator-host ~]# tail -f /var/log/cloud-init-output.log +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +LICENSE +README.md +kubecolor + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:--100 97 100 97 0 0 260 0 --:--:-- --:--:-- --:--:-- 260 + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 + 0 9.9M 0 0 0 0 0 0 --:--:-- 0:00:01 --:--:--100 9.9M 100 9.9M 0 0 5993k 0 0:00:01 0:00:01 --:--:-- 136M +Userdata End! +Cloud-init v. 19.3-46.amzn2.0.4 finished at Tue, 25 Feb 2025 12:58:35 +0000. Datasource DataSourceEc2. 
Up 86.51 seconds +``` + +**(5) eks ์„ค์ • ํŒŒ์ผ ํ™•์ธ** + +```bash +[root@operator-host ~]# cat myeks.yaml +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: myeks + region: ap-northeast-2 + version: "1.31" + +iam: + withOIDC: true + + serviceAccounts: + - metadata: + name: aws-load-balancer-controller + namespace: kube-system + wellKnownPolicies: + awsLoadBalancerController: true + +vpc: + cidr: 192.168.0.0/16 + clusterEndpoints: + privateAccess: true + publicAccess: true + id: vpc-017a9a38a294509ea + subnets: + public: + ap-northeast-2a: + az: ap-northeast-2a + cidr: 192.168.1.0/24 + id: subnet-011d8d6df3bab1c31 + ap-northeast-2b: + az: ap-northeast-2b + cidr: 192.168.2.0/24 + id: subnet-004ed4a345eecd440 + ap-northeast-2c: + az: ap-northeast-2c + cidr: 192.168.3.0/24 + id: subnet-068e9402c8bb97c66 + +addons: + - name: vpc-cni # no version is specified so it deploys the default version + version: latest # auto discovers the latest available + attachPolicyARNs: # attach IAM policies to the add-on's service account + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy + configurationValues: |- + enableNetworkPolicy: "true" + + - name: kube-proxy + version: latest + + - name: coredns + version: latest + + - name: metrics-server + version: latest + + - name: aws-ebs-csi-driver + version: latest + wellKnownPolicies: + ebsCSIController: true + +managedNodeGroups: +- amiFamily: AmazonLinux2023 + desiredCapacity: 3 + iam: + withAddonPolicies: + certManager: true + externalDNS: true + instanceType: t3.medium + preBootstrapCommands: + # install additional packages + - "dnf install nvme-cli links tree tcpdump sysstat ipvsadm ipset bind-utils htop -y" + labels: + alpha.eksctl.io/cluster-name: myeks + alpha.eksctl.io/nodegroup-name: ng1 + maxPodsPerNode: 60 + maxSize: 3 + minSize: 3 + name: ng1 + ssh: + allow: true + publicKeyName: kp-aews + tags: + alpha.eksctl.io/nodegroup-name: ng1 + alpha.eksctl.io/nodegroup-type: 
managed + volumeIOPS: 3000 + volumeSize: 60 + volumeThroughput: 125 + volumeType: gp3 +``` + +**(6) cloud-init ์ •์ƒ ์™„๋ฃŒ ํ›„ eksctl ์‹คํ–‰ ๊ณผ์ • ๋กœ๊ทธ ํ™•์ธ** + +```bash +[root@operator-host ~]# more create-eks.log +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +2025-02-25 21:57:51 [โ–ถ] Setting credentials expiry window to 30 minutes +2025-02-25 21:57:51 [โ–ถ] role ARN for the current session is "arn:aws:iam::378102432899:user/eks-user" +2025-02-25 21:57:52 [โ„น] eksctl version 0.204.0 +2025-02-25 21:57:52 [โ„น] using region ap-northeast-2 +2025-02-25 21:57:52 [โœ”] using existing VPC (vpc-017a9a38a294509ea) and subnets (private:map[] public:map[ap-northeast-2a:{subnet-011d8d6df3bab1c31 ap-northeast-2a 192.168.1.0/24 0 } ap-northeast-2b:{subnet-004ed4a345eecd440 ap-northeast-2b 192.168.2.0/24 0 } ap-northeast-2c:{subnet-068e9402c8bb97c66 ap-northeast-2c 192.168.3.0/24 0 }]) +2025-02-25 21:57:52 [!] custom VPC/subnets will be used; if resulting cluster doesn't function as expected, make sure to review the configuration of VPC/subnets +2025-02-25 21:57:52 [โ„น] nodegroup "ng1" will use "" [AmazonLinux2023/1.31] +2025-02-25 21:57:52 [โ„น] using EC2 key pair "kp-aews" +2025-02-25 21:57:52 [โ„น] using Kubernetes version 1.31 +2025-02-25 21:57:52 [โ„น] creating EKS cluster "myeks" in "ap-northeast-2" region with managed nodes +2025-02-25 21:57:52 [โ–ถ] cfg.json = \ +{ + "kind": "ClusterConfig", + "apiVersion": "eksctl.io/v1alpha5", + "metadata": { + "name": "myeks", + "region": "ap-northeast-2", + "version": "1.31" + }, + "iam": { + "withOIDC": true, + "serviceAccounts": [ + { + "metadata": { + "name": "aws-load-balancer-controller", + "namespace": "kube-system" + }, + "wellKnownPolicies": { + "imageBuilder": false, + "autoScaler": false, + "awsLoadBalancerController": true, + "externalDNS": false, + "certManager": false, + "ebsCSIController": false, + "efsCSIController": false + } + } + ], + "vpcResourceControllerPolicy": true + }, + "accessConfig": { + 
"authenticationMode": "API_AND_CONFIG_MAP" + }, + "vpc": { + "id": "vpc-017a9a38a294509ea", + "cidr": "192.168.0.0/16", + "subnets": { + "public": { + "ap-northeast-2a": { + "id": "subnet-011d8d6df3bab1c31", + "az": "ap-northeast-2a", + "cidr": "192.168.1.0/24" + }, + "ap-northeast-2b": { + "id": "subnet-004ed4a345eecd440", + "az": "ap-northeast-2b", + "cidr": "192.168.2.0/24" + }, + "ap-northeast-2c": { + "id": "subnet-068e9402c8bb97c66", + "az": "ap-northeast-2c", + "cidr": "192.168.3.0/24" +--More--(10%) +``` + +**(7) ์…ธ ์ข…๋ฃŒ ๋ช…๋ น ์‹คํ–‰** + +```bash +[root@operator-host ~]# exit +``` + +## ๐Ÿš€ **AWS EKS ์„ค์น˜ ํ™•์ธ (์Šคํƒ ์ƒ์„ฑ ์‹œ์ž‘ ํ›„ ์•ฝ 20๋ถ„ ๊ฒฝ๊ณผ)** + +### **1. eksctl ํด๋Ÿฌ์Šคํ„ฐ ์กฐํšŒ** + +```bash +eksctl get cluster +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +NAME REGION EKSCTL CREATED +myeks ap-northeast-2 True +``` + +### **2. ํด๋Ÿฌ์Šคํ„ฐ ๋…ธ๋“œ ๊ทธ๋ฃน ์กฐํšŒ** + +```bash +eksctl get nodegroup --cluster $CLUSTER_NAME +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +CLUSTER NODEGROUP STATUS CREATED MIN SIZEMAX SIZE DESIRED CAPACITY INSTANCE TYPE IMAGE ID ASG NAME TYPE +myeks ng1 CREATING 2025-02-25T13:12:08Z 3 3 3 t3.medium AL2023_x86_64_STANDARD managed +``` + +### **3. ํด๋Ÿฌ์Šคํ„ฐ ์• ๋“œ์˜จ ์กฐํšŒ** + +```bash +eksctl get addon --cluster $CLUSTER_NAME +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +2025-02-25 22:13:56 [โ„น] Kubernetes version "1.31" in use by cluster "myeks" +2025-02-25 22:13:56 [โ„น] getting all addons +2025-02-25 22:13:57 [โ„น] to see issues for an addon run `eksctl get addon --name --cluster ` +NAME VERSION STATUS ISSUES IAMROLEUPDATE AVAILABLE CONFIGURATION VALUES POD IDENTITY ASSOCIATION ROLES +coredns v1.11.4-eksbuild.2 DEGRADED 1 +kube-proxy v1.31.3-eksbuild.2 ACTIVE 0 +metrics-server v0.7.2-eksbuild.2 DEGRADED 1 +vpc-cni v1.19.2-eksbuild.5 ACTIVE 0 arn:aws:iam::378102432899:role/eksctl-myeks-addon-vpc-cni-Role1-Q7K66W6aHXAn enableNetworkPolicy: "true" +``` + +### **4. 
ํด๋Ÿฌ์Šคํ„ฐ IAM ์„œ๋น„์Šค ๊ณ„์ • ์กฐํšŒ** + +```bash +eksctl get iamserviceaccount --cluster $CLUSTER_NAME +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +NAMESPACE NAME ROLE ARN +kube-system aws-load-balancer-controller arn:aws:iam::378102432899:role/eksctl-myeks-addon-iamserviceaccount-kube-sys-Role1-RDjfak64nvXd +``` + +### **5. kubeconfig ์ƒ์„ฑ** + +**(1) ์ž๊ฒฉ์ฆ๋ช… ์‚ฌ์šฉ์ž ํ™•์ธ** + +```bash +aws sts get-caller-identity --query Arn +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +"arn:aws:iam::378102432899:user/eks-user" +``` + +**(2) kubeconfig ์—…๋ฐ์ดํŠธ ๋ช…๋ น ์‹คํ–‰** + +```bash +aws eks update-kubeconfig --name myeks --user-alias eks-user # ์ถœ๋ ฅ๋œ ์ž๊ฒฉ์ฆ๋ช… ์‚ฌ์šฉ์ž + +# ๊ฒฐ๊ณผ +Added new context eks-user to /home/devshin/.kube/config +``` + +### **6. Kubernetes ํด๋Ÿฌ์Šคํ„ฐ ๋ฐ ๋ฆฌ์†Œ์Šค ์ƒํƒœ ํ™•์ธ** + +**(1) ํด๋Ÿฌ์Šคํ„ฐ ์ •๋ณด ์กฐํšŒ** + +```bash +kubectl cluster-info +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +Kubernetes control plane is running at https://79E4B0C88ABCA6E051CDC256189CC3B2.gr7.ap-northeast-2.eks.amazonaws.com +CoreDNS is running at https://79E4B0C88ABCA6E051CDC256189CC3B2.gr7.ap-northeast-2.eks.amazonaws.com/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy + +To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. 
+``` + +**(2) ๊ธฐ๋ณธ ๋„ค์ž„์ŠคํŽ˜์ด์Šค ์„ค์ •** + +```bash +kubectl ns default +``` + +**(3) ๋…ธ๋“œ ์ •๋ณด ์กฐํšŒ** + +- **๋””๋ฒ„๊ทธ ๋ ˆ๋ฒจ 6์œผ๋กœ ๋…ธ๋“œ ์ •๋ณด ์กฐํšŒ** + +```bash +kubectl get node -v6 +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +I0225 22:24:07.471314 48192 loader.go:402] Config loaded from file: /home/devshin/.kube/config +I0225 22:24:07.471812 48192 envvar.go:172] "Feature gate default state" feature="ClientsAllowCBOR" enabled=false +I0225 22:24:07.471828 48192 envvar.go:172] "Feature gate default state" feature="ClientsPreferCBOR" enabled=false +I0225 22:24:07.471837 48192 envvar.go:172] "Feature gate default state" feature="InformerResourceVersion" enabled=false +I0225 22:24:07.471844 48192 envvar.go:172] "Feature gate default state" feature="WatchListClient" enabled=false +I0225 22:24:07.905428 48192 round_trippers.go:560] GET https://79E4B0C88ABCA6E051CDC256189CC3B2.gr7.ap-northeast-2.eks.amazonaws.com/api/v1/nodes?limit=500 200 OK in 427 milliseconds +NAME STATUS ROLES AGE VERSION +ip-192-168-1-51.ap-northeast-2.compute.internal Ready 10m v1.31.5-eks-5d632ec +ip-192-168-2-42.ap-northeast-2.compute.internal Ready 10m v1.31.5-eks-5d632ec +ip-192-168-3-30.ap-northeast-2.compute.internal Ready 10m v1.31.5-eks-5d632ec +``` + +- **์ธ์Šคํ„ด์Šค ์œ ํ˜•, ์šฉ๋Ÿ‰ ์œ ํ˜•, ๊ฐ€์šฉ ์˜์—ญ ๋ผ๋ฒจ ์ •๋ณด ์ƒ์„ธ ์กฐํšŒ** + +```bash +kubectl get node --label-columns=node.kubernetes.io/instance-type,eks.amazonaws.com/capacityType,topology.kubernetes.io/zone +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +NAME STATUS ROLES AGE VERSION INSTANCE-TYPE CAPACITYTYPE ZONE +ip-192-168-1-51.ap-northeast-2.compute.internal Ready 12m v1.31.5-eks-5d632ec t3.medium ON_DEMAND ap-northeast-2a +ip-192-168-2-42.ap-northeast-2.compute.internal Ready 12m v1.31.5-eks-5d632ec t3.medium ON_DEMAND ap-northeast-2b +ip-192-168-3-30.ap-northeast-2.compute.internal Ready 12m v1.31.5-eks-5d632ec t3.medium ON_DEMAND ap-northeast-2c +``` + +**(4) ํŒŒ๋“œ ์ •๋ณด ์กฐํšŒ** + +```bash +kubectl get pod -A +``` + 
+โœ…ย **์ถœ๋ ฅ** + +```bash +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system aws-node-p4v96 2/2 Running 0 12m +kube-system aws-node-qgc5t 2/2 Running 0 12m +kube-system aws-node-r5nbp 2/2 Running 0 12m +kube-system coredns-86f5954566-c8wl2 1/1 Running 0 18m +kube-system coredns-86f5954566-d6vwh 1/1 Running 0 18m +kube-system ebs-csi-controller-7f8f8cb84-p57xw 6/6 Running 0 10m +kube-system ebs-csi-controller-7f8f8cb84-z4t4z 6/6 Running 0 10m +kube-system ebs-csi-node-gdh58 3/3 Running 0 10m +kube-system ebs-csi-node-hx5jb 3/3 Running 0 10m +kube-system ebs-csi-node-j46zg 3/3 Running 0 10m +kube-system kube-proxy-s6tdr 1/1 Running 0 12m +kube-system kube-proxy-v8nh9 1/1 Running 0 12m +kube-system kube-proxy-z9l58 1/1 Running 0 12m +kube-system metrics-server-6bf5998d9c-c8tbf 1/1 Running 0 18m +kube-system metrics-server-6bf5998d9c-tftq9 1/1 Running 0 18m +``` + +**(5) ํŒŒ๋“œ ์ค‘๋‹จ ํ—ˆ์šฉ(PDB) ์กฐํšŒ** + +```bash +kubectl get pdb -n kube-system +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE +coredns N/A 1 1 18m +ebs-csi-controller N/A 1 1 10m +metrics-server N/A 1 1 18m +``` + +### **7. krew ํ”Œ๋Ÿฌ๊ทธ์ธ ํ™•์ธ** + +```bash +kubectl krew list +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +PLUGIN VERSION +ctx v0.9.5 +df-pv v0.3.0 +get-all v1.3.8 +krew v0.4.4 +neat v2.0.4 +stern v1.32.0 +``` + +## **๐Ÿ”Œ ๋…ธ๋“œ IP ์ •๋ณด ํ™•์ธ ๋ฐ SSH ์ ‘์†** + +### **1. 
EC2 ๊ณต์ธ IP ๋ณ€์ˆ˜ ์ง€์ •** + +```bash +export N1=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=myeks-ng1-Node" "Name=availability-zone,Values=ap-northeast-2a" --query 'Reservations[*].Instances[*].PublicIpAddress' --output text) +export N2=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=myeks-ng1-Node" "Name=availability-zone,Values=ap-northeast-2b" --query 'Reservations[*].Instances[*].PublicIpAddress' --output text) +export N3=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=myeks-ng1-Node" "Name=availability-zone,Values=ap-northeast-2c" --query 'Reservations[*].Instances[*].PublicIpAddress' --output text) +echo $N1, $N2, $N3 +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +15.164.227.37, 3.38.205.159, 43.200.163.0 +``` + +### **2. EC2 ๋ณด์•ˆ ๊ทธ๋ฃน ์กฐํšŒ (remoteAccess ํ•„ํ„ฐ ์ ์šฉ)** + +```bash +aws ec2 describe-security-groups --filters "Name=group-name,Values=*remoteAccess*" | jq +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +{ + "SecurityGroups": [ + { + "GroupId": "sg-0387b57d9b1586fe5", + "IpPermissionsEgress": [ + { + "IpProtocol": "-1", + "UserIdGroupPairs": [], + "IpRanges": [ + { + "CidrIp": "0.0.0.0/0" + } + ], + "Ipv6Ranges": [], + "PrefixListIds": [] + } + ], + "Tags": [ + { + "Key": "Name", + "Value": "eksctl-myeks-nodegroup-ng1/SSH" + }, + { + "Key": "alpha.eksctl.io/cluster-name", + "Value": "myeks" + }, + { + "Key": "alpha.eksctl.io/eksctl-version", + "Value": "0.204.0" + }, + { + "Key": "alpha.eksctl.io/nodegroup-name", + "Value": "ng1" + }, + { + "Key": "eksctl.cluster.k8s.io/v1alpha1/cluster-name", + "Value": "myeks" + }, + { + "Key": "aws:cloudformation:stack-id", + "Value": "arn:aws:cloudformation:ap-northeast-2:378102432899:stack/eksctl-myeks-nodegroup-ng1/0e0c9500-f37a-11ef-b856-0237bb259921" + }, + { + "Key": "alpha.eksctl.io/nodegroup-type", + "Value": "managed" + }, + { + "Key": "aws:cloudformation:stack-name", + "Value": "eksctl-myeks-nodegroup-ng1" + }, + { + "Key": "aws:cloudformation:logical-id", + "Value": 
"SSH" + } + ], + "VpcId": "vpc-017a9a38a294509ea", + "SecurityGroupArn": "arn:aws:ec2:ap-northeast-2:378102432899:security-group/sg-0387b57d9b1586fe5", + "OwnerId": "378102432899", + "GroupName": "eksctl-myeks-nodegroup-ng1-remoteAccess", + "Description": "Allow SSH access", + "IpPermissions": [ + { + "IpProtocol": "tcp", + "FromPort": 22, + "ToPort": 22, + "UserIdGroupPairs": [], + "IpRanges": [ + { + "Description": "Allow SSH access to managed worker nodes in group ng1", + "CidrIp": "0.0.0.0/0" + } + ], + "Ipv6Ranges": [ + { + "Description": "Allow SSH access to managed worker nodes in group ng1", + "CidrIpv6": "::/0" + } + ], + "PrefixListIds": [] + } + ] + } + ] +} +``` + +### **3. ๋ณด์•ˆ ๊ทธ๋ฃน ID ํ™˜๊ฒฝ ๋ณ€์ˆ˜ ์„ค์ •** + +```bash +export MNSGID=$(aws ec2 describe-security-groups --filters "Name=group-name,Values=*remoteAccess*" --query 'SecurityGroups[*].GroupId' --output text) +``` + +### **4. ํ•ด๋‹น ๋ณด์•ˆ๊ทธ๋ฃน ์ธ๋ฐ”์šด๋“œ ๊ทœ์น™์— ๋ณธ์ธ์˜ ์ง‘ ๊ณต์ธ IP ์ถ”๊ฐ€** + +```bash +aws ec2 authorize-security-group-ingress --group-id $MNSGID --protocol '-1' --cidr $(curl -s ipinfo.io/ip)/32 +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +{ + "Return": true, + "SecurityGroupRules": [ + { + "SecurityGroupRuleId": "sgr-0f7c4b1164d618a4c", + "GroupId": "sg-0387b57d9b1586fe5", + "GroupOwnerId": "378102432899", + "IsEgress": false, + "IpProtocol": "-1", + "FromPort": -1, + "ToPort": -1, + "CidrIpv4": "182.230.60.93/32", + "SecurityGroupRuleArn": "arn:aws:ec2:ap-northeast-2:378102432899:security-group-rule/sgr-0f7c4b1164d618a4c" + } + ] +} +``` + +### **5. 
ํ•ด๋‹น ๋ณด์•ˆ ๊ทธ๋ฃน์˜ ์ธ๋ฐ”์šด๋“œ ๊ทœ์น™์— ์šด์˜ ์„œ๋ฒ„ ๋‚ด๋ถ€ IP ์ถ”๊ฐ€** + +```bash +aws ec2 authorize-security-group-ingress --group-id $MNSGID --protocol '-1' --cidr 172.20.1.100/32 +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +{ + "Return": true, + "SecurityGroupRules": [ + { + "SecurityGroupRuleId": "sgr-0831563999950b76b", + "GroupId": "sg-0387b57d9b1586fe5", + "GroupOwnerId": "378102432899", + "IsEgress": false, + "IpProtocol": "-1", + "FromPort": -1, + "ToPort": -1, + "CidrIpv4": "172.20.1.100/32", + "SecurityGroupRuleArn": "arn:aws:ec2:ap-northeast-2:378102432899:security-group-rule/sgr-0831563999950b76b" + } + ] +} +``` + +### **6. ์›Œ์ปค ๋…ธ๋“œ SSH ์ ‘์†** + +**(1) ๊ฐ ๋…ธ๋“œ์— SSH ์›๊ฒฉ ์ ‘์† ํ›„ ํ˜ธ์ŠคํŠธ๋ช… ์ถœ๋ ฅ** + +```bash +for i in $N1 $N2 $N3; do echo ">> node $i <<"; ssh -o StrictHostKeyChecking=no ec2-user@$i hostname; echo; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +>> node 15.164.227.37 << +Warning: Permanently added '15.164.227.37' (ED25519) to the list of known hosts. +ec2-user@15.164.227.37: Permission denied (publickey,gssapi-keyex,gssapi-with-mic). + +>> node 3.38.205.159 << +Warning: Permanently added '3.38.205.159' (ED25519) to the list of known hosts. +ec2-user@3.38.205.159: Permission denied (publickey,gssapi-keyex,gssapi-with-mic). + +>> node 43.200.163.0 << +Warning: Permanently added '43.200.163.0' (ED25519) to the list of known hosts. +ec2-user@43.200.163.0: Permission denied (publickey,gssapi-keyex,gssapi-with-mic). +``` + +**(2) N1 ๋…ธ๋“œ ์ ‘์†** + +```bash +ssh ec2-user@$N1 + +# ๊ฒฐ๊ณผ +A newer release of "Amazon Linux" is available. + Version 2023.6.20250211: + Version 2023.6.20250218: +Run "/usr/bin/dnf check-release-update" for full release and version update info + , #_ + ~\_ ####_ Amazon Linux 2023 + ~~ \_#####\ + ~~ \###| + ~~ \#/ ___ https://aws.amazon.com/linux/amazon-linux-2023 + ~~ V~' '-> + ~~~ / + ~~._. 
_/ + _/ _/ + _/m/' +Last login: Wed Feb 12 05:52:48 2025 from 52.94.123.236 +[ec2-user@ip-192-168-1-51 ~]$ exit +logout +Connection to 15.164.227.37 closed. +``` + +**(3) N2 ๋…ธ๋“œ ์ ‘์†** + +```bash +ssh ec2-user@$N2 + +# ๊ฒฐ๊ณผ +A newer release of "Amazon Linux" is available. + Version 2023.6.20250211: + Version 2023.6.20250218: +Run "/usr/bin/dnf check-release-update" for full release and version update info + , #_ + ~\_ ####_ Amazon Linux 2023 + ~~ \_#####\ + ~~ \###| + ~~ \#/ ___ https://aws.amazon.com/linux/amazon-linux-2023 + ~~ V~' '-> + ~~~ / + ~~._. _/ + _/ _/ + _/m/' +Last login: Wed Feb 12 05:52:48 2025 from 52.94.123.236 +[ec2-user@ip-192-168-2-42 ~]$ exit +logout +Connection to 3.38.205.159 closed. +``` + +**(4) N3 ๋…ธ๋“œ ์ ‘์†** + +```bash +ssh ec2-user@$N3 + +# ๊ฒฐ๊ณผ +A newer release of "Amazon Linux" is available. + Version 2023.6.20250211: + Version 2023.6.20250218: +Run "/usr/bin/dnf check-release-update" for full release and version update info + , #_ + ~\_ ####_ Amazon Linux 2023 + ~~ \_#####\ + ~~ \###| + ~~ \#/ ___ https://aws.amazon.com/linux/amazon-linux-2023 + ~~ V~' '-> + ~~~ / + ~~._. _/ + _/ _/ + _/m/' +Last login: Wed Feb 12 05:52:48 2025 from 52.94.123.236 +[ec2-user@ip-192-168-3-30 ~]$ exit +logout +Connection to 43.200.163.0 closed. +``` + +### **7. 
๋…ธ๋“œ ๊ธฐ๋ณธ ์ •๋ณด ํ™•์ธ** + +**(1) ๋…ธ๋“œ๋ณ„ ์‹œ์Šคํ…œ ์ •๋ณด ์กฐํšŒ** + +```bash +for i in $N1 $N2 $N3; do echo ">> node $i <<"; ssh ec2-user@$i hostnamectl; echo; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +>> node 15.164.227.37 << + Static hostname: ip-192-168-1-51.ap-northeast-2.compute.internal + Icon name: computer-vm + Chassis: vm ๐Ÿ–ด + Machine ID: ec290d14e0f34366b2d3f2ea33b06253 + Boot ID: 89441b0cd579455ca36a97ae72436762 + Virtualization: amazon +Operating System: Amazon Linux 2023.6.20250203 + CPE OS Name: cpe:2.3:o:amazon:amazon_linux:2023 + Kernel: Linux 6.1.127-135.201.amzn2023.x86_64 + Architecture: x86-64 + Hardware Vendor: Amazon EC2 + Hardware Model: t3.medium +Firmware Version: 1.0 + +>> node 3.38.205.159 << + Static hostname: ip-192-168-2-42.ap-northeast-2.compute.internal + Icon name: computer-vm + Chassis: vm ๐Ÿ–ด + Machine ID: ec23b4795af458cac1beebec40e88e9b + Boot ID: 2e16f7e351a642deb439e4360f0e4f5c + Virtualization: amazon +Operating System: Amazon Linux 2023.6.20250203 + CPE OS Name: cpe:2.3:o:amazon:amazon_linux:2023 + Kernel: Linux 6.1.127-135.201.amzn2023.x86_64 + Architecture: x86-64 + Hardware Vendor: Amazon EC2 + Hardware Model: t3.medium +Firmware Version: 1.0 + +>> node 43.200.163.0 << + Static hostname: ip-192-168-3-30.ap-northeast-2.compute.internal + Icon name: computer-vm + Chassis: vm ๐Ÿ–ด + Machine ID: ec203a98663eebbcd25282168fa4a01d + Boot ID: 39dfaedc08cf445ca387e7ec0b9c7823 + Virtualization: amazon +Operating System: Amazon Linux 2023.6.20250203 + CPE OS Name: cpe:2.3:o:amazon:amazon_linux:2023 + Kernel: Linux 6.1.127-135.201.amzn2023.x86_64 + Architecture: x86-64 + Hardware Vendor: Amazon EC2 + Hardware Model: t3.medium +Firmware Version: 1.0 +``` + +**(2) ๋…ธ๋“œ๋ณ„ ๋„คํŠธ์›Œํฌ ์ธํ„ฐํŽ˜์ด์Šค ์ •๋ณด ์กฐํšŒ** + +```bash +for i in $N1 $N2 $N3; do echo ">> node $i <<"; ssh ec2-user@$i sudo ip -c addr; echo; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +>> node 15.164.227.37 << +1: lo: mtu 65536 qdisc noqueue state UNKNOWN 
group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host noprefixroute + valid_lft forever preferred_lft forever +2: ens5: mtu 9001 qdisc mq state UP group default qlen 1000 + link/ether 02:79:1b:57:05:df brd ff:ff:ff:ff:ff:ff + altname enp0s5 + inet 192.168.1.51/24 metric 1024 brd 192.168.1.255 scope global dynamic ens5 + valid_lft 2091sec preferred_lft 2091sec + inet6 fe80::79:1bff:fe57:5df/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +3: eni97f4361e4c2@if3: mtu 9001 qdisc noqueue state UP group default + link/ether 32:96:47:9a:d4:2f brd ff:ff:ff:ff:ff:ff link-netns cni-d5c2a5e6-1a50-b510-b895-2a949906fb4d + inet6 fe80::3096:47ff:fe9a:d42f/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +4: enif5e3248355d@if3: mtu 9001 qdisc noqueue state UP group default + link/ether c2:2e:98:dd:be:8d brd ff:ff:ff:ff:ff:ff link-netns cni-914d38b7-bff1-33e3-c759-762f2221a32a + inet6 fe80::c02e:98ff:fedd:be8d/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +5: ens6: mtu 9001 qdisc mq state UP group default qlen 1000 + link/ether 02:ee:52:94:22:3d brd ff:ff:ff:ff:ff:ff + altname enp0s6 + inet 192.168.1.137/24 brd 192.168.1.255 scope global ens6 + valid_lft forever preferred_lft forever + inet6 fe80::ee:52ff:fe94:223d/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever + +>> node 3.38.205.159 << +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host noprefixroute + valid_lft forever preferred_lft forever +2: ens5: mtu 9001 qdisc mq state UP group default qlen 1000 + link/ether 06:8d:29:e9:31:07 brd ff:ff:ff:ff:ff:ff + altname enp0s5 + inet 192.168.2.42/24 metric 1024 brd 192.168.2.255 scope global 
dynamic ens5 + valid_lft 2091sec preferred_lft 2091sec + inet6 fe80::48d:29ff:fee9:3107/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +3: eni98403b04a75@if3: mtu 9001 qdisc noqueue state UP group default + link/ether 02:d7:7d:25:42:c3 brd ff:ff:ff:ff:ff:ff link-netns cni-1c292e75-bd4d-12d0-5ce3-a6a0d152a92f + inet6 fe80::d7:7dff:fe25:42c3/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +4: enib3cc1ab608a@if3: mtu 9001 qdisc noqueue state UP group default + link/ether 7e:c2:a3:56:c6:38 brd ff:ff:ff:ff:ff:ff link-netns cni-18f2ec3b-04f9-6d85-85c0-38893a1630e2 + inet6 fe80::7cc2:a3ff:fe56:c638/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +5: ens6: mtu 9001 qdisc mq state UP group default qlen 1000 + link/ether 06:31:11:9e:26:ad brd ff:ff:ff:ff:ff:ff + altname enp0s6 + inet 192.168.2.136/24 brd 192.168.2.255 scope global ens6 + valid_lft forever preferred_lft forever + inet6 fe80::431:11ff:fe9e:26ad/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever + +>> node 43.200.163.0 << +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host noprefixroute + valid_lft forever preferred_lft forever +2: ens5: mtu 9001 qdisc mq state UP group default qlen 1000 + link/ether 0a:67:d0:6f:e6:9b brd ff:ff:ff:ff:ff:ff + altname enp0s5 + inet 192.168.3.30/24 metric 1024 brd 192.168.3.255 scope global dynamic ens5 + valid_lft 2087sec preferred_lft 2087sec + inet6 fe80::867:d0ff:fe6f:e69b/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +3: eni356985de846@if3: mtu 9001 qdisc noqueue state UP group default + link/ether d6:cd:95:65:d8:36 brd ff:ff:ff:ff:ff:ff link-netns cni-d3e410d2-9acb-0b2a-15eb-577b3914e495 + inet6 fe80::d4cd:95ff:fe65:d836/64 scope link proto kernel_ll + valid_lft forever preferred_lft 
forever +4: eni7432c2a8810@if3: mtu 9001 qdisc noqueue state UP group default + link/ether ee:79:9f:e1:ae:27 brd ff:ff:ff:ff:ff:ff link-netns cni-c80bddf0-cde4-c490-17c1-38c73da57ae0 + inet6 fe80::ec79:9fff:fee1:ae27/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +5: eni8a456b324b3@if3: mtu 9001 qdisc noqueue state UP group default + link/ether 8e:46:59:f7:d9:26 brd ff:ff:ff:ff:ff:ff link-netns cni-23fd0956-85bd-af34-5a2c-fb5d936419d7 + inet6 fe80::8c46:59ff:fef7:d926/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +6: enid3abba3d96f@if3: mtu 9001 qdisc noqueue state UP group default + link/ether f6:99:0c:7c:00:1d brd ff:ff:ff:ff:ff:ff link-netns cni-db4e1b60-fa45-df03-34e4-7db99d67f7a2 + inet6 fe80::f499:cff:fe7c:1d/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +7: ens6: mtu 9001 qdisc mq state UP group default qlen 1000 + link/ether 0a:4a:30:b7:cd:cb brd ff:ff:ff:ff:ff:ff + altname enp0s6 + inet 192.168.3.77/24 brd 192.168.3.255 scope global ens6 + valid_lft forever preferred_lft forever + inet6 fe80::84a:30ff:feb7:cdcb/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +8: eni0910e13e62f@if3: mtu 9001 qdisc noqueue state UP group default + link/ether 92:95:95:ba:c4:b5 brd ff:ff:ff:ff:ff:ff link-netns cni-3cc85dca-5044-6749-7925-406cfe916181 + inet6 fe80::9095:95ff:feba:c4b5/64 scope link proto kernel_ll + valid_lft forever preferred_lft forever +``` + +**(3) ๋…ธ๋“œ๋ณ„ ๋ธ”๋ก ๋””๋ฐ”์ด์Šค ์กฐํšŒ** + +```bash +for i in $N1 $N2 $N3; do echo ">> node $i <<"; ssh ec2-user@$i lsblk; echo; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +>> node 15.164.227.37 << +NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS +nvme0n1 259:0 0 60G 0 disk +โ”œโ”€nvme0n1p1 259:1 0 60G 0 part / +โ”œโ”€nvme0n1p127 259:2 0 1M 0 part +โ””โ”€nvme0n1p128 259:3 0 10M 0 part /boot/efi + +>> node 3.38.205.159 << +NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS +nvme0n1 259:0 0 60G 0 disk +โ”œโ”€nvme0n1p1 259:1 0 60G 0 part 
/
+โ”œโ”€nvme0n1p127 259:2    0   1M  0 part
+โ””โ”€nvme0n1p128 259:3    0  10M  0 part /boot/efi
+
+>> node 43.200.163.0 <<
+NAME          MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
+nvme0n1       259:0    0  60G  0 disk
+โ”œโ”€nvme0n1p1   259:1    0  60G  0 part /
+โ”œโ”€nvme0n1p127 259:2    0   1M  0 part
+โ””โ”€nvme0n1p128 259:3    0  10M  0 part /boot/efi
+```
+
+**(4) ๋…ธ๋“œ๋ณ„ ๋ฃจํŠธ ํŒŒ์ผ์‹œ์Šคํ…œ ์šฉ๋Ÿ‰ ์กฐํšŒ**
+
+```bash
+for i in $N1 $N2 $N3; do echo ">> node $i <<"; ssh ec2-user@$i df -hT /; echo; done
+```
+
+โœ…ย **์ถœ๋ ฅ**
+
+```bash
+>> node 15.164.227.37 <<
+Filesystem Type Size Used Avail Use% Mounted on
+/dev/nvme0n1p1 xfs 60G 3.3G 57G 6% /
+
+>> node 3.38.205.159 <<
+Filesystem Type Size Used Avail Use% Mounted on
+/dev/nvme0n1p1 xfs 60G 3.3G 57G 6% /
+
+>> node 43.200.163.0 <<
+Filesystem Type Size Used Avail Use% Mounted on
+/dev/nvme0n1p1 xfs 60G 3.3G 57G 6% /
+```
+
+### **8. ์Šคํ† ๋ฆฌ์ง€ํด๋ž˜์Šค ์กฐํšŒ**
+
+```bash
+kubectl get sc
+```
+
+โœ…ย **์ถœ๋ ฅ**
+
+```bash
+NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
+gp2 kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 36m
+```
+
+### **9. CSI ๋…ธ๋“œ ์กฐํšŒ**
+
+```bash
+kubectl get csinodes
+```
+
+โœ…ย **์ถœ๋ ฅ**
+
+```bash
+NAME DRIVERS AGE
+ip-192-168-1-51.ap-northeast-2.compute.internal 1 26m
+ip-192-168-2-42.ap-northeast-2.compute.internal 1 26m
+ip-192-168-3-30.ap-northeast-2.compute.internal 1 26m
+```
+
+### **10. 
maxPods ์„ค์ • ํ™•์ธ** + +**(1) ๋…ธ๋“œ ์ตœ๋Œ€ ํŒŒ๋“œ ์ˆ˜ ์กฐํšŒ** + +```bash +kubectl get nodes -o custom-columns="NAME:.metadata.name,MAXPODS:.status.capacity.pods" +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +NAME MAXPODS +ip-192-168-1-51.ap-northeast-2.compute.internal 60 +ip-192-168-2-42.ap-northeast-2.compute.internal 60 +ip-192-168-3-30.ap-northeast-2.compute.internal 60 +``` + +**(2) ๋…ธ๋“œ๋ณ„ kubelet ๊ธฐ๋ณธ ์„ค์ •์—์„œ maxPods ๊ฐ’ ํ™•์ธ** + +```bash +for i in $N1 $N2 $N3; do echo ">> node $i <<"; ssh ec2-user@$i sudo cat /etc/kubernetes/kubelet/config.json | grep maxPods; echo; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +>> node 15.164.227.37 << + "maxPods": 17, + +>> node 3.38.205.159 << + "maxPods": 17, + +>> node 43.200.163.0 << + "maxPods": 17, +``` + +**(3) ๋…ธ๋“œ๋ณ„ kubelet ์ถ”๊ฐ€ ์„ค์ • ํŒŒ์ผ์—์„œ maxPods ๊ฐ’ ํ™•์ธ** + +```bash +for i in $N1 $N2 $N3; do echo ">> node $i <<"; ssh ec2-user@$i sudo cat /etc/kubernetes/kubelet/config.json.d/00-nodeadm.conf | grep maxPods; echo; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +>> node 15.164.227.37 << + "maxPods": 60 + +>> node 3.38.205.159 << + "maxPods": 60 + +>> node 43.200.163.0 << + "maxPods": 60 +``` + +### **11. ์šด์˜ ์„œ๋ฒ„ EC2 SSH ์›๊ฒฉ ์ ‘์† ํ›„ ๊ธฐ๋ณธ ์ •๋ณด ํ™•์ธ** + +**(1) ์šด์˜์„œ๋ฒ„ SSH ์ ‘์†** + +```bash +ssh -i kp-aews.pem ec2-user@$(aws cloudformation describe-stacks --stack-name myeks --query 'Stacks[*].Outputs[0].OutputValue' --output text) + +Warning: Identity file kp-aews.pem not accessible: No such file or directory. +Last login: Tue Feb 25 22:17:38 2025 from 182.230.60.93 + , #_ + ~\_ ####_ Amazon Linux 2 + ~~ \_#####\ + ~~ \###| AL2 End of Life is 2026-06-30. + ~~ \#/ ___ + ~~ V~' '-> + ~~~ / A newer version of Amazon Linux is available! + ~~._. _/ + _/ _/ Amazon Linux 2023, GA and supported until 2028-03-15. 
+ _/m/' https://aws.amazon.com/linux/amazon-linux-2023/ + +Last login: Tue Feb 25 22:17:38 KST 2025 on pts/0 +(eks-user@myeks:N/A) [root@operator-host ~]# +``` + +**(2) default ๋„ค์ž„์ŠคํŽ˜์ด์Šค ์ ์šฉ** + +```bash +(eks-user@myeks:N/A) [root@operator-host ~]# k ns default +# ๊ฒฐ๊ณผ +Context "eks-user@myeks.ap-northeast-2.eksctl.io" modified. +Active namespace is "default". +``` + +**(3) ํ™˜๊ฒฝ๋ณ€์ˆ˜ ์ •๋ณด ํ™•์ธ** + +```bash +(eks-user@myeks:default) [root@operator-host ~]# export | egrep 'ACCOUNT|AWS_|CLUSTER|KUBERNETES|VPC|Subnet' | egrep -v 'KEY' +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +declare -x ACCOUNT_ID="xxxxxxxxxxxx" +declare -x AWS_DEFAULT_REGION="ap-northeast-2" +declare -x AWS_PAGER="" +declare -x CLUSTER_NAME="myeks" +declare -x KUBERNETES_VERSION="1.31" +declare -x PubSubnet1="subnet-011d8d6df3bab1c31" +declare -x PubSubnet2="subnet-004ed4a345eecd440" +declare -x PubSubnet3="subnet-068e9402c8bb97c66" +declare -x VPCID="vpc-017a9a38a294509ea" +``` + +**(4) krew ํ”Œ๋Ÿฌ๊ทธ์ธ ํ™•์ธ** + +```bash +(eks-user@myeks:default) [root@operator-host ~]# kubectl krew list +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +PLUGIN VERSION +ctx v0.9.5 +df-pv v0.3.0 +get-all v1.3.8 +krew v0.4.4 +neat v2.0.4 +ns v0.9.5 +oomd v0.0.7 +stern v1.32.0 +view-secret v0.13.0 +``` + +### **12. 
์ธ์Šคํ„ด์Šค ์ •๋ณด ์กฐํšŒ** + +```bash +(eks-user@myeks:default) [root@operator-host ~]# aws ec2 describe-instances --query "Reservations[*].Instances[*].{InstanceID:InstanceId, PublicIPAdd:PublicIpAddress, PrivateIPAdd:PrivateIpAddress, InstanceName:Tags[?Key=='Name']|[0].Value, Status:State.Name}" --filters Name=instance-state-name,Values=running --output table +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +--------------------------------------------------------------------------------------- +| DescribeInstances | ++----------------------+-----------------+---------------+----------------+-----------+ +| InstanceID | InstanceName | PrivateIPAdd | PublicIPAdd | Status | ++----------------------+-----------------+---------------+----------------+-----------+ +| i-0c8dd7ee129df9f70 | myeks-ng1-Node | 192.168.3.30 | 43.200.163.0 | running | +| i-0cf5292e9106cff08 | operator-host | 172.20.1.100 | 13.124.11.68 | running | +| i-013587fe8ee35bdf3 | myeks-ng1-Node | 192.168.1.51 | 15.164.227.37 | running | +| i-05e7d5c353553c7ff | myeks-ng1-Node | 192.168.2.42 | 3.38.205.159 | running | ++----------------------+-----------------+---------------+----------------+-----------+ +``` + +### **13. 
PrivateIP ๋ณ€์ˆ˜ ์ง€์ •** + +```bash +(eks-user@myeks:default) [root@operator-host ~]# N1=$(kubectl get node --label-columns=topology.kubernetes.io/zone --selector=topology.kubernetes.io/zone=ap-northeast-2a -o jsonpath={.items[0].status.addresses[0].address}) +(eks-user@myeks:default) [root@operator-host ~]# N2=$(kubectl get node --label-columns=topology.kubernetes.io/zone --selector=topology.kubernetes.io/zone=ap-northeast-2b -o jsonpath={.items[0].status.addresses[0].address}) +(eks-user@myeks:default) [root@operator-host ~]# N3=$(kubectl get node --label-columns=topology.kubernetes.io/zone --selector=topology.kubernetes.io/zone=ap-northeast-2c -o jsonpath={.items[0].status.addresses[0].address}) +(eks-user@myeks:default) [root@operator-host ~]# echo "export N1=$N1" >> /etc/profile +(eks-user@myeks:default) [root@operator-host ~]# echo "export N2=$N2" >> /etc/profile +(eks-user@myeks:default) [root@operator-host ~]# echo "export N3=$N3" >> /etc/profile +(eks-user@myeks:default) [root@operator-host ~]# echo $N1, $N2, $N3 +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +192.168.1.51, 192.168.2.42, 192.168.3.30 +``` + +### **14. ๋…ธ๋“œ IP ๋กœ ping ํ…Œ์ŠคํŠธ** + +```bash +(eks-user@myeks:default) [root@operator-host ~]# for i in $N1 $N2 $N3; do echo ">> node $i <<"; ping -c 1 $i ; echo; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +>> node 192.168.1.51 << +PING 192.168.1.51 (192.168.1.51) 56(84) bytes of data. +64 bytes from 192.168.1.51: icmp_seq=1 ttl=127 time=0.830 ms + +--- 192.168.1.51 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.830/0.830/0.830/0.000 ms + +>> node 192.168.2.42 << +PING 192.168.2.42 (192.168.2.42) 56(84) bytes of data. 
+64 bytes from 192.168.2.42: icmp_seq=1 ttl=127 time=1.04 ms + +--- 192.168.2.42 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 1.047/1.047/1.047/0.000 ms + +>> node 192.168.3.30 << +PING 192.168.3.30 (192.168.3.30) 56(84) bytes of data. +64 bytes from 192.168.3.30: icmp_seq=1 ttl=127 time=1.39 ms + +--- 192.168.3.30 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 1.392/1.392/1.392/0.000 ms +``` + +### **15. kube-ops-view ์„ค์น˜** + +```bash +helm repo add geek-cookbook https://geek-cookbook.github.io/charts/ + +# ๊ฒฐ๊ณผ +"geek-cookbook" already exists with the same configuration, skipping +``` + +```bash +helm install kube-ops-view geek-cookbook/kube-ops-view --version 1.2.2 --set service.main.type=ClusterIP --set env.TZ="Asia/Seoul" --namespace kube-system + +# ๊ฒฐ๊ณผ +NAME: kube-ops-view +LAST DEPLOYED: Tue Feb 25 23:43:00 2025 +NAMESPACE: kube-system +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +1. Get the application URL by running these commands: + export POD_NAME=$(kubectl get pods --namespace kube-system -l "app.kubernetes.io/name=kube-ops-view,app.kubernetes.io/instance=kube-ops-view" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:8080 + +``` + +### **16. 
gp3 ์Šคํ† ๋ฆฌ์ง€ ํด๋ž˜์Šค ์ƒ์„ฑ ๋ฐ ์กฐํšŒ** + +**(1) gp3 ์Šคํ† ๋ฆฌ์ง€ ํด๋ž˜์Šค ์ƒ์„ฑ** + +```bash +cat < 443/TCP 8m54s +service/eks-extension-metrics-api ClusterIP 10.100.95.88 443/TCP 112m +service/kube-dns ClusterIP 10.100.0.10 53/UDP,53/TCP,9153/TCP 107m +service/kube-ops-view ClusterIP 10.100.19.254 8080/TCP 12m +service/metrics-server ClusterIP 10.100.8.241 443/TCP 107m + +NAME ENDPOINTS AGE +endpoints/aws-load-balancer-webhook-service 192.168.1.114:9443,192.168.2.127:9443 8m54s +endpoints/eks-extension-metrics-api 172.0.32.0:10443 112m +endpoints/kube-dns 192.168.3.140:53,192.168.3.184:53,192.168.3.140:53 + 3 more... 107m +endpoints/kube-ops-view 192.168.2.249:8080 12m +endpoints/metrics-server 192.168.3.110:10251,192.168.3.8:10251 107m +``` + +### **23. Kube Ops View ์ ‘์† ์ •๋ณด ํ™•์ธ** + +```bash +echo -e "Kube Ops View URL = https://kubeopsview.$MyDomain/#scale=1.5" +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +Kube Ops View URL = https://kubeopsview.gagajin.com/#scale=1.5 +``` + +**์ ‘์† ํ™”๋ฉด** + +![Image](https://github.com/user-attachments/assets/aa1e8c49-f6a1-4829-be44-1a331a3de445) +--- + +## **๐Ÿ“ฆ Bookinfo ์–ดํ”Œ๋ฆฌ์ผ€์ด์…˜ ๋ฐฐํฌ** + +### **1. Bookinfo ์–ดํ”Œ๋ฆฌ์ผ€์ด์…˜ ๋ฐฐํฌ** + +```bash +kubectl apply -f https://raw.githubusercontent.com/istio/istio/refs/heads/master/samples/bookinfo/platform/kube/bookinfo.yaml + +# ๊ฒฐ๊ณผ +service/details created +serviceaccount/bookinfo-details created +deployment.apps/details-v1 created +service/ratings created +serviceaccount/bookinfo-ratings created +deployment.apps/ratings-v1 created +service/reviews created +serviceaccount/bookinfo-reviews created +deployment.apps/reviews-v1 created +deployment.apps/reviews-v2 created +deployment.apps/reviews-v3 created +service/productpage created +serviceaccount/bookinfo-productpage created +deployment.apps/productpage-v1 created +``` + +### **2. 
ALB Ingress ์ƒ์„ฑ** + +```bash +cat <.*" ; echo "--------------" ; sleep 1; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +Simple Bookstore App +-------------- +Simple Bookstore App +-------------- +Simple Bookstore App +-------------- +Simple Bookstore App +-------------- +Simple Bookstore App +... +``` + +### **5. productpage ์ ‘์†** + +- **์ดˆ๊ธฐ ์ง„์ž…** + +![Image](https://github.com/user-attachments/assets/91fc1114-6fbc-48e1-9f90-e3aaa74413b7) + +- **์ƒˆ๋กœ ๊ณ ์นจ ํ›„ Reviews์™€ Ratings ๋ณ€๊ฒฝ ์‚ฌํ•ญ ํ™•์ธ** + +![Image](https://github.com/user-attachments/assets/0be6e078-d97f-41b2-bd5e-f8e9aea37a02) + +## **๐Ÿ“Š prometheus-stack ์„ค์น˜** + +### **1. helm ์ €์žฅ์†Œ ์ถ”๊ฐ€** + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + +"prometheus-community" already exists with the same configuration, skipping +``` + +### **2. ํŒŒ๋ผ๋ฏธํ„ฐ ํŒŒ์ผ ์ƒ์„ฑ** + +```bash +cat monitor-values.yaml +prometheus: + prometheusSpec: + scrapeInterval: "15s" + evaluationInterval: "15s" + podMonitorSelectorNilUsesHelmValues: false + serviceMonitorSelectorNilUsesHelmValues: false + retention: 5d + retentionSize: "10GiB" + storageSpec: + volumeClaimTemplate: + spec: + storageClassName: gp3 + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 30Gi + + ingress: + enabled: true + ingressClassName: alb + hosts: + - prometheus.gagajin.com + paths: + - /* + annotations: + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: ip + alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}, {"HTTP":80}]' + alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:ap-northeast-2:378102432899:certificate/f967e8ca-f0b5-471d-bbe4-bee231aeb32b + alb.ingress.kubernetes.io/success-codes: 200-399 + alb.ingress.kubernetes.io/load-balancer-name: myeks-ingress-alb + alb.ingress.kubernetes.io/group.name: study + alb.ingress.kubernetes.io/ssl-redirect: '443' + +grafana: + defaultDashboardsTimezone: Asia/Seoul + 
adminPassword: prom-operator + + ingress: + enabled: true + ingressClassName: alb + hosts: + - grafana.gagajin.com + paths: + - /* + annotations: + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: ip + alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}, {"HTTP":80}]' + alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:ap-northeast-2:378102432899:certificate/f967e8ca-f0b5-471d-bbe4-bee231aeb32b + alb.ingress.kubernetes.io/success-codes: 200-399 + alb.ingress.kubernetes.io/load-balancer-name: myeks-ingress-alb + alb.ingress.kubernetes.io/group.name: study + alb.ingress.kubernetes.io/ssl-redirect: '443' + + persistence: + enabled: true + type: sts + storageClassName: "gp3" + accessModes: + - ReadWriteOnce + size: 20Gi + +alertmanager: + enabled: false +defaultRules: + create: false +kubeControllerManager: + enabled: false +kubeEtcd: + enabled: false +kubeScheduler: + enabled: false +prometheus-windows-exporter: + prometheus: + monitor: + enabled: false +``` + +### **3. helm ๋ฐฐํฌ** + +```bash +helm install kube-prometheus-stack prometheus-community/kube-prometheus-stack --version 69.3.1 \ +-f monitor-values.yaml --create-namespace --namespace monitoring +``` + +โœ…ย **์ถœ๋ ฅ** + +- ๋ฐฐํฌ ํ›„, ํ•ด๋‹น Ingress์™€ ์™ธ๋ถ€ ์ ‘๊ทผ ์„œ๋น„์Šค(ex. ๊ทธ๋ผํŒŒ๋‚˜)๋Š” ๋™์ผํ•œ ALB๋ฅผ ๊ณต์œ ํ•จ + +```bash +NAME: kube-prometheus-stack +LAST DEPLOYED: Wed Feb 26 00:42:35 2025 +NAMESPACE: monitoring +STATUS: deployed +REVISION: 1 +NOTES: +kube-prometheus-stack has been installed. 
Check its status by running: + kubectl --namespace monitoring get pods -l "release=kube-prometheus-stack" + +Get Grafana 'admin' user password by running: + + kubectl --namespace monitoring get secrets kube-prometheus-stack-grafana -o jsonpath="{.data.admin-password}" | base64 -d ; echo + +Access Grafana local instance: + + export POD_NAME=$(kubectl --namespace monitoring get pod -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=kube-prometheus-stack" -oname) + kubectl --namespace monitoring port-forward $POD_NAME 3000 + +Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. +``` + +- 4๊ฐœ์˜ Ingress๊ฐ€ ํ•˜๋‚˜์˜ ALB๋ฅผ ๊ณต์œ ํ•˜๊ณ  ์žˆ์Œ + +![Image](https://github.com/user-attachments/assets/38e6ff60-ff55-4ab7-94ac-66012a9e07e8) + +--- + +## **๐Ÿ“ Logging in EKS** + +### **1. ๋ชจ๋“  ๋กœ๊น… ํ™œ์„ฑํ™”** + +```bash +aws eks update-cluster-config --region ap-northeast-2 --name $CLUSTER_NAME \ + --logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":true}]}' +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +{ + "update": { + "id": "ea2559f4-c4d8-3661-8735-63667c8ff514", + "status": "InProgress", + "type": "LoggingUpdate", + "params": [ + { + "type": "ClusterLogging", + "value": "{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}" + } + ], + "createdAt": "2025-02-26T00:53:57.522000+09:00", + "errors": [] + } +} +``` + +### **2. 
๋กœ๊ทธ ๊ทธ๋ฃน ํ™•์ธ** + +```bash +aws logs describe-log-groups | jq +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +{ + "logGroups": [ + { + "logGroupName": "/aws/eks/myeks/cluster", + "creationTime": 1740498852305, + "metricFilterCount": 0, + "arn": "arn:aws:logs:ap-northeast-2:378102432899:log-group:/aws/eks/myeks/cluster:*", + "storedBytes": 0, + "logGroupClass": "STANDARD", + "logGroupArn": "arn:aws:logs:ap-northeast-2:378102432899:log-group:/aws/eks/myeks/cluster" + } + ] +} +``` + +- ๋กœ๊ทธ ๊ทธ๋ฃน์€ AWS CloudWatch์˜ **Log Groups > Log Streams**์—์„œ ํ™•์ธ ๊ฐ€๋Šฅ + +![Image](https://github.com/user-attachments/assets/4f7cb4fc-1c2d-4e52-846c-9c2bd0d7f062) + +- **Log Streams ์ค‘ ํ•˜๋‚˜๋ฅผ ์„ ํƒํ•œ ํ™”๋ฉด** + +![Image](https://github.com/user-attachments/assets/a831686e-56c8-42c7-a8d4-01f1d6925268) + +- **ํด๋ฆญ ์‹œ ๊ฐ ๋กœ๊ทธ์˜ ์ƒ์„ธ ์ •๋ณด ํ™•์ธ ๊ฐ€๋Šฅ** + +![Image](https://github.com/user-attachments/assets/c9285e6d-612c-4e44-93ba-5ba8bcb7a127) + +### **3. ์‹ ๊ทœ ๋กœ๊ทธ ์‹ค์‹œ๊ฐ„ ์ถœ๋ ฅ** + +```bash +aws logs tail /aws/eks/$CLUSTER_NAME/cluster --follow +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +2025-02-25T15:54:13.360000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"2b3c4273-a038-4a6f-abd5-edc834383b56","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/kube-scheduler?timeout=5s","verb":"update","user":{"username":"system:kube-scheduler","groups":["system:authenticated"]},"sourceIPs":["10.0.116.214"],"userAgent":"kube-scheduler/v1.31.5 (linux/amd64) 
kubernetes/226ac90/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"kube-scheduler","uid":"01d6ca7e-dd37-473d-aa26-01494c12e266","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"1724"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:11:03.969196Z","stageTimestamp":"2025-02-25T13:11:03.979371Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"system:kube-scheduler\" of ClusterRole \"system:kube-scheduler\" to User \"system:kube-scheduler\""}} +2025-02-25T15:54:13.360000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"4df656f9-d98e-4235-82df-765e8fbff0f6","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/kube-controller-manager?timeout=5s","verb":"update","user":{"username":"system:kube-controller-manager","groups":["system:authenticated"]},"sourceIPs":["10.0.116.214"],"userAgent":"kube-controller-manager/v1.31.5 (linux/amd64) kubernetes/226ac90/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"kube-controller-manager","uid":"c29f94f4-c32a-4257-bb39-a55ef1a5d344","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"1725"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:11:04.040056Z","stageTimestamp":"2025-02-25T13:11:04.047041Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"system:kube-controller-manager\" of ClusterRole \"system:kube-controller-manager\" to User \"system:kube-controller-manager\""}} +2025-02-25T15:54:13.360000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"7561cbc1-669c-483d-beb4-e2d5f476db84","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/cp-vpc-resource-controller","verb":"update","user":{"username":"eks:vpc-resource-controller","groups":["system:authenticated"]},"sourceIPs":["10.0.116.214"],"userAgent":"controller/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"cp-vpc-resource-controller","uid":"f59db5c0-b9d4-44a6-ae4a-5ac89057d314","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"1715"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:11:04.141659Z","stageTimestamp":"2025-02-25T13:11:04.149050Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"eks-vpc-resource-controller-rolebinding/kube-system\" of Role \"eks-vpc-resource-controller-role\" to User \"eks:vpc-resource-controller\""}} +2025-02-25T15:54:13.360000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Request","auditID":"1161d386-d9d5-4fb4-b502-badd4b8183f2","stage":"ResponseComplete","requestURI":"/apis/rbac.authorization.k8s.io/v1/roles?allowWatchBookmarks=true\u0026resourceVersion=1\u0026timeout=7m40s\u0026timeoutSeconds=460\u0026watch=true","verb":"watch","user":{"username":"system:apiserver","uid":"9ca9c2e9-c750-4409-8614-48aa8a617154","groups":["system:authenticated","system:masters"]},"sourceIPs":["::1"],"userAgent":"kube-apiserver/v1.31.5 (linux/amd64) 
kubernetes/226ac90","objectRef":{"resource":"roles","apiGroup":"rbac.authorization.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:03:24.390790Z","stageTimestamp":"2025-02-25T13:11:04.391153Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":""}} +2025-02-25T15:54:13.360000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Request","auditID":"1c3da5ba-2fbc-4d35-b44b-ffac911ef1a2","stage":"ResponseComplete","requestURI":"/api/v1/resourcequotas?allowWatchBookmarks=true\u0026resourceVersion=1\u0026timeout=7m40s\u0026timeoutSeconds=460\u0026watch=true","verb":"watch","user":{"username":"system:apiserver","uid":"9ca9c2e9-c750-4409-8614-48aa8a617154","groups":["system:authenticated","system:masters"]},"sourceIPs":["::1"],"userAgent":"kube-apiserver/v1.31.5 (linux/amd64) kubernetes/226ac90","objectRef":{"resource":"resourcequotas","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:03:24.390287Z","stageTimestamp":"2025-02-25T13:11:04.391156Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":""}} +2025-02-25T15:54:13.360000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Request","auditID":"16bbd516-cfef-4d53-b89c-c4f1748d4298","stage":"ResponseStarted","requestURI":"/api/v1/resourcequotas?allowWatchBookmarks=true\u0026resourceVersion=1719\u0026timeout=7m28s\u0026timeoutSeconds=448\u0026watch=true","verb":"watch","user":{"username":"system:apiserver","uid":"9ca9c2e9-c750-4409-8614-48aa8a617154","groups":["system:authenticated","system:masters"]},"sourceIPs":["::1"],"userAgent":"kube-apiserver/v1.31.5 (linux/amd64) 
kubernetes/226ac90","objectRef":{"resource":"resourcequotas","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:11:04.391651Z","stageTimestamp":"2025-02-25T13:11:04.392143Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":""}} +2025-02-25T15:54:13.360000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Request","auditID":"24eccec6-3653-47a8-aeda-a673e79def24","stage":"ResponseStarted","requestURI":"/apis/rbac.authorization.k8s.io/v1/roles?allowWatchBookmarks=true\u0026resourceVersion=1709\u0026timeout=5m5s\u0026timeoutSeconds=305\u0026watch=true","verb":"watch","user":{"username":"system:apiserver","uid":"9ca9c2e9-c750-4409-8614-48aa8a617154","groups":["system:authenticated","system:masters"]},"sourceIPs":["::1"],"userAgent":"kube-apiserver/v1.31.5 (linux/amd64) kubernetes/226ac90","objectRef":{"resource":"roles","apiGroup":"rbac.authorization.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:11:04.391661Z","stageTimestamp":"2025-02-25T13:11:04.392143Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":""}} +2025-02-25T15:54:13.360000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"bf6b9d21-54b8-41a6-805f-2876e4e548a7","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/eks-coredns-autoscaler","verb":"get","user":{"username":"eks:coredns-autoscaler","groups":["system:authenticated"]},"sourceIPs":["10.0.116.214"],"userAgent":"controller/v0.0.0 (linux/amd64) 
kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"eks-coredns-autoscaler","apiGroup":"coordination.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:11:04.726606Z","stageTimestamp":"2025-02-25T13:11:04.736303Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"eks:coredns-autoscaler/kube-system\" of Role \"eks:coredns-autoscaler\" to User \"eks:coredns-autoscaler\""}} +2025-02-25T15:54:13.360000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"e923c96c-37ad-46e1-b8cf-cfdd70a223b4","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/eks-coredns-autoscaler","verb":"update","user":{"username":"eks:coredns-autoscaler","groups":["system:authenticated"]},"sourceIPs":["10.0.116.214"],"userAgent":"controller/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"eks-coredns-autoscaler","uid":"932dade7-3029-44d3-97fe-d3e2ce464d77","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"1717"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:11:04.737429Z","stageTimestamp":"2025-02-25T13:11:04.744137Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"eks:coredns-autoscaler/kube-system\" of Role \"eks:coredns-autoscaler\" to User \"eks:coredns-autoscaler\""}} +2025-02-25T15:54:13.360000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"6ae84f9d-e668-48d2-ad5b-b2ce59de7016","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/cloud-controller-manager?timeout=5s","verb":"update","user":{"username":"eks:cloud-controller-manager","groups":["system:authenticated"]},"sourceIPs":["10.0.116.214"],"userAgent":"aws-cloud-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"cloud-controller-manager","uid":"1c063b11-ea0f-4687-957a-64abd9ba643f","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"1726"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:11:05.063753Z","stageTimestamp":"2025-02-25T13:11:05.069911Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"eks:cloud-controller-manager\" of ClusterRole \"eks:cloud-controller-manager\" to User \"eks:cloud-controller-manager\""}} +... +``` + +### **4. 
ํ•„ํ„ฐ ์ ์šฉ ์‹ ๊ทœ ๋กœ๊ทธ ์‹ค์‹œ๊ฐ„ ์ถœ๋ ฅ** + +- **kube-proxy** + +```bash +aws logs tail /aws/eks/$CLUSTER_NAME/cluster --filter-pattern kube-proxy +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +2025-02-25T15:54:16.406000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"a82382c7-8df1-404c-bf42-2ef6298fe4b1","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/external-snapshotter-leader-ebs-csi-aws-com","verb":"update","user":{"username":"system:serviceaccount:kube-system:ebs-csi-controller-sa","uid":"3f266af7-bcc9-4b75-9cd1-ef47f6fc1abb","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=865e7205-fc52-4566-9fce-c2cff49b97e0"],"authentication.kubernetes.io/node-name":["ip-192-168-1-51.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["58a47c72-0c03-4bac-b427-6b6b69b61305"],"authentication.kubernetes.io/pod-name":["ebs-csi-controller-7f8f8cb84-z4t4z"],"authentication.kubernetes.io/pod-uid":["5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee"]}},"sourceIPs":["192.168.1.226"],"userAgent":"csi-snapshotter/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"leases","namespace":"kube-system","name":"external-snapshotter-leader-ebs-csi-aws-com","uid":"92ac90d6-7010-493c-b66b-e2ae3b219877","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"14090"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T14:00:19.049041Z","stageTimestamp":"2025-02-25T14:00:19.058387Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"ebs-csi-leases-rolebinding/kube-system\" of Role \"ebs-csi-leases-role\" to ServiceAccount \"ebs-csi-controller-sa/kube-system\""}} +2025-02-25T15:54:16.406000+00:00 
kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"35842df4-2154-4f96-85ee-fda63be025df","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/cloud-controller-manager?timeout=5s","verb":"get","user":{"username":"eks:cloud-controller-manager","groups":["system:authenticated"]},"sourceIPs":["10.0.166.208"],"userAgent":"aws-cloud-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"cloud-controller-manager","apiGroup":"coordination.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T14:00:19.276815Z","stageTimestamp":"2025-02-25T14:00:19.281496Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"eks:cloud-controller-manager\" of ClusterRole \"eks:cloud-controller-manager\" to User \"eks:cloud-controller-manager\""}} +2025-02-25T15:54:16.406000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"3af25ed2-f450-411c-be26-fb4b8c4705c6","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/eks-certificates-controller","verb":"get","user":{"username":"eks:certificate-controller","groups":["system:authenticated"]},"sourceIPs":["10.0.166.208"],"userAgent":"eks-certificates-controller/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"leases","namespace":"kube-system","name":"eks-certificates-controller","apiGroup":"coordination.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T14:00:19.597957Z","stageTimestamp":"2025-02-25T14:00:19.602796Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: 
allowed by RoleBinding \"eks:certificate-controller/kube-system\" of Role \"eks:certificate-controller\" to User \"eks:certificate-controller\""}} +2025-02-25T15:54:16.406000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"c1f85bd8-e8a2-401f-933c-7b0719df8801","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/ip-192-168-2-42.ap-northeast-2.compute.internal?timeout=10s","verb":"update","user":{"username":"system:node:ip-192-168-2-42.ap-northeast-2.compute.internal","uid":"aws-iam-authenticator:378102432899:AROAVQCFJISBU4MUNGJ6F","groups":["system:nodes","system:authenticated"],"extra":{"accessKeyId":["ASIAVQCFJISBXQFDTLPR"],"arn":["arn:aws:sts::378102432899:assumed-role/eksctl-myeks-nodegroup-ng1-NodeInstanceRole-L6JI06tBfF9M/i-05e7d5c353553c7ff"],"canonicalArn":["arn:aws:iam::378102432899:role/eksctl-myeks-nodegroup-ng1-NodeInstanceRole-L6JI06tBfF9M"],"principalId":["AROAVQCFJISBU4MUNGJ6F"],"sessionName":["i-05e7d5c353553c7ff"],"sigs.k8s.io/aws-iam-authenticator/principalId":["AROAVQCFJISBU4MUNGJ6F"]}},"sourceIPs":["192.168.2.42"],"userAgent":"kubelet/v1.31.5 (linux/amd64) kubernetes/5fcf7ca","objectRef":{"resource":"leases","namespace":"kube-node-lease","name":"ip-192-168-2-42.ap-northeast-2.compute.internal","uid":"8aede9d1-e662-4a6e-8485-9111f8361d31","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"14075"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T14:00:20.402559Z","stageTimestamp":"2025-02-25T14:00:20.411611Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":""}} +2025-02-25T15:54:16.406000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"39c6e8c0-3efc-4487-bbf9-c91b02f80ab3","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/external-resizer-ebs-csi-aws-com","verb":"update","user":{"username":"system:serviceaccount:kube-system:ebs-csi-controller-sa","uid":"3f266af7-bcc9-4b75-9cd1-ef47f6fc1abb","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=865e7205-fc52-4566-9fce-c2cff49b97e0"],"authentication.kubernetes.io/node-name":["ip-192-168-1-51.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["58a47c72-0c03-4bac-b427-6b6b69b61305"],"authentication.kubernetes.io/pod-name":["ebs-csi-controller-7f8f8cb84-z4t4z"],"authentication.kubernetes.io/pod-uid":["5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee"]}},"sourceIPs":["192.168.1.226"],"userAgent":"csi-resizer/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"leases","namespace":"kube-system","name":"external-resizer-ebs-csi-aws-com","uid":"3f6b69bd-79a5-4eec-afbb-2e4dde349fd2","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"14098"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T14:00:20.535694Z","stageTimestamp":"2025-02-25T14:00:20.549846Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"ebs-csi-leases-rolebinding/kube-system\" of Role \"ebs-csi-leases-role\" to ServiceAccount \"ebs-csi-controller-sa/kube-system\""}} +2025-02-25T15:54:16.407000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"1e99e72b-20d7-40ac-8c19-7d0993709028","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/kube-controller-manager?timeout=5s","verb":"get","user":{"username":"system:kube-controller-manager","groups":["system:authenticated"]},"sourceIPs":["10.0.166.208"],"userAgent":"kube-controller-manager/v1.31.5 (linux/amd64) kubernetes/226ac90/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"kube-controller-manager","apiGroup":"coordination.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T14:00:21.019326Z","stageTimestamp":"2025-02-25T14:00:21.023031Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"system:kube-controller-manager\" of ClusterRole \"system:kube-controller-manager\" to User \"system:kube-controller-manager\""}} +2025-02-25T15:54:16.407000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Request","auditID":"00642e57-9ceb-42e9-970b-d2a37f34246b","stage":"ResponseComplete","requestURI":"/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true\u0026resourceVersion=12312\u0026timeout=7m14s\u0026timeoutSeconds=434\u0026watch=true","verb":"watch","user":{"username":"system:serviceaccount:kube-system:ebs-csi-controller-sa","uid":"3f266af7-bcc9-4b75-9cd1-ef47f6fc1abb","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=865e7205-fc52-4566-9fce-c2cff49b97e0"],"authentication.kubernetes.io/node-name":["ip-192-168-1-51.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["58a47c72-0c03-4bac-b427-6b6b69b61305"],"authentication.kubernetes.io/pod-name":["ebs-csi-controller-7f8f8cb84-z4t4z"],"authentication.kubernetes.io/pod-uid":["5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee"]}},"sourceIPs":["192.168.1.226"],"userAgent":"csi-provisioner/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"storageclasses","apiGroup":"storage.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T13:53:07.025573Z","stageTimestamp":"2025-02-25T14:00:21.027134Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"ebs-csi-provisioner-binding\" of ClusterRole \"ebs-external-provisioner-role\" to ServiceAccount \"ebs-csi-controller-sa/kube-system\""}} +2025-02-25T15:54:16.407000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Request","auditID":"3145c12d-9b32-407d-8a52-4197e4992f8e","stage":"ResponseStarted","requestURI":"/apis/storage.k8s.io/v1/storageclasses?allowWatchBookmarks=true\u0026resourceVersion=14112\u0026timeout=9m27s\u0026timeoutSeconds=567\u0026watch=true","verb":"watch","user":{"username":"system:serviceaccount:kube-system:ebs-csi-controller-sa","uid":"3f266af7-bcc9-4b75-9cd1-ef47f6fc1abb","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=865e7205-fc52-4566-9fce-c2cff49b97e0"],"authentication.kubernetes.io/node-name":["ip-192-168-1-51.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["58a47c72-0c03-4bac-b427-6b6b69b61305"],"authentication.kubernetes.io/pod-name":["ebs-csi-controller-7f8f8cb84-z4t4z"],"authentication.kubernetes.io/pod-uid":["5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee"]}},"sourceIPs":["192.168.1.226"],"userAgent":"csi-provisioner/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"storageclasses","apiGroup":"storage.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T14:00:21.028988Z","stageTimestamp":"2025-02-25T14:00:21.029785Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"ebs-csi-provisioner-binding\" of ClusterRole \"ebs-external-provisioner-role\" to ServiceAccount \"ebs-csi-controller-sa/kube-system\""}} +2025-02-25T15:54:16.407000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"d34b1bc7-7757-408a-a570-0b14475f7903","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/ebs-csi-aws-com","verb":"update","user":{"username":"system:serviceaccount:kube-system:ebs-csi-controller-sa","uid":"3f266af7-bcc9-4b75-9cd1-ef47f6fc1abb","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=865e7205-fc52-4566-9fce-c2cff49b97e0"],"authentication.kubernetes.io/node-name":["ip-192-168-1-51.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["58a47c72-0c03-4bac-b427-6b6b69b61305"],"authentication.kubernetes.io/pod-name":["ebs-csi-controller-7f8f8cb84-z4t4z"],"authentication.kubernetes.io/pod-uid":["5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee"]}},"sourceIPs":["192.168.1.226"],"userAgent":"csi-provisioner/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"leases","namespace":"kube-system","name":"ebs-csi-aws-com","uid":"1f757aa3-b7b7-4f26-a1b4-f45e6cd16f2e","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"14105"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T14:00:21.830745Z","stageTimestamp":"2025-02-25T14:00:21.839082Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"ebs-csi-leases-rolebinding/kube-system\" of Role \"ebs-csi-leases-role\" to ServiceAccount \"ebs-csi-controller-sa/kube-system\""}} +2025-02-25T15:54:16.407000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"5dee7581-096d-4e9c-9881-44c3e323292c","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/cloud-controller-manager?timeout=5s","verb":"get","user":{"username":"eks:cloud-controller-manager","groups":["system:authenticated"]},"sourceIPs":["10.0.166.208"],"userAgent":"aws-cloud-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"cloud-controller-manager","apiGroup":"coordination.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T14:00:22.000008Z","stageTimestamp":"2025-02-25T14:00:22.004881Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"eks:cloud-controller-manager\" of ClusterRole \"eks:cloud-controller-manager\" to User \"eks:cloud-controller-manager\""}} +2025-02-25T15:54:16.407000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"5702a190-62e1-4c05-8a38-47eaa94d7374","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/external-resizer-ebs-csi-aws-com","verb":"get","user":{"username":"system:serviceaccount:kube-system:ebs-csi-controller-sa","uid":"3f266af7-bcc9-4b75-9cd1-ef47f6fc1abb","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=31351c1c-35cf-4748-a3f2-851d2e72fa8b"],"authentication.kubernetes.io/node-name":["ip-192-168-2-42.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["65d9b899-3d7d-4a54-a9b7-7fd4c0be0af4"],"authentication.kubernetes.io/pod-name":["ebs-csi-controller-7f8f8cb84-p57xw"],"authentication.kubernetes.io/pod-uid":["4cf5ca4a-fae6-490f-9757-6fb2f93248bf"]}},"sourceIPs":["192.168.2.47"],"userAgent":"csi-resizer/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"leases","namespace":"kube-system","name":"external-resizer-ebs-csi-aws-com","apiGroup":"coordination.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T14:00:22.178659Z","stageTimestamp":"2025-02-25T14:00:22.182656Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"ebs-csi-leases-rolebinding/kube-system\" of Role \"ebs-csi-leases-role\" to ServiceAccount \"ebs-csi-controller-sa/kube-system\""}} +... +``` + +### **5. 
๋กœ๊ทธ ์ŠคํŠธ๋ฆผ ์‹ค์‹œ๊ฐ„ ์ถœ๋ ฅ** + +- **kube-apiserver ๋กœ๊ทธ ํ™•์ธ** + +```bash +aws logs tail /aws/eks/$CLUSTER_NAME/cluster --log-stream-name-prefix kube-apiserver --follow +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +2025-02-25T15:59:06.241000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"b06fe424-afeb-4e3f-910b-e05548a7b5b2","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/ip-192-168-2-42.ap-northeast-2.compute.internal?timeout=10s","verb":"update","user":{"username":"system:node:ip-192-168-2-42.ap-northeast-2.compute.internal","uid":"aws-iam-authenticator:378102432899:AROAVQCFJISBU4MUNGJ6F","groups":["system:nodes","system:authenticated"],"extra":{"accessKeyId":["ASIAVQCFJISBVL7MICKQ"],"arn":["arn:aws:sts::378102432899:assumed-role/eksctl-myeks-nodegroup-ng1-NodeInstanceRole-L6JI06tBfF9M/i-05e7d5c353553c7ff"],"canonicalArn":["arn:aws:iam::378102432899:role/eksctl-myeks-nodegroup-ng1-NodeInstanceRole-L6JI06tBfF9M"],"principalId":["AROAVQCFJISBU4MUNGJ6F"],"sessionName":["i-05e7d5c353553c7ff"],"sigs.k8s.io/aws-iam-authenticator/principalId":["AROAVQCFJISBU4MUNGJ6F"]}},"sourceIPs":["192.168.2.42"],"userAgent":"kubelet/v1.31.5 (linux/amd64) kubernetes/5fcf7ca","objectRef":{"resource":"leases","namespace":"kube-node-lease","name":"ip-192-168-2-42.ap-northeast-2.compute.internal","uid":"8aede9d1-e662-4a6e-8485-9111f8361d31","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"46557"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:06.088440Z","stageTimestamp":"2025-02-25T15:59:06.097720Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":""}} +2025-02-25T15:59:06.323000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"5bd76ccc-9d05-4ebc-9125-32f27207c823","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/kube-controller-manager?timeout=5s","verb":"update","user":{"username":"system:kube-controller-manager","groups":["system:authenticated"]},"sourceIPs":["10.0.116.214"],"userAgent":"kube-controller-manager/v1.31.5 (linux/amd64) kubernetes/226ac90/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"kube-controller-manager","uid":"c29f94f4-c32a-4257-bb39-a55ef1a5d344","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"46592"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:05.978575Z","stageTimestamp":"2025-02-25T15:59:05.989628Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"system:kube-controller-manager\" of ClusterRole \"system:kube-controller-manager\" to User \"system:kube-controller-manager\""}} +2025-02-25T15:59:06.323000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"87f7d01e-cee5-49e6-bdd1-7b8196c41235","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/eks-coredns-autoscaler","verb":"get","user":{"username":"eks:coredns-autoscaler","groups":["system:authenticated"]},"sourceIPs":["10.0.116.214"],"userAgent":"controller/v0.0.0 (linux/amd64) 
kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"eks-coredns-autoscaler","apiGroup":"coordination.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:06.137901Z","stageTimestamp":"2025-02-25T15:59:06.143032Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"eks:coredns-autoscaler/kube-system\" of Role \"eks:coredns-autoscaler\" to User \"eks:coredns-autoscaler\""}} +2025-02-25T15:59:06.323000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"7478ba71-efe6-4415-85eb-4c2836c1e572","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/eks-coredns-autoscaler","verb":"update","user":{"username":"eks:coredns-autoscaler","groups":["system:authenticated"]},"sourceIPs":["10.0.116.214"],"userAgent":"controller/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"eks-coredns-autoscaler","uid":"932dade7-3029-44d3-97fe-d3e2ce464d77","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"46580"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:06.143926Z","stageTimestamp":"2025-02-25T15:59:06.156049Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"eks:coredns-autoscaler/kube-system\" of Role \"eks:coredns-autoscaler\" to User \"eks:coredns-autoscaler\""}} +2025-02-25T15:59:06.573000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"ef9046f2-1f70-41d3-a7a2-f4d64f57e4a4","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/aws-load-balancer-controller-leader","verb":"update","user":{"username":"system:serviceaccount:kube-system:aws-load-balancer-controller","uid":"995573f3-a0e4-4424-9868-81cbf2295751","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=e5b23c6c-a2ce-448f-9aa8-0422d01ebae3"],"authentication.kubernetes.io/node-name":["ip-192-168-1-51.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["58a47c72-0c03-4bac-b427-6b6b69b61305"],"authentication.kubernetes.io/pod-name":["aws-load-balancer-controller-554fbd9d-kbctn"],"authentication.kubernetes.io/pod-uid":["dc66f5b2-ef3f-4ea7-908f-0db4fbd2f275"]}},"sourceIPs":["192.168.1.114"],"userAgent":"controller/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"aws-load-balancer-controller-leader","uid":"29062207-ba15-45fd-a7f3-52b4580d16f3","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"46593"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:06.198663Z","stageTimestamp":"2025-02-25T15:59:06.221411Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"aws-load-balancer-controller-leader-election-rolebinding/kube-system\" of Role \"aws-load-balancer-controller-leader-election-role\" to ServiceAccount \"aws-load-balancer-controller/kube-system\""}} +2025-02-25T15:59:06.828000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"abab7321-e086-4097-b246-924c9f1769b6","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/external-attacher-leader-ebs-csi-aws-com","verb":"get","user":{"username":"system:serviceaccount:kube-system:ebs-csi-controller-sa","uid":"3f266af7-bcc9-4b75-9cd1-ef47f6fc1abb","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=ebc1f5d6-37da-45f3-b291-82cf0fcc6367"],"authentication.kubernetes.io/node-name":["ip-192-168-2-42.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["65d9b899-3d7d-4a54-a9b7-7fd4c0be0af4"],"authentication.kubernetes.io/pod-name":["ebs-csi-controller-7f8f8cb84-p57xw"],"authentication.kubernetes.io/pod-uid":["4cf5ca4a-fae6-490f-9757-6fb2f93248bf"]}},"sourceIPs":["192.168.2.47"],"userAgent":"csi-attacher/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"leases","namespace":"kube-system","name":"external-attacher-leader-ebs-csi-aws-com","apiGroup":"coordination.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:06.454289Z","stageTimestamp":"2025-02-25T15:59:06.457958Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"ebs-csi-leases-rolebinding/kube-system\" of Role \"ebs-csi-leases-role\" to ServiceAccount \"ebs-csi-controller-sa/kube-system\""}} +2025-02-25T15:59:07.075000+00:00 kube-apiserver-audit-4e3006095bad62c5b3575ab7dbd2cfbb 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"47e16d35-f40b-4fdf-a301-4f1bd38e5879","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/cloud-controller-manager?timeout=5s","verb":"update","user":{"username":"eks:cloud-controller-manager","groups":["system:authenticated"]},"sourceIPs":["10.0.116.214"],"userAgent":"aws-cloud-controller-manager/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"cloud-controller-manager","uid":"1c063b11-ea0f-4687-957a-64abd9ba643f","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"46596"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:06.593881Z","stageTimestamp":"2025-02-25T15:59:06.601897Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"eks:cloud-controller-manager\" of ClusterRole \"eks:cloud-controller-manager\" to User \"eks:cloud-controller-manager\""}} +2025-02-25T15:59:07.244000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"7715eca9-a8e1-4752-a460-0515516362fd","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/external-attacher-leader-ebs-csi-aws-com","verb":"update","user":{"username":"system:serviceaccount:kube-system:ebs-csi-controller-sa","uid":"3f266af7-bcc9-4b75-9cd1-ef47f6fc1abb","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=09a67c38-ccca-4b60-aed4-964aac7bfc6b"],"authentication.kubernetes.io/node-name":["ip-192-168-1-51.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["58a47c72-0c03-4bac-b427-6b6b69b61305"],"authentication.kubernetes.io/pod-name":["ebs-csi-controller-7f8f8cb84-z4t4z"],"authentication.kubernetes.io/pod-uid":["5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee"]}},"sourceIPs":["192.168.1.226"],"userAgent":"csi-attacher/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"leases","namespace":"kube-system","name":"external-attacher-leader-ebs-csi-aws-com","uid":"a567568d-f785-4767-b0a2-204ca3a0fb95","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"46581"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:06.183577Z","stageTimestamp":"2025-02-25T15:59:06.190737Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"ebs-csi-leases-rolebinding/kube-system\" of Role \"ebs-csi-leases-role\" to ServiceAccount \"ebs-csi-controller-sa/kube-system\""}} +2025-02-25T15:59:07.244000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"76427af1-8ce6-467b-8c5e-1d54f3ba12b5","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/external-resizer-ebs-csi-aws-com","verb":"get","user":{"username":"system:serviceaccount:kube-system:ebs-csi-controller-sa","uid":"3f266af7-bcc9-4b75-9cd1-ef47f6fc1abb","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=ebc1f5d6-37da-45f3-b291-82cf0fcc6367"],"authentication.kubernetes.io/node-name":["ip-192-168-2-42.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["65d9b899-3d7d-4a54-a9b7-7fd4c0be0af4"],"authentication.kubernetes.io/pod-name":["ebs-csi-controller-7f8f8cb84-p57xw"],"authentication.kubernetes.io/pod-uid":["4cf5ca4a-fae6-490f-9757-6fb2f93248bf"]}},"sourceIPs":["192.168.2.47"],"userAgent":"csi-resizer/v0.0.0 (linux/amd64) kubernetes/$Format","objectRef":{"resource":"leases","namespace":"kube-system","name":"external-resizer-ebs-csi-aws-com","apiGroup":"coordination.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:07.058273Z","stageTimestamp":"2025-02-25T15:59:07.063437Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"ebs-csi-leases-rolebinding/kube-system\" of Role \"ebs-csi-leases-role\" to ServiceAccount \"ebs-csi-controller-sa/kube-system\""}} +2025-02-25T15:59:07.244000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"7edc0aeb-e68d-40e2-96a5-5a795501f0e8","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/kube-system/leases/amazon-network-policy-controller-k8s","verb":"get","user":{"username":"eks:network-policy-controller","groups":["system:authenticated"]},"sourceIPs":["10.0.166.208"],"userAgent":"controller/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"kube-system","name":"amazon-network-policy-controller-k8s","apiGroup":"coordination.k8s.io","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:07.064113Z","stageTimestamp":"2025-02-25T15:59:07.070093Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by RoleBinding \"eks:network-policy-controller/kube-system\" of Role \"eks:network-policy-controller\" to User \"eks:network-policy-controller\""}} +2025-02-25T15:59:07.244000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Request","auditID":"a2b02ed1-1be2-463c-8397-ff0bdbb72a96","stage":"ResponseComplete","requestURI":"/api/v1/nodes","verb":"list","user":{"username":"system:serviceaccount:kube-system:kube-ops-view","uid":"36439bb7-a7d4-464c-95fe-cfb4ff623176","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=220d0150-9f1a-4707-b10c-070055e30a53"],"authentication.kubernetes.io/node-name":["ip-192-168-2-42.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["65d9b899-3d7d-4a54-a9b7-7fd4c0be0af4"],"authentication.kubernetes.io/pod-name":["kube-ops-view-657dbc6cd8-pxkvr"],"authentication.kubernetes.io/pod-uid":["37bf2637-622f-4ee2-9804-a73d74e16f0e"]}},"sourceIPs":["192.168.2.249"],"userAgent":"pykube-ng/20.4.1","objectRef":{"resource":"nodes","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:07.188115Z","stageTimestamp":"2025-02-25T15:59:07.196179Z","annotations":{"authentication.k8s.io/stale-token":"subject: system:serviceaccount:kube-system:kube-ops-view, seconds after warning threshold: 959","authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"kube-ops-view\" of ClusterRole \"kube-ops-view\" to ServiceAccount \"kube-ops-view/kube-system\""}} +2025-02-25T15:59:07.244000+00:00 kube-apiserver-audit-4ff1d80de4851cc512375bb0568780f7 
{"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Request","auditID":"6265477b-e567-4f35-89e4-bdbaf878bc61","stage":"ResponseComplete","requestURI":"/api/v1/pods","verb":"list","user":{"username":"system:serviceaccount:kube-system:kube-ops-view","uid":"36439bb7-a7d4-464c-95fe-cfb4ff623176","groups":["system:serviceaccounts","system:serviceaccounts:kube-system","system:authenticated"],"extra":{"authentication.kubernetes.io/credential-id":["JTI=220d0150-9f1a-4707-b10c-070055e30a53"],"authentication.kubernetes.io/node-name":["ip-192-168-2-42.ap-northeast-2.compute.internal"],"authentication.kubernetes.io/node-uid":["65d9b899-3d7d-4a54-a9b7-7fd4c0be0af4"],"authentication.kubernetes.io/pod-name":["kube-ops-view-657dbc6cd8-pxkvr"],"authentication.kubernetes.io/pod-uid":["37bf2637-622f-4ee2-9804-a73d74e16f0e"]}},"sourceIPs":["192.168.2.249"],"userAgent":"pykube-ng/20.4.1","objectRef":{"resource":"pods","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2025-02-25T15:59:07.202326Z","stageTimestamp":"2025-02-25T15:59:07.226347Z","annotations":{"authentication.k8s.io/stale-token":"subject: system:serviceaccount:kube-system:kube-ops-view, seconds after warning threshold: 959","authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding \"kube-ops-view\" of ClusterRole \"kube-ops-view\" to ServiceAccount \"kube-ops-view/kube-system\""}} +... 
+``` + +- **CoreDNS scale ์ถ•์†Œ (2๊ฐœ โ†’ 1๊ฐœ)** + +```bash +(eks-user@myeks:default) [root@operator-host ~]# kubectl scale deployment -n kube-system coredns --replicas=1 +# ๊ฒฐ๊ณผ +deployment.apps/coredns scaled +``` + +```bash +aws logs tail /aws/eks/$CLUSTER_NAME/cluster --log-stream-name-prefix kube-controller-manager --follow +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +2025-02-25T16:11:02.000000+00:00 kube-controller-manager-4e3006095bad62c5b3575ab7dbd2cfbb I0225 16:11:02.919710 9 replica_set.go:624] "Too many replicas" logger="replicaset-controller" replicaSet="kube-system/coredns-86f5954566" need=1 deleting=1 +2025-02-25T16:11:02.000000+00:00 kube-controller-manager-4e3006095bad62c5b3575ab7dbd2cfbb I0225 16:11:02.919757 9 replica_set.go:251] "Found related ReplicaSets" logger="replicaset-controller" replicaSet="kube-system/coredns-86f5954566" relatedReplicaSets=["kube-system/coredns-86f5954566"] +2025-02-25T16:11:02.000000+00:00 kube-controller-manager-4e3006095bad62c5b3575ab7dbd2cfbb I0225 16:11:02.919823 9 controller_utils.go:608] "Deleting pod" logger="replicaset-controller" controller="coredns-86f5954566" pod="kube-system/coredns-86f5954566-c8wl2" +2025-02-25T16:11:02.000000+00:00 kube-controller-manager-4e3006095bad62c5b3575ab7dbd2cfbb I0225 16:11:02.980081 9 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-86f5954566" duration="60.447223ms" +2025-02-25T16:11:02.000000+00:00 kube-controller-manager-4e3006095bad62c5b3575ab7dbd2cfbb I0225 16:11:02.993874 9 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-86f5954566" duration="13.736734ms" +2025-02-25T16:11:02.000000+00:00 kube-controller-manager-4e3006095bad62c5b3575ab7dbd2cfbb I0225 16:11:02.996455 9 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-86f5954566" duration="79.629ยตs" +2025-02-25T16:11:08.000000+00:00 
kube-controller-manager-4e3006095bad62c5b3575ab7dbd2cfbb I0225 16:11:08.243402 9 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-86f5954566" duration="102.085ยตs" +2025-02-25T16:11:09.000000+00:00 kube-controller-manager-4e3006095bad62c5b3575ab7dbd2cfbb I0225 16:11:09.191159 9 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-86f5954566" duration="101.657ยตs" +2025-02-25T16:11:09.000000+00:00 kube-controller-manager-4e3006095bad62c5b3575ab7dbd2cfbb I0225 16:11:09.200150 9 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-86f5954566" duration="121.163ยตs" +``` + +### **6. CloudWatch Log Insights** + +**(1) `/aws/eks/myeks/cluster` ๋กœ๊ทธ ๊ทธ๋ฃน์„ ์„ ํƒ ํ›„ ์ฟผ๋ฆฌ ์‹คํ–‰** + +![Image](https://github.com/user-attachments/assets/aac43f46-c6d5-41ec-beba-ba77f04fc764) + +**(2) EC2 Instance๊ฐ€ NodeNotReady ์ƒํƒœ์ธ ๋กœ๊ทธ ๊ฒ€์ƒ‰** + +```bash +fields @timestamp, @message +| filter @message like /NodeNotReady/ +| sort @timestamp desc +``` + +ํ˜„์žฌ ๋…ธ๋“œ์— ์ด์ƒ์ด ์—†์–ด ๊ฒฐ๊ณผ๊ฐ€ ์—†์Œ + +![Image](https://github.com/user-attachments/assets/d1188f42-62d0-464b-b9d0-6db2e85a0472) + +**(3) kube-apiserver-audit ๋กœ๊ทธ์—์„œ userAgent ์ •๋ ฌํ•ด์„œ ์•„๋ž˜ 4๊ฐœ ํ•„๋“œ ์ •๋ณด ๊ฒ€์ƒ‰** + +```bash +fields userAgent, requestURI, @timestamp, @message +| filter @logStream ~= "kube-apiserver-audit" +| stats count(userAgent) as count by userAgent +| sort count desc +``` + +![Image](https://github.com/user-attachments/assets/b354485a-8078-40e4-9fb5-5905eb635bd9) + +### **7. 
๋กœ๊น… ๋„๊ธฐ** + +**(1) EKS Control Plane ๋กœ๊น…(CloudWatch Logs) ๋น„ํ™œ์„ฑํ™”** + +```bash +eksctl utils update-cluster-logging --cluster $CLUSTER_NAME --region ap-northeast-2 --disable-types all --approve +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +2025-02-26 01:21:15 [โ„น] will update CloudWatch logging for cluster "myeks" in "ap-northeast-2" (no types to enable & disable types: api, audit, authenticator, controllerManager, scheduler) +2025-02-26 01:21:46 [โœ”] configured CloudWatch logging for cluster "myeks" in "ap-northeast-2" (no types enabled & disabled types: api, audit, authenticator, controllerManager, scheduler) +``` + +**(2) ๋กœ๊ทธ ๊ทธ๋ฃน ์‚ญ์ œ** + +```bash +aws logs delete-log-group --log-group-name /aws/eks/$CLUSTER_NAME/cluster +``` + +--- + +## **๐Ÿณ ์ปจํ…Œ์ด๋„ˆ ํŒŒ๋“œ ๋กœ๊น…** + +### **1. NGINX ์›น์„œ๋ฒ„ ๋ฐฐํฌ with Ingress(ALB)** + +```bash +helm repo add bitnami https://charts.bitnami.com/bitnami +helm repo update + +# ๊ฒฐ๊ณผ +"bitnami" has been added to your repositories +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "eks" chart repository +...Successfully got an update from the "prometheus-community" chart repository +...Successfully got an update from the "geek-cookbook" chart repository +...Successfully got an update from the "bitnami" chart repository +Update Complete. โŽˆHappy Helming!โŽˆ +``` + +### **2. ๋„๋ฉ”์ธ, ์ธ์ฆ์„œ ํ™•์ธ** + +```bash +echo $MyDomain $CERT_ARN +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +gagajin.com arn:aws:acm:ap-northeast-2:378102432899:certificate/f967e8ca-f0b5-471d-bbe4-bee231aeb32b +``` + +### **3. 
ํŒŒ๋ผ๋ฏธํ„ฐ ํŒŒ์ผ ์ƒ์„ฑ** + +```bash +cat < nginx-values.yaml +service: + type: NodePort + +networkPolicy: + enabled: false + +resourcesPreset: "nano" + +ingress: + enabled: true + ingressClassName: alb + hostname: nginx.$MyDomain + pathType: Prefix + path: / + annotations: + alb.ingress.kubernetes.io/certificate-arn: $CERT_ARN + alb.ingress.kubernetes.io/group.name: study + alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}, {"HTTP":80}]' + alb.ingress.kubernetes.io/load-balancer-name: $CLUSTER_NAME-ingress-alb + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/ssl-redirect: "443" + alb.ingress.kubernetes.io/success-codes: 200-399 + alb.ingress.kubernetes.io/target-type: ip +EOT +``` + +### **4. ํŒŒ๋ผ๋ฏธํ„ฐ ํŒŒ์ผ ์กฐํšŒ** + +```bash +cat nginx-values.yaml +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +service: + type: NodePort + +networkPolicy: + enabled: false + +resourcesPreset: "nano" + +ingress: + enabled: true + ingressClassName: alb + hostname: nginx.gagajin.com + pathType: Prefix + path: / + annotations: + alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:ap-northeast-2:378102432899:certificate/f967e8ca-f0b5-471d-bbe4-bee231aeb32b + alb.ingress.kubernetes.io/group.name: study + alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}, {"HTTP":80}]' + alb.ingress.kubernetes.io/load-balancer-name: myeks-ingress-alb + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/ssl-redirect: "443" + alb.ingress.kubernetes.io/success-codes: 200-399 + alb.ingress.kubernetes.io/target-type: ip +``` + +### **5. ๋ฐฐํฌ** + +```bash +helm install nginx bitnami/nginx --version 19.0.0 -f nginx-values.yaml +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +NAME: nginx +LAST DEPLOYED: Wed Feb 26 21:22:14 2025 +NAMESPACE: default +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +CHART NAME: nginx +CHART VERSION: 19.0.0 +APP VERSION: 1.27.4 + +Did you know there are enterprise versions of the Bitnami catalog? 
For enhanced secure software supply chain features, unlimited pulls from Docker, LTS support, or application customization, see Bitnami Premium or Tanzu Application Catalog. See https://www.arrow.com/globalecs/na/vendors/bitnami for more information. + +** Please be patient while the chart is being deployed ** +NGINX can be accessed through the following DNS name from within your cluster: + + nginx.default.svc.cluster.local (port 80) + +To access NGINX from outside the cluster, follow the steps below: + +1. Get the NGINX URL and associate its hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + echo "NGINX URL: http://nginx.gagajin.com" + echo "$CLUSTER_IP nginx.gagajin.com" | sudo tee -a /etc/hosts + +WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs: + - cloneStaticSiteFromGit.gitSync.resources + - resources ++info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + +โš  SECURITY WARNING: Original containers have been substituted. This Helm chart was designed, tested, and validated on multiple platforms using a specific set of Bitnami and Tanzu Application Catalog containers. Substituting other containers is likely to cause degraded security and performance, broken chart features, and missing environment variables. + +Substituted images detected: + - docker.io/bitnami/nginx:1.27.4-debian-12-r1 + +โš  WARNING: Original containers have been retagged. Please note this Helm chart was tested, and validated on multiple platforms using a specific set of Tanzu Application Catalog containers. Substituting original image tags could cause unexpected behavior. + +Retagged images: + - docker.io/bitnami/nginx:1.27.4-debian-12-r1 +``` + +### **6. 
Ingress, Deployment, ์„œ๋น„์Šค, ์—”๋“œํฌ์ธํŠธ ์กฐํšŒ - Nginx** + +```bash +kubectl get ingress,deploy,svc,ep nginx +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +NAME CLASS HOSTS ADDRESS PORTS AGE +ingress.networking.k8s.io/nginx alb nginx.gagajin.com myeks-ingress-alb-77245841.ap-northeast-2.elb.amazonaws.com 80 64s + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/nginx 1/1 1 1 64s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/nginx NodePort 10.100.47.81 80:32507/TCP,443:32693/TCP 64s + +NAME ENDPOINTS AGE +endpoints/nginx 192.168.2.63:8443,192.168.2.63:8080 64s +``` + +### **7. ๋ฐฐํฌ ์ƒ์„ธ ์ •๋ณด ํ™•์ธ - Nginx** + +```bash +kubectl describe deploy nginx +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +Name: nginx +Namespace: default +CreationTimestamp: Wed, 26 Feb 2025 21:22:15 +0900 +Labels: app.kubernetes.io/instance=nginx + app.kubernetes.io/managed-by=Helm + app.kubernetes.io/name=nginx + app.kubernetes.io/version=1.27.4 + helm.sh/chart=nginx-19.0.0 +Annotations: deployment.kubernetes.io/revision: 1 + meta.helm.sh/release-name: nginx + meta.helm.sh/release-namespace: default +Selector: app.kubernetes.io/instance=nginx,app.kubernetes.io/name=nginx +Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable +StrategyType: RollingUpdate +MinReadySeconds: 0 +RollingUpdateStrategy: 25% max unavailable, 25% max surge +Pod Template: + Labels: app.kubernetes.io/instance=nginx + app.kubernetes.io/managed-by=Helm + app.kubernetes.io/name=nginx + app.kubernetes.io/version=1.27.4 + helm.sh/chart=nginx-19.0.0 + Service Account: nginx + Init Containers: + preserve-logs-symlinks: + Image: docker.io/bitnami/nginx:1.27.4-debian-12-r1 + Port: + Host Port: + SeccompProfile: RuntimeDefault + Command: + /bin/bash + Args: + -ec + #!/bin/bash + . /opt/bitnami/scripts/libfs.sh + # We copy the logs folder because it has symlinks to stdout and stderr + if ! 
is_dir_empty /opt/bitnami/nginx/logs; then + cp -r /opt/bitnami/nginx/logs /emptydir/app-logs-dir + fi + + Limits: + cpu: 150m + ephemeral-storage: 2Gi + memory: 192Mi + Requests: + cpu: 100m + ephemeral-storage: 50Mi + memory: 128Mi + Environment: + Mounts: + /emptydir from empty-dir (rw) + Containers: + nginx: + Image: docker.io/bitnami/nginx:1.27.4-debian-12-r1 + Ports: 8080/TCP, 8443/TCP + Host Ports: 0/TCP, 0/TCP + SeccompProfile: RuntimeDefault + Limits: + cpu: 150m + ephemeral-storage: 2Gi + memory: 192Mi + Requests: + cpu: 100m + ephemeral-storage: 50Mi + memory: 128Mi + Liveness: tcp-socket :http delay=30s timeout=5s period=10s #success=1 #failure=6 + Readiness: http-get http://:http/ delay=5s timeout=3s period=5s #success=1 #failure=3 + Environment: + BITNAMI_DEBUG: false + NGINX_HTTP_PORT_NUMBER: 8080 + NGINX_HTTPS_PORT_NUMBER: 8443 + Mounts: + /certs from certificate (rw) + /opt/bitnami/nginx/conf from empty-dir (rw,path="app-conf-dir") + /opt/bitnami/nginx/logs from empty-dir (rw,path="app-logs-dir") + /opt/bitnami/nginx/tmp from empty-dir (rw,path="app-tmp-dir") + /tmp from empty-dir (rw,path="tmp-dir") + Volumes: + empty-dir: + Type: EmptyDir (a temporary directory that shares a pod's lifetime) + Medium: + SizeLimit: + certificate: + Type: Secret (a volume populated by a Secret) + SecretName: nginx-tls + Optional: false + Node-Selectors: + Tolerations: +Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable +OldReplicaSets: +NewReplicaSet: nginx-7c94c9bdcb (1/1 replicas created) +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 102s deployment-controller Scaled up replica set nginx-7c94c9bdcb to 1 +``` + +### **8. 
ALB TargetGroupBindings ํ™•์ธ** + +```bash +kubectl get targetgroupbindings +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +NAME SERVICE-NAME SERVICE-PORT TARGET-TYPE AGE +k8s-default-nginx-342d095714 nginx http ip 2m8s +k8s-default-productp-d3c7ff7881 productpage 9080 ip 21h +``` + +### **9. ์ ‘์† ์ฃผ์†Œ ํ™•์ธ ๋ฐ ์ ‘์†** + +**(1) ํ™•์ธ** + +```bash +echo -e "Nginx WebServer URL = https://nginx.$MyDomain" +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +Nginx WebServer URL = https://nginx.gagajin.com +``` + +**(2) ์ ‘์†** + +```bash +curl -s https://nginx.$MyDomain +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash + + + +Welcome to nginx! + + + +
+</head>
+<body>
+<h1>Welcome to nginx!</h1>
+<p>If you see this page, the nginx web server is successfully installed and
+working. Further configuration is required.</p>
+
+<p>For online documentation and support please refer to
+<a href="http://nginx.org/">nginx.org</a>.<br/>
+Commercial support is available at
+<a href="http://nginx.com/">nginx.com</a>.</p>
+
+<p><em>Thank you for using nginx.</em></p>
+</body>
+</html>
+ + +``` + +### **10. ๋ฐ˜๋ณต ์ ‘์†** + +**nginx ๋กœ๊ทธ ๋ชจ๋‹ˆํ„ฐ๋ง์„ ์œ„ํ•ด ๋ฐ˜๋ณต ์ ‘์† ์„ค์ •** + +```bash +while true; do curl -s https://nginx.$MyDomain -I | head -n 1; date; sleep 1; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +HTTP/2 200 +Wed Feb 26 09:27:31 PM KST 2025 +HTTP/2 200 +Wed Feb 26 09:27:33 PM KST 2025 +HTTP/2 200 +Wed Feb 26 09:27:34 PM KST 2025 +... +``` + +--- + +## **โ˜๏ธ CloudWatch Container observability ์„ค์น˜** + +### **1. IRSA ์„ค์ •** + +```bash +eksctl create iamserviceaccount \ + --name cloudwatch-agent \ + --namespace amazon-cloudwatch --cluster $CLUSTER_NAME \ + --role-name $CLUSTER_NAME-cloudwatch-agent-role \ + --attach-policy-arn arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy \ + --role-only \ + --approve +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +2025-02-26 21:46:00 [โ„น] 1 existing iamserviceaccount(s) (kube-system/aws-load-balancer-controller) will be excluded +2025-02-26 21:46:00 [โ„น] 1 iamserviceaccount (amazon-cloudwatch/cloudwatch-agent) was included (based on the include/exclude rules) +2025-02-26 21:46:00 [!] serviceaccounts in Kubernetes will not be created or modified, since the option --role-only is used +2025-02-26 21:46:00 [โ„น] 1 task: { create IAM role for serviceaccount "amazon-cloudwatch/cloudwatch-agent" } +2025-02-26 21:46:00 [โ„น] building iamserviceaccount stack "eksctl-myeks-addon-iamserviceaccount-amazon-cloudwatch-cloudwatch-agent" +2025-02-26 21:46:00 [โ„น] deploying stack "eksctl-myeks-addon-iamserviceaccount-amazon-cloudwatch-cloudwatch-agent" +2025-02-26 21:46:00 [โ„น] waiting for CloudFormation stack "eksctl-myeks-addon-iamserviceaccount-amazon-cloudwatch-cloudwatch-agent" +2025-02-26 21:46:30 [โ„น] waiting for CloudFormation stack "eksctl-myeks-addon-iamserviceaccount-amazon-cloudwatch-cloudwatch-agent" +``` + +### **2. 
addon ๋ฐฐํฌ** + +**(1) AWS ๊ณ„์ • ID ์กฐํšŒ ๋ฐ ํ™˜๊ฒฝ ๋ณ€์ˆ˜ ์„ค์ •** + +```bash +export ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text) +``` + +**(2) EKS ํด๋Ÿฌ์Šคํ„ฐ์— amazon-cloudwatch-observability ์• ๋“œ์˜จ ๋ฐฐํฌ** + +```bash +aws eks create-addon --addon-name amazon-cloudwatch-observability --cluster-name $CLUSTER_NAME --service-account-role-arn arn:aws:iam::$ACCOUNT_ID:role/$CLUSTER_NAME-cloudwatch-agent-role +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +{ + "addon": { + "addonName": "amazon-cloudwatch-observability", + "clusterName": "myeks", + "status": "CREATING", + "addonVersion": "v3.3.1-eksbuild.1", + "health": { + "issues": [] + }, + "addonArn": "arn:aws:eks:ap-northeast-2:378102432899:addon/myeks/amazon-cloudwatch-observability/b8caa12a-714e-a4b9-05b5-a368820767bf", + "createdAt": "2025-02-26T21:56:54.393000+09:00", + "modifiedAt": "2025-02-26T21:56:54.411000+09:00", + "serviceAccountRoleArn": "arn:aws:iam::378102432899:role/myeks-cloudwatch-agent-role", + "tags": {} + } +} +``` + +### **3. addon ํ™•์ธ** + +```bash +aws eks list-addons --cluster-name myeks --output table +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +--------------------------------------- +| ListAddons | ++-------------------------------------+ +|| addons || +|+-----------------------------------+| +|| amazon-cloudwatch-observability || +|| aws-ebs-csi-driver || +|| coredns || +|| kube-proxy || +|| metrics-server || +|| vpc-cni || +|+-----------------------------------+| +``` + +- EKS ํด๋Ÿฌ์Šคํ„ฐ์— amazon-cloudwatch-observability ์• ๋“œ์˜จ์ด ์ถ”๊ฐ€๋จ + +### **4. 
CRD ๋ฐฐํฌ ํ™•์ธ** + +```bash +kubectl get crd | grep -i cloudwatch +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +amazoncloudwatchagents.cloudwatch.aws.amazon.com 2025-02-26T12:57:16Z +dcgmexporters.cloudwatch.aws.amazon.com 2025-02-26T12:57:17Z +instrumentations.cloudwatch.aws.amazon.com 2025-02-26T12:57:17Z +neuronmonitors.cloudwatch.aws.amazon.com 2025-02-26T12:57:17Z +``` + +- CloudWatch ๊ด€๋ จ 4๊ฐœ์˜ CRD๊ฐ€ ๋ฐฐํฌ๋จ + +### **5. amazon-cloudwatch ๋„ค์ž„์ŠคํŽ˜์ด์Šค ๋ฆฌ์†Œ์Šค ์กฐํšŒ** + +```bash +kubectl get ds,pod,cm,sa,amazoncloudwatchagent -n amazon-cloudwatch +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +daemonset.apps/cloudwatch-agent 3 3 3 3 3 kubernetes.io/os=linux 14m +daemonset.apps/cloudwatch-agent-windows 0 0 0 0 0 kubernetes.io/os=windows 14m +daemonset.apps/cloudwatch-agent-windows-container-insights 0 0 0 0 0 kubernetes.io/os=windows 14m +daemonset.apps/dcgm-exporter 0 0 0 0 0 kubernetes.io/os=linux 14m +daemonset.apps/fluent-bit 3 3 3 3 3 kubernetes.io/os=linux 14m +daemonset.apps/fluent-bit-windows 0 0 0 0 0 kubernetes.io/os=windows 14m +daemonset.apps/neuron-monitor 0 0 0 0 0 14m + +NAME READY STATUS RESTARTS AGE +pod/amazon-cloudwatch-observability-controller-manager-6f76854spzlp 1/1 Running 0 14m +pod/cloudwatch-agent-87hnx 1/1 Running 0 14m +pod/cloudwatch-agent-h6rpg 1/1 Running 0 14m +pod/cloudwatch-agent-kfwzk 1/1 Running 0 14m +pod/fluent-bit-8264s 1/1 Running 0 14m +pod/fluent-bit-9l69f 1/1 Running 0 14m +pod/fluent-bit-tfjbr 1/1 Running 0 14m + +NAME DATA AGE +configmap/cloudwatch-agent 1 14m +configmap/cloudwatch-agent-windows 1 14m +configmap/cloudwatch-agent-windows-container-insights 1 14m +configmap/cwagent-clusterleader 0 14m +configmap/dcgm-exporter-config-map 2 14m +configmap/fluent-bit-config 5 14m +configmap/fluent-bit-windows-config 5 14m +configmap/kube-root-ca.crt 1 14m +configmap/neuron-monitor-config-map 1 14m + +NAME SECRETS AGE 
+serviceaccount/amazon-cloudwatch-observability-controller-manager 0 14m +serviceaccount/cloudwatch-agent 0 14m +serviceaccount/dcgm-exporter-service-acct 0 14m +serviceaccount/default 0 14m +serviceaccount/neuron-monitor-service-acct 0 14m + +NAME MODE VERSION READY AGE IMAGE MANAGEMENT +amazoncloudwatchagent.cloudwatch.aws.amazon.com/cloudwatch-agent daemonset 0.0.0 14m managed +amazoncloudwatchagent.cloudwatch.aws.amazon.com/cloudwatch-agent-windows daemonset 0.0.0 14m managed +amazoncloudwatchagent.cloudwatch.aws.amazon.com/cloudwatch-agent-windows-container-insights daemonset 0.0.0 14m managed +``` + +- ๋ชจ๋“  ๋…ธ๋“œ์—์„œ ๋กœ๊ทธ๋ฅผ ์ˆ˜์ง‘ํ•˜๊ธฐ ์œ„ํ•ด ๋ฆฌ๋ˆ…์Šค์šฉ `cloudwatch-agent` ๋ฐ๋ชฌ์…‹์ด ๋…ธ๋“œ ์ˆ˜์— ๋งž๊ฒŒ ๋ฐฐํฌ๋จ +- Windows ๋…ธ๋“œ๋Š” ์กด์žฌํ•˜์ง€ ์•Š์•„ `cloudwatch-agent-windows` ๋ฐ๋ชฌ์…‹์€ 0์ž„ + +### **6. ๋…ธ๋“œ์˜ ๋กœ๊ทธ ํ™•์ธ** + +**(1) application ๋กœ๊ทธ ์†Œ์Šค(All log files inย `/var/log/containers` โ†’ ์‹ฌ๋ณผ๋ฆญ ๋งํฌ `/var/log/pods/<์ปจํ…Œ์ด๋„ˆ>`, ๊ฐ ์ปจํ…Œ์ด๋„ˆ/ํŒŒ๋“œ ๋กœ๊ทธ** + +```bash +for node in $N1 $N2 $N3; do echo ">>>>> $node <<<<<"; ssh ec2-user@$node sudo tree /var/log/containers; echo; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +>>>>> 15.164.227.37 <<<<< +/var/log/containers +โ”œโ”€โ”€ aws-load-balancer-controller-554fbd9d-kbctn_kube-system_aws-load-balancer-controller-ae03ebdd899a3b4960dc6eaf290d27f46ceff51545b68f2dad3b013ed05cf9a4.log -> /var/log/pods/kube-system_aws-load-balancer-controller-554fbd9d-kbctn_dc66f5b2-ef3f-4ea7-908f-0db4fbd2f275/aws-load-balancer-controller/0.log +โ”œโ”€โ”€ aws-node-r5nbp_kube-system_aws-eks-nodeagent-97deaf50ee032a4728028115ecadc4df9800f763105986e526a375367aaff758.log -> /var/log/pods/kube-system_aws-node-r5nbp_d1476562-ea81-46d8-bc36-4da33468aaa5/aws-eks-nodeagent/0.log +โ”œโ”€โ”€ aws-node-r5nbp_kube-system_aws-node-b011407ecb43724bd80edbfe11b5a306901b691b00b96400e4ce7c84666a967a.log -> /var/log/pods/kube-system_aws-node-r5nbp_d1476562-ea81-46d8-bc36-4da33468aaa5/aws-node/0.log +โ”œโ”€โ”€ 
aws-node-r5nbp_kube-system_aws-vpc-cni-init-4c907842128039d3ff9c6ecaf89063504f5d80438084589f6a44445e6c54c591.log -> /var/log/pods/kube-system_aws-node-r5nbp_d1476562-ea81-46d8-bc36-4da33468aaa5/aws-vpc-cni-init/0.log +โ”œโ”€โ”€ cloudwatch-agent-kfwzk_amazon-cloudwatch_otc-container-b64522fc2d70ee5ad6d8de6d49d866d636764c86f9231f18c9eb004c8587b20b.log -> /var/log/pods/amazon-cloudwatch_cloudwatch-agent-kfwzk_a5b6a6d2-d0c3-45de-89ac-d9c865c553f4/otc-container/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-z4t4z_kube-system_csi-attacher-e6c94cfbc24f90637b32b7f596daec2007e13a35c61794bd5429fc573d2de07a.log -> /var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-z4t4z_5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee/csi-attacher/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-z4t4z_kube-system_csi-provisioner-903745a8a728c5de43eca34f2012a52a6ef0fe103722048a135d82fab33c9226.log -> /var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-z4t4z_5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee/csi-provisioner/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-z4t4z_kube-system_csi-resizer-37d345a943121b4e5d348d3147b7928fbf94783ea87baf7e1a29bee600e44c6b.log -> /var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-z4t4z_5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee/csi-resizer/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-z4t4z_kube-system_csi-snapshotter-796eb70b19fb6e9a72c8cae8d8f52762c0d0da4431f45eeb10dfeed6de63172d.log -> /var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-z4t4z_5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee/csi-snapshotter/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-z4t4z_kube-system_ebs-plugin-d50cc0f88f910d81a23c77774d8fe84f451bfa825496e60ececc442552eb1639.log -> /var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-z4t4z_5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee/ebs-plugin/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-z4t4z_kube-system_liveness-probe-8bb874394b3681e04338edd503c4a560c7ebabd2a6608261c88fa7e70529695b.log -> 
/var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-z4t4z_5b150c0c-9d2b-4c5f-ba59-bbcff29bb0ee/liveness-probe/0.log +โ”œโ”€โ”€ ebs-csi-node-j46zg_kube-system_ebs-plugin-0824d0fbca762ba2532e33435a5b448a400f52b5cd7516fc7671a5845605d51e.log -> /var/log/pods/kube-system_ebs-csi-node-j46zg_d160163c-dd04-4259-ab96-f6850b5a407a/ebs-plugin/0.log +โ”œโ”€โ”€ ebs-csi-node-j46zg_kube-system_liveness-probe-3ca5cf0a203bb1eb886f05c02c308e79fd6e79777bbc98635d28e59fee8c88f8.log -> /var/log/pods/kube-system_ebs-csi-node-j46zg_d160163c-dd04-4259-ab96-f6850b5a407a/liveness-probe/0.log +โ”œโ”€โ”€ ebs-csi-node-j46zg_kube-system_node-driver-registrar-4bfca921825f106ecd08694b261b361499bc612b6e1b3b34537b30edaea8d251.log -> /var/log/pods/kube-system_ebs-csi-node-j46zg_d160163c-dd04-4259-ab96-f6850b5a407a/node-driver-registrar/0.log +โ”œโ”€โ”€ external-dns-dc4878f5f-fskxk_kube-system_external-dns-4c9f28bd84caed6a9f6e345991516527d2bcb271b1b161c5a58fd3c7710c7d5d.log -> /var/log/pods/kube-system_external-dns-dc4878f5f-fskxk_2f69ba60-e611-4718-8a4d-ea14f438867e/external-dns/0.log +โ”œโ”€โ”€ fluent-bit-tfjbr_amazon-cloudwatch_fluent-bit-948fb449154984e2ceff0ed1ca6cb86fdad141ca144e1995353f35647e7e0207.log -> /var/log/pods/amazon-cloudwatch_fluent-bit-tfjbr_6280ec3f-666d-485a-be7a-9756737ee8cd/fluent-bit/0.log +โ”œโ”€โ”€ kube-prometheus-stack-grafana-0_monitoring_grafana-07ff1d680b4d33ce368a00dcf109cb4278046c9d6b98b63c998de65312f4ac73.log -> /var/log/pods/monitoring_kube-prometheus-stack-grafana-0_b098dea3-a8ec-4fb1-a7b0-ff701a65f0ba/grafana/0.log +โ”œโ”€โ”€ kube-prometheus-stack-grafana-0_monitoring_grafana-sc-dashboard-d15b03ba0da8001c49f9ae652817fdd8b4b650dbd864b9e9fb772fcefb1645d4.log -> /var/log/pods/monitoring_kube-prometheus-stack-grafana-0_b098dea3-a8ec-4fb1-a7b0-ff701a65f0ba/grafana-sc-dashboard/0.log +โ”œโ”€โ”€ kube-prometheus-stack-grafana-0_monitoring_grafana-sc-datasources-31143a8a39f55140e75cbff4d3eb9d0e011582c8a15c2400287486b0f6f17014.log -> 
/var/log/pods/monitoring_kube-prometheus-stack-grafana-0_b098dea3-a8ec-4fb1-a7b0-ff701a65f0ba/grafana-sc-datasources/0.log +โ”œโ”€โ”€ kube-prometheus-stack-grafana-0_monitoring_init-chown-data-be60a46537cc11fbbb453426899a8b9918ac0a922e59505d0b0ebf9cbb6846a2.log -> /var/log/pods/monitoring_kube-prometheus-stack-grafana-0_b098dea3-a8ec-4fb1-a7b0-ff701a65f0ba/init-chown-data/0.log +โ”œโ”€โ”€ kube-prometheus-stack-prometheus-node-exporter-lt42c_monitoring_node-exporter-01861c25bd8c9d23b5535cf83af5dc3fea762d1cf1b35475dcbd351158c88cd1.log -> /var/log/pods/monitoring_kube-prometheus-stack-prometheus-node-exporter-lt42c_ec3fd0cb-6046-44ea-a6be-30e58ac0cbd2/node-exporter/0.log +โ”œโ”€โ”€ kube-proxy-v8nh9_kube-system_kube-proxy-a99b17ae17e49653b07611fd64abd6cb31026644be61bbc40bc4829cb9ad4fbc.log -> /var/log/pods/kube-system_kube-proxy-v8nh9_3a48dd47-bf53-4ec0-8060-2db03b661f8c/kube-proxy/0.log +โ”œโ”€โ”€ ratings-v1-65f797b499-fs82w_default_ratings-0bf6628633bfcca12bbdbbe1dfab7eb5c7ab97421a3fcc08ab3799287fc94dd5.log -> /var/log/pods/default_ratings-v1-65f797b499-fs82w_5ed60936-90e6-43bb-8ff9-cc48e8513767/ratings/0.log +โ””โ”€โ”€ reviews-v2-65cb66b45c-8rvgv_default_reviews-e1bf161e3331be5881dbc3c17a7ab70450454eb4242e8e4b0cd8d41ba5d3f74d.log -> /var/log/pods/default_reviews-v2-65cb66b45c-8rvgv_e577fc82-eff2-4eaf-9534-7b2a0e6d4851/reviews/0.log + +0 directories, 24 files + +>>>>> 3.38.205.159 <<<<< +/var/log/containers +โ”œโ”€โ”€ amazon-cloudwatch-observability-controller-manager-6f76854spzlp_amazon-cloudwatch_manager-21d672489b323c5ddde198ca793164cf1d83ada8a0aaa6ca1f271b17631174e3.log -> /var/log/pods/amazon-cloudwatch_amazon-cloudwatch-observability-controller-manager-6f76854spzlp_8c35db22-377e-4b33-819b-fa7324622171/manager/0.log +โ”œโ”€โ”€ aws-load-balancer-controller-554fbd9d-mtlsg_kube-system_aws-load-balancer-controller-f3c273a6d3458a8641e154652ae4ddac77f9dac11a9989c19a2d8b1e649f152a.log -> 
/var/log/pods/kube-system_aws-load-balancer-controller-554fbd9d-mtlsg_5819b483-9f6b-4aee-96ac-2a6308632fc3/aws-load-balancer-controller/0.log +โ”œโ”€โ”€ aws-node-qgc5t_kube-system_aws-eks-nodeagent-329a39ed4aa628e60b6de898061099e81899cf47eaea11b4de183bc047b0c273.log -> /var/log/pods/kube-system_aws-node-qgc5t_310d7c55-bc9e-474a-928f-b19218586bb2/aws-eks-nodeagent/0.log +โ”œโ”€โ”€ aws-node-qgc5t_kube-system_aws-node-131feacde9ef43a840464fab4dfdcca474f22bf4c729cb72eb06a6b0e52775aa.log -> /var/log/pods/kube-system_aws-node-qgc5t_310d7c55-bc9e-474a-928f-b19218586bb2/aws-node/0.log +โ”œโ”€โ”€ aws-node-qgc5t_kube-system_aws-vpc-cni-init-0955e671e6632f35365190de7baf154098e1874e15da88593d58c4c8b60b0ebe.log -> /var/log/pods/kube-system_aws-node-qgc5t_310d7c55-bc9e-474a-928f-b19218586bb2/aws-vpc-cni-init/0.log +โ”œโ”€โ”€ cloudwatch-agent-h6rpg_amazon-cloudwatch_otc-container-60691c257f2dc3f6c88c750c4b62a063881ebd20191d22c0931c3e39edc530f0.log -> /var/log/pods/amazon-cloudwatch_cloudwatch-agent-h6rpg_acdaf801-9bdc-442a-aa75-382c955a6877/otc-container/0.log +โ”œโ”€โ”€ details-v1-79dfbd6fff-584wb_default_details-b3894830ed1617fe5450afd0a995fd7b73b29cba7926429c5d097bde263655fc.log -> /var/log/pods/default_details-v1-79dfbd6fff-584wb_b48b1f96-e092-46d5-8e67-cd82ac2e6c37/details/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-p57xw_kube-system_csi-attacher-1e5760913f98d5f0bbe1a2b361ad912c868db722f01b60a9a7ce90a109e70b5d.log -> /var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-p57xw_4cf5ca4a-fae6-490f-9757-6fb2f93248bf/csi-attacher/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-p57xw_kube-system_csi-provisioner-be3e325f052fe16b3092a07e91e114dc9dc432e5d22f243e6588827c212bc6a7.log -> /var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-p57xw_4cf5ca4a-fae6-490f-9757-6fb2f93248bf/csi-provisioner/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-p57xw_kube-system_csi-resizer-bc3fcf25d070d8aba39417b1366d13792ca13effda7ccee5eac9c350aacf335f.log -> 
/var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-p57xw_4cf5ca4a-fae6-490f-9757-6fb2f93248bf/csi-resizer/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-p57xw_kube-system_csi-snapshotter-b5c546a2c80d6b12dc57145efadd7096af9b26a617c96348e96c76dabfc6e9aa.log -> /var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-p57xw_4cf5ca4a-fae6-490f-9757-6fb2f93248bf/csi-snapshotter/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-p57xw_kube-system_ebs-plugin-1f144c5d362b9aa9defc70faceceeca9aaf5bf2432ff29094ca7f14aff0d7286.log -> /var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-p57xw_4cf5ca4a-fae6-490f-9757-6fb2f93248bf/ebs-plugin/0.log +โ”œโ”€โ”€ ebs-csi-controller-7f8f8cb84-p57xw_kube-system_liveness-probe-5df88285ad500c9f746b9e2a33c300935449361510cf5257f5927bc244a00be2.log -> /var/log/pods/kube-system_ebs-csi-controller-7f8f8cb84-p57xw_4cf5ca4a-fae6-490f-9757-6fb2f93248bf/liveness-probe/0.log +โ”œโ”€โ”€ ebs-csi-node-hx5jb_kube-system_ebs-plugin-3752c118d606b7aef00dc996750ad7a5ceb8cdd6c1fe52f6be372bbf7ac7c75d.log -> /var/log/pods/kube-system_ebs-csi-node-hx5jb_7d00ede6-b633-4562-ade4-5c61317bf843/ebs-plugin/0.log +โ”œโ”€โ”€ ebs-csi-node-hx5jb_kube-system_liveness-probe-b4704bbfc6c455d7d5783c46f5d7edeaab33a1ff639fb3e705ae7339e7cc2fee.log -> /var/log/pods/kube-system_ebs-csi-node-hx5jb_7d00ede6-b633-4562-ade4-5c61317bf843/liveness-probe/0.log +โ”œโ”€โ”€ ebs-csi-node-hx5jb_kube-system_node-driver-registrar-075052709e559f266c4781c82fd440ea8a242b22ef144b992315505de11821eb.log -> /var/log/pods/kube-system_ebs-csi-node-hx5jb_7d00ede6-b633-4562-ade4-5c61317bf843/node-driver-registrar/0.log +โ”œโ”€โ”€ fluent-bit-9l69f_amazon-cloudwatch_fluent-bit-36dd0ff870718ead8bab9b0e527b10ce8ba557fc4fa0ca5739463cc3cbf06fbc.log -> /var/log/pods/amazon-cloudwatch_fluent-bit-9l69f_e07292b4-1788-44fb-8e6e-4d6da7d4bd24/fluent-bit/0.log +โ”œโ”€โ”€ kube-ops-view-657dbc6cd8-pxkvr_kube-system_kube-ops-view-03cea4cf3d718da69b11be38f26cea545a73de5e067e258a2defdfe2717a1b5a.log -> 
/var/log/pods/kube-system_kube-ops-view-657dbc6cd8-pxkvr_37bf2637-622f-4ee2-9804-a73d74e16f0e/kube-ops-view/0.log +โ”œโ”€โ”€ kube-prometheus-stack-prometheus-node-exporter-h442l_monitoring_node-exporter-990676e672a4e571e148c5fafb4a1a443ec0882f51c071a1ba4045888eedb35d.log -> /var/log/pods/monitoring_kube-prometheus-stack-prometheus-node-exporter-h442l_7627e5da-0717-4488-8ce1-c0e3cb456fc1/node-exporter/0.log +โ”œโ”€โ”€ kube-proxy-z9l58_kube-system_kube-proxy-c8a65c0d7d4f876e3eec50f7adcd3a349af990918e6affddc365e25ecfc58265.log -> /var/log/pods/kube-system_kube-proxy-z9l58_143e79b7-03d1-4bc9-af70-4e316835bebb/kube-proxy/0.log +โ”œโ”€โ”€ nginx-7c94c9bdcb-9g66b_default_nginx-caae567c481a991fbfc88920be6f7ad4b747d7157917de3f84861f2e4f9214d1.log -> /var/log/pods/default_nginx-7c94c9bdcb-9g66b_cfa0d8d5-8e43-4b49-8cac-2467feac7430/nginx/0.log +โ”œโ”€โ”€ nginx-7c94c9bdcb-9g66b_default_preserve-logs-symlinks-05070e719f8e24ce045ecb8acafe47cd184eefaa68e42b7d929232b8f91c9093.log -> /var/log/pods/default_nginx-7c94c9bdcb-9g66b_cfa0d8d5-8e43-4b49-8cac-2467feac7430/preserve-logs-symlinks/0.log +โ”œโ”€โ”€ productpage-v1-dffc47f64-t8bs9_default_productpage-a045f8233a3098e7bb02b6487751f7a2780ed628233317d1c45fa22103ecfebe.log -> /var/log/pods/default_productpage-v1-dffc47f64-t8bs9_82e8306c-9a93-42ad-a5c8-6b7960291bed/productpage/0.log +โ””โ”€โ”€ reviews-v1-5c4d6d447c-7b69s_default_reviews-a0c4cb095ac797334d9f33a2d5cfac40849e1aae7099f760d8fde06aa8cf29e3.log -> /var/log/pods/default_reviews-v1-5c4d6d447c-7b69s_440483a1-6ed2-4f1b-825c-73b5ba45393c/reviews/0.log + +0 directories, 24 files + +>>>>> 43.200.163.0 <<<<< +/var/log/containers +โ”œโ”€โ”€ aws-node-p4v96_kube-system_aws-eks-nodeagent-29c619954bdcd1b8e8fe69944adc0a11efea521f7af573163f02e141836fdff8.log -> /var/log/pods/kube-system_aws-node-p4v96_c7f74999-0576-407f-8bb6-7363e257d07c/aws-eks-nodeagent/0.log +โ”œโ”€โ”€ aws-node-p4v96_kube-system_aws-node-055f423d886edae30761d2c3cb3e0e6180947dcffa411525d1aadaaa1adff9a5.log -> 
/var/log/pods/kube-system_aws-node-p4v96_c7f74999-0576-407f-8bb6-7363e257d07c/aws-node/0.log +โ”œโ”€โ”€ aws-node-p4v96_kube-system_aws-vpc-cni-init-4ad0fce252c9c30b0572d945e8f7bf9fd3e25ecf52a7ea7e92ddc4c24c081c4a.log -> /var/log/pods/kube-system_aws-node-p4v96_c7f74999-0576-407f-8bb6-7363e257d07c/aws-vpc-cni-init/0.log +โ”œโ”€โ”€ cloudwatch-agent-87hnx_amazon-cloudwatch_otc-container-3970ce7ea4baccdfc1509e8f49e0a3bc647fe21be59ea60d25115d78df641de4.log -> /var/log/pods/amazon-cloudwatch_cloudwatch-agent-87hnx_97bb652a-8c32-42fb-a36b-f4c82301ca39/otc-container/0.log +โ”œโ”€โ”€ coredns-86f5954566-d6vwh_kube-system_coredns-47afd8adff91e102d81a8465187fa4142b5974a191adb3a66bee6a571c6ae410.log -> /var/log/pods/kube-system_coredns-86f5954566-d6vwh_44a7e298-9a71-45c3-b013-78fd714dc910/coredns/0.log +โ”œโ”€โ”€ ebs-csi-node-gdh58_kube-system_ebs-plugin-aadf0d58897fb5be64696bcea86e2069121224357deb2945b7d2209a712a6b92.log -> /var/log/pods/kube-system_ebs-csi-node-gdh58_125250cb-6169-4d7f-a67e-48f7a1dad6ef/ebs-plugin/0.log +โ”œโ”€โ”€ ebs-csi-node-gdh58_kube-system_liveness-probe-bd1fe3ce30be5008c7d12090f240b8bb11b2688c6b1837344df1a8f5bd894c42.log -> /var/log/pods/kube-system_ebs-csi-node-gdh58_125250cb-6169-4d7f-a67e-48f7a1dad6ef/liveness-probe/0.log +โ”œโ”€โ”€ ebs-csi-node-gdh58_kube-system_node-driver-registrar-0d2ae3e69caf9b791f1792fafa95b508d879e6e9bf1f667e6bd789db90c48076.log -> /var/log/pods/kube-system_ebs-csi-node-gdh58_125250cb-6169-4d7f-a67e-48f7a1dad6ef/node-driver-registrar/0.log +โ”œโ”€โ”€ fluent-bit-8264s_amazon-cloudwatch_fluent-bit-acfa337b0c7ed38f64db47b027408a27f82f5857c23a2a518ba476b9052c6479.log -> /var/log/pods/amazon-cloudwatch_fluent-bit-8264s_956023fe-06a8-43b7-87f9-eaa5b3ebf6ed/fluent-bit/0.log +โ”œโ”€โ”€ kube-prometheus-stack-kube-state-metrics-5dbfbd4b9-jgnqz_monitoring_kube-state-metrics-d936a02626be02bd153b9335309bdc58635eff941ad0a44024f62b227ae8a4d3.log -> 
/var/log/pods/monitoring_kube-prometheus-stack-kube-state-metrics-5dbfbd4b9-jgnqz_c3c88c9a-e333-45cc-94f3-17d463c1f6b5/kube-state-metrics/0.log +โ”œโ”€โ”€ kube-prometheus-stack-operator-76bdd654bf-st47m_monitoring_kube-prometheus-stack-920d8607cf65582926adda2de88a16c8bfd2497bf9a6e95b36a420ce070d23ba.log -> /var/log/pods/monitoring_kube-prometheus-stack-operator-76bdd654bf-st47m_4ddc004c-c404-4afd-87e0-5b20c6658f4a/kube-prometheus-stack/0.log +โ”œโ”€โ”€ kube-prometheus-stack-prometheus-node-exporter-kcpsg_monitoring_node-exporter-c55ed57dc100621c8862329c5ef6706661be5911e70543edec5d1e8f23440d10.log -> /var/log/pods/monitoring_kube-prometheus-stack-prometheus-node-exporter-kcpsg_8a6a5dd7-a90b-4b52-a9c0-0be029e6a60e/node-exporter/0.log +โ”œโ”€โ”€ kube-proxy-s6tdr_kube-system_kube-proxy-e9f14023b1fa22b7635908d1f4f0a281ef488fdd505596a1b0c6fc6197f50bd0.log -> /var/log/pods/kube-system_kube-proxy-s6tdr_73fab99f-ca37-4889-bde3-41edd1bdb6e9/kube-proxy/0.log +โ”œโ”€โ”€ metrics-server-6bf5998d9c-c8tbf_kube-system_metrics-server-11c9d62da3624469d83d05266914f32cb6eac70257e95fc25d7bd475155fb996.log -> /var/log/pods/kube-system_metrics-server-6bf5998d9c-c8tbf_438a7a99-9cdc-4162-a997-8079928f224e/metrics-server/0.log +โ”œโ”€โ”€ metrics-server-6bf5998d9c-tftq9_kube-system_metrics-server-0ceb1cf3e114275f884451369ee24c7e6c0cd2a28cd32bd4eeece8a12a5feaf0.log -> /var/log/pods/kube-system_metrics-server-6bf5998d9c-tftq9_8079e24b-42c0-4f1d-9fe2-7ca81b85cef1/metrics-server/0.log +โ”œโ”€โ”€ prometheus-kube-prometheus-stack-prometheus-0_monitoring_config-reloader-8d13b28d34f56a4929f1fb83781191d2a49f3a8b27e994e2a9d69ce2af86536e.log -> /var/log/pods/monitoring_prometheus-kube-prometheus-stack-prometheus-0_8ea36b37-436c-44d4-abee-8b9a013dc06e/config-reloader/0.log +โ”œโ”€โ”€ prometheus-kube-prometheus-stack-prometheus-0_monitoring_init-config-reloader-f3b7bc42e009d5ea5c0c765da46c9a3176dea606e3f019403d4483a5a916bc90.log -> 
/var/log/pods/monitoring_prometheus-kube-prometheus-stack-prometheus-0_8ea36b37-436c-44d4-abee-8b9a013dc06e/init-config-reloader/0.log +โ”œโ”€โ”€ prometheus-kube-prometheus-stack-prometheus-0_monitoring_prometheus-088bda24f30d1af1650405f1a411b15bb947f59868da68f56ac55f555fd8b3ea.log -> /var/log/pods/monitoring_prometheus-kube-prometheus-stack-prometheus-0_8ea36b37-436c-44d4-abee-8b9a013dc06e/prometheus/0.log +โ””โ”€โ”€ reviews-v3-f68f94645-nsbl8_default_reviews-783c91ace3e5873f33437ffa59ce1f210580e599d287b3cf725eced6386ca915.log -> /var/log/pods/default_reviews-v3-f68f94645-nsbl8_8cebd5e6-30ac-428f-b7d8-b694a2dd1e3a/reviews/0.log + +0 directories, 19 files +``` + +**(2) host ๋กœ๊ทธ ์†Œ์Šค(Logs fromย `/var/log/dmesg`,ย `/var/log/secure`, andย `/var/log/messages`), ๋…ธ๋“œ(ํ˜ธ์ŠคํŠธ) ๋กœ๊ทธ** + +```bash +for node in $N1 $N2 $N3; do echo ">>>>> $node <<<<<"; ssh ec2-user@$node sudo tree /var/log/ -L 1; echo; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +>>>>> 15.164.227.37 <<<<< +/var/log/ +โ”œโ”€โ”€ README -> ../../usr/share/doc/systemd/README.logs +โ”œโ”€โ”€ amazon +โ”œโ”€โ”€ audit +โ”œโ”€โ”€ aws-routed-eni +โ”œโ”€โ”€ btmp +โ”œโ”€โ”€ chrony +โ”œโ”€โ”€ cloud-init-output.log +โ”œโ”€โ”€ cloud-init.log +โ”œโ”€โ”€ containers +โ”œโ”€โ”€ dmesg +โ”œโ”€โ”€ dnf.librepo.log +โ”œโ”€โ”€ dnf.log +โ”œโ”€โ”€ dnf.rpm.log +โ”œโ”€โ”€ hawkey.log +โ”œโ”€โ”€ journal +โ”œโ”€โ”€ lastlog +โ”œโ”€โ”€ pods +โ”œโ”€โ”€ private +โ”œโ”€โ”€ sa +โ”œโ”€โ”€ tallylog +โ””โ”€โ”€ wtmp + +10 directories, 11 files + +>>>>> 3.38.205.159 <<<<< +/var/log/ +โ”œโ”€โ”€ README -> ../../usr/share/doc/systemd/README.logs +โ”œโ”€โ”€ amazon +โ”œโ”€โ”€ audit +โ”œโ”€โ”€ aws-routed-eni +โ”œโ”€โ”€ btmp +โ”œโ”€โ”€ chrony +โ”œโ”€โ”€ cloud-init-output.log +โ”œโ”€โ”€ cloud-init.log +โ”œโ”€โ”€ containers +โ”œโ”€โ”€ dmesg +โ”œโ”€โ”€ dnf.librepo.log +โ”œโ”€โ”€ dnf.log +โ”œโ”€โ”€ dnf.rpm.log +โ”œโ”€โ”€ hawkey.log +โ”œโ”€โ”€ journal +โ”œโ”€โ”€ lastlog +โ”œโ”€โ”€ pods +โ”œโ”€โ”€ private +โ”œโ”€โ”€ sa +โ”œโ”€โ”€ tallylog +โ””โ”€โ”€ wtmp + 
+10 directories, 11 files + +>>>>> 43.200.163.0 <<<<< +/var/log/ +โ”œโ”€โ”€ README -> ../../usr/share/doc/systemd/README.logs +โ”œโ”€โ”€ amazon +โ”œโ”€โ”€ audit +โ”œโ”€โ”€ aws-routed-eni +โ”œโ”€โ”€ btmp +โ”œโ”€โ”€ chrony +โ”œโ”€โ”€ cloud-init-output.log +โ”œโ”€โ”€ cloud-init.log +โ”œโ”€โ”€ containers +โ”œโ”€โ”€ dmesg +โ”œโ”€โ”€ dnf.librepo.log +โ”œโ”€โ”€ dnf.log +โ”œโ”€โ”€ dnf.rpm.log +โ”œโ”€โ”€ hawkey.log +โ”œโ”€โ”€ journal +โ”œโ”€โ”€ lastlog +โ”œโ”€โ”€ pods +โ”œโ”€โ”€ private +โ”œโ”€โ”€ sa +โ”œโ”€โ”€ tallylog +โ””โ”€โ”€ wtmp + +10 directories, 11 files +``` + +**(3) dataplane ๋กœ๊ทธ ์†Œ์Šค(`/var/log/journal`ย forย `kubelet.service`,ย `kubeproxy.service`, andย `docker.service`), ์ฟ ๋ฒ„๋„คํ‹ฐ์Šค ๋ฐ์ดํ„ฐํ”Œ๋ ˆ์ธ ๋กœ๊ทธ** + +```bash +for node in $N1 $N2 $N3; do echo ">>>>> $node <<<<<"; ssh ec2-user@$node sudo tree /var/log/journal -L 1; echo; done +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +>>>>> 15.164.227.37 <<<<< +/var/log/journal +โ”œโ”€โ”€ ec22753c501541d270d53e1cc6b319a7 +โ””โ”€โ”€ ec290d14e0f34366b2d3f2ea33b06253 + +2 directories, 0 files + +>>>>> 3.38.205.159 <<<<< +/var/log/journal +โ”œโ”€โ”€ ec22753c501541d270d53e1cc6b319a7 +โ””โ”€โ”€ ec23b4795af458cac1beebec40e88e9b + +2 directories, 0 files + +>>>>> 43.200.163.0 <<<<< +/var/log/journal +โ”œโ”€โ”€ ec203a98663eebbcd25282168fa4a01d +โ””โ”€โ”€ ec22753c501541d270d53e1cc6b319a7 + +2 directories, 0 files +``` + +### **7. 
cloudwatch-agent ์„ค์ • ํ™•์ธ** + +```bash +kubectl describe cm cloudwatch-agent -n amazon-cloudwatch +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +Name: cloudwatch-agent +Namespace: amazon-cloudwatch +Labels: app.kubernetes.io/component=amazon-cloudwatch-agent + app.kubernetes.io/instance=amazon-cloudwatch.cloudwatch-agent + app.kubernetes.io/managed-by=amazon-cloudwatch-agent-operator + app.kubernetes.io/name=cloudwatch-agent + app.kubernetes.io/part-of=amazon-cloudwatch-agent + app.kubernetes.io/version=1.300052.0b1024 +Annotations: + +Data +==== +cwagentconfig.json: +---- +{"agent":{"region":"ap-northeast-2"},"logs":{"metrics_collected":{"application_signals":{"hosted_in":"myeks"},"kubernetes":{"cluster_name":"myeks","enhanced_container_insights":true}}},"traces":{"traces_collected":{"application_signals":{}}}} + +BinaryData +==== + +Events: +``` + +### **8. cloudwatch-agent ๋ฐ๋ชฌ์…‹ ์ƒ์„ธ ์กฐํšŒ** + +```bash +kubectl describe -n amazon-cloudwatch ds cloudwatch-agent +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +Name: cloudwatch-agent +Selector: app.kubernetes.io/component=amazon-cloudwatch-agent,app.kubernetes.io/instance=amazon-cloudwatch.cloudwatch-agent,app.kubernetes.io/managed-by=amazon-cloudwatch-agent-operator,app.kubernetes.io/part-of=amazon-cloudwatch-agent +Node-Selector: kubernetes.io/os=linux +Labels: app.kubernetes.io/component=amazon-cloudwatch-agent + app.kubernetes.io/instance=amazon-cloudwatch.cloudwatch-agent + app.kubernetes.io/managed-by=amazon-cloudwatch-agent-operator + app.kubernetes.io/name=cloudwatch-agent + app.kubernetes.io/part-of=amazon-cloudwatch-agent + app.kubernetes.io/version=1.300052.0b1024 +Annotations: amazon-cloudwatch-agent-operator-config/sha256: f76363aaebaaaa494aa0e7edfc9a2329b93ab3e4ad302f54e8e3e2a284dbef1c + deprecated.daemonset.template.generation: 1 + prometheus.io/path: /metrics + prometheus.io/port: 8888 + prometheus.io/scrape: true +Desired Number of Nodes Scheduled: 3 +Current Number of Nodes Scheduled: 3 +Number of Nodes 
Scheduled with Up-to-date Pods: 3 +Number of Nodes Scheduled with Available Pods: 3 +Number of Nodes Misscheduled: 0 +Pods Status: 3 Running / 0 Waiting / 0 Succeeded / 0 Failed +Pod Template: + Labels: app.kubernetes.io/component=amazon-cloudwatch-agent + app.kubernetes.io/instance=amazon-cloudwatch.cloudwatch-agent + app.kubernetes.io/managed-by=amazon-cloudwatch-agent-operator + app.kubernetes.io/name=cloudwatch-agent + app.kubernetes.io/part-of=amazon-cloudwatch-agent + app.kubernetes.io/version=1.300052.0b1024 + Annotations: amazon-cloudwatch-agent-operator-config/sha256: f76363aaebaaaa494aa0e7edfc9a2329b93ab3e4ad302f54e8e3e2a284dbef1c + prometheus.io/path: /metrics + prometheus.io/port: 8888 + prometheus.io/scrape: true + Service Account: cloudwatch-agent + Containers: + otc-container: + Image: 602401143452.dkr.ecr.ap-northeast-2.amazonaws.com/eks/observability/cloudwatch-agent:1.300052.0b1024 + Ports: 4315/TCP, 4316/TCP, 2000/TCP, 4311/TCP + Host Ports: 0/TCP, 0/TCP, 0/TCP, 0/TCP + Limits: + cpu: 500m + memory: 512Mi + Requests: + cpu: 250m + memory: 128Mi + Environment: + K8S_NODE_NAME: (v1:spec.nodeName) + HOST_IP: (v1:status.hostIP) + HOST_NAME: (v1:spec.nodeName) + K8S_NAMESPACE: (v1:metadata.namespace) + POD_NAME: (v1:metadata.name) + Mounts: + /dev/disk from devdisk (ro) + /etc/amazon-cloudwatch-observability-agent-cert from agenttls (ro) + /etc/amazon-cloudwatch-observability-agent-client-cert from agentclienttls (ro) + /etc/amazon-cloudwatch-observability-agent-server-cert from agentservertls (ro) + /etc/cwagentconfig from otc-internal (rw) + /rootfs from rootfs (ro) + /run/containerd/containerd.sock from containerdsock (rw) + /sys from sys (ro) + /var/lib/docker from varlibdocker (ro) + /var/lib/kubelet/pod-resources from kubelet-podresources (rw) + /var/run/docker.sock from dockersock (ro) + Volumes: + otc-internal: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: cloudwatch-agent + Optional: false + kubelet-podresources: + Type: 
HostPath (bare host directory volume) + Path: /var/lib/kubelet/pod-resources + HostPathType: Directory + rootfs: + Type: HostPath (bare host directory volume) + Path: / + HostPathType: + dockersock: + Type: HostPath (bare host directory volume) + Path: /var/run/docker.sock + HostPathType: + varlibdocker: + Type: HostPath (bare host directory volume) + Path: /var/lib/docker + HostPathType: + containerdsock: + Type: HostPath (bare host directory volume) + Path: /run/containerd/containerd.sock + HostPathType: + sys: + Type: HostPath (bare host directory volume) + Path: /sys + HostPathType: + devdisk: + Type: HostPath (bare host directory volume) + Path: /dev/disk/ + HostPathType: + agenttls: + Type: Secret (a volume populated by a Secret) + SecretName: amazon-cloudwatch-observability-agent-cert + Optional: false + agentclienttls: + Type: Secret (a volume populated by a Secret) + SecretName: amazon-cloudwatch-observability-agent-client-cert + Optional: false + agentservertls: + Type: Secret (a volume populated by a Secret) + SecretName: amazon-cloudwatch-observability-agent-server-cert + Optional: false + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 50m daemonset-controller Created pod: cloudwatch-agent-87hnx + Normal SuccessfulCreate 50m daemonset-controller Created pod: cloudwatch-agent-h6rpg + Normal SuccessfulCreate 50m daemonset-controller Created pod: cloudwatch-agent-kfwzk +``` + +### **9. 
Fluent Bit ๋กœ๊ทธ INPUT/FILTER/OUTPUT ์„ค์ • ํ™•์ธ** + +```bash +kubectl describe cm fluent-bit-config -n amazon-cloudwatch +``` + +โœ…ย **์ถœ๋ ฅ** + +```bash +Name: fluent-bit-config +Namespace: amazon-cloudwatch +Labels: k8s-app=fluent-bit +Annotations: + +Data +==== +application-log.conf: +---- +[INPUT] + Name tail + Tag application.* + Exclude_Path /var/log/containers/cloudwatch-agent*, /var/log/containers/fluent-bit*, /var/log/containers/aws-node*, /var/log/containers/kube-proxy* + Path /var/log/containers/*.log + multiline.parser docker, cri + DB /var/fluent-bit/state/flb_container.db + Mem_Buf_Limit 50MB + Skip_Long_Lines On + Refresh_Interval 10 + Rotate_Wait 30 + storage.type filesystem + Read_from_Head ${READ_FROM_HEAD} + +[INPUT] + Name tail + Tag application.* + Path /var/log/containers/fluent-bit* + multiline.parser docker, cri + DB /var/fluent-bit/state/flb_log.db + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 + Read_from_Head ${READ_FROM_HEAD} + +[INPUT] + Name tail + Tag application.* + Path /var/log/containers/cloudwatch-agent* + multiline.parser docker, cri + DB /var/fluent-bit/state/flb_cwagent.db + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 + Read_from_Head ${READ_FROM_HEAD} + +[FILTER] + Name aws + Match application.* + az false + ec2_instance_id false + Enable_Entity true + +[FILTER] + Name kubernetes + Match application.* + Kube_URL https://kubernetes.default.svc:443 + Kube_Tag_Prefix application.var.log.containers. 
+ Merge_Log On + Merge_Log_Key log_processed + K8S-Logging.Parser On + K8S-Logging.Exclude Off + Labels Off + Annotations Off + Use_Kubelet On + Kubelet_Port 10250 + Buffer_Size 0 + Use_Pod_Association On + +[OUTPUT] + Name cloudwatch_logs + Match application.* + region ${AWS_REGION} + log_group_name /aws/containerinsights/${CLUSTER_NAME}/application + log_stream_prefix ${HOST_NAME}- + auto_create_group true + extra_user_agent container-insights + add_entity true + +dataplane-log.conf: +---- +[INPUT] + Name systemd + Tag dataplane.systemd.* + Systemd_Filter _SYSTEMD_UNIT=docker.service + Systemd_Filter _SYSTEMD_UNIT=containerd.service + Systemd_Filter _SYSTEMD_UNIT=kubelet.service + DB /var/fluent-bit/state/systemd.db + Path /var/log/journal + Read_From_Tail ${READ_FROM_TAIL} + +[INPUT] + Name tail + Tag dataplane.tail.* + Path /var/log/containers/aws-node*, /var/log/containers/kube-proxy* + multiline.parser docker, cri + DB /var/fluent-bit/state/flb_dataplane_tail.db + Mem_Buf_Limit 50MB + Skip_Long_Lines On + Refresh_Interval 10 + Rotate_Wait 30 + storage.type filesystem + Read_from_Head ${READ_FROM_HEAD} + +[FILTER] + Name modify + Match dataplane.systemd.* + Rename _HOSTNAME hostname + Rename _SYSTEMD_UNIT systemd_unit + Rename MESSAGE message + Remove_regex ^((?!hostname|systemd_unit|message).)*$ + +[FILTER] + Name aws + Match dataplane.* + imds_version v2 + +[OUTPUT] + Name cloudwatch_logs + Match dataplane.* + region ${AWS_REGION} + log_group_name /aws/containerinsights/${CLUSTER_NAME}/dataplane + log_stream_prefix ${HOST_NAME}- + auto_create_group true + extra_user_agent container-insights + +fluent-bit.conf: +---- +[SERVICE] + Flush 5 + Grace 30 + Log_Level error + Daemon off + Parsers_File parsers.conf + storage.path /var/fluent-bit/state/flb-storage/ + storage.sync normal + storage.checksum off + storage.backlog.mem_limit 5M + +@INCLUDE application-log.conf +@INCLUDE dataplane-log.conf +@INCLUDE host-log.conf + +host-log.conf: +---- +[INPUT] + Name tail 
+ Tag host.dmesg + Path /var/log/dmesg + Key message + DB /var/fluent-bit/state/flb_dmesg.db + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 + Read_from_Head ${READ_FROM_HEAD} + +[INPUT] + Name tail + Tag host.messages + Path /var/log/messages + Parser syslog + DB /var/fluent-bit/state/flb_messages.db + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 + Read_from_Head ${READ_FROM_HEAD} + +[INPUT] + Name tail + Tag host.secure + Path /var/log/secure + Parser syslog + DB /var/fluent-bit/state/flb_secure.db + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 + Read_from_Head ${READ_FROM_HEAD} + +[FILTER] + Name aws + Match host.* + imds_version v2 + +[OUTPUT] + Name cloudwatch_logs + Match host.* + region ${AWS_REGION} + log_group_name /aws/containerinsights/${CLUSTER_NAME}/host + log_stream_prefix ${HOST_NAME}. + auto_create_group true + extra_user_agent container-insights + +parsers.conf: +---- +[PARSER] + Name syslog + Format regex + Regex ^(?