diff --git a/.crux_dry_run_build b/.crux_dry_run_build new file mode 100644 index 00000000..17a84de1 --- /dev/null +++ b/.crux_dry_run_build @@ -0,0 +1,3 @@ +# list of versionsets to run Dry-run Builds in. Used when you make or update a CR. +AWSDevDocs/public +AWSDevDocsBJS/public diff --git a/.gitconfig b/.gitconfig new file mode 100644 index 00000000..fb0fcb94 --- /dev/null +++ b/.gitconfig @@ -0,0 +1,8 @@ +[difftool "vscode"] + cmd = code --wait --diff $LOCAL $REMOTE +[diff] + tool = vscode +[mergetool "vscode"] + cmd = code --wait --merge $REMOTE $LOCAL $BASE $MERGED +[merge] + tool = vscode diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..6bdaa999 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,6 @@ +*Issue #, if available:* + +*Description of changes:* + + +By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. diff --git a/.github/workflows/vale.yml b/.github/workflows/vale.yml new file mode 100644 index 00000000..bad1eb18 --- /dev/null +++ b/.github/workflows/vale.yml @@ -0,0 +1,27 @@ +name: Style check + +on: + pull_request: + workflow_dispatch: + +jobs: + style-job: + runs-on: ubuntu-latest + steps: + - name: Check out + uses: actions/checkout@v3 + + # For AsciiDoc users: + - name: Install Asciidoctor + run: sudo apt-get install -y asciidoctor + + - name: Run Vale + uses: errata-ai/vale-action@reviewdog + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + with: + fail_on_error: true + reporter: github-pr-check + filter_mode: added + files: latest/ug + continue-on-error: false \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100755 index 00000000..7bc09874 --- /dev/null +++ b/.gitignore @@ -0,0 +1,80 @@ +*.running.properties.txt +*\~ +*.mobi +build +*.DS_Store +.attach_pid* +*.7z +*.action +*.apk +*.app +*.avi +*.bat +*.bin +*.cab +*.cmd +*.com +*.command +*.cpl +*.csh +*.dmg +*.doc +*.docx +*.ex_ +*.exe +*.gadget +*.html +*.inf +*.ins +*.inx +*.ipa +*.iso +*.isu +*.job +*.jse +*.ksh +*.lnk +*.mov +*.mp3 +*.mp4 +*.msc +*.msi +*.msp +*.mst +*.osx +*.out +*.paf +*.pdf +*.pif +*.potm +*.potx +*.ppam +*.pptm +*.pptx +*.prg +*.ps1 +*.rar +*.reg +*.rgs +*.rtf +*.run +*.scr +*.sct +*.shar +*.shb +*.shs +*.tar +*.u3p +*.vb +*.vbe +*.vbs +*.vbscript +*.workflow +*.ws +*.wsf +*.wsh +*.xls +*.xlsx +*.xpr +vale/styles/AsciiDoc/ +vale/styles/RedHat/ diff --git a/.vale.ini b/.vale.ini new file mode 100644 index 00000000..5f93659a --- /dev/null +++ b/.vale.ini @@ -0,0 +1,16 @@ +StylesPath = vale/styles + +MinAlertLevel = suggestion + +Packages = RedHat, AsciiDoc + +Vocab = EksDocsVocab + +# Ignore files in dirs starting with `.` to avoid raising errors for `.vale/fixtures/*/testinvalid.adoc` files +[[!.]*.adoc] +BasedOnStyles = RedHat, AsciiDoc, EksDocs +RedHat.GitLinks = OFF +AsciiDoc.UnsetAttributes = OFF +RedHat.CaseSensitiveTerms = suggestion +RedHat.TermsErrors = warning +RedHat.Spacing = warning diff --git a/.vscode/asciidoc.code-snippets b/.vscode/asciidoc.code-snippets new file mode 100644 index 00000000..cdceb00e --- /dev/null +++ b/.vscode/asciidoc.code-snippets @@ -0,0 +1,185 @@ +{ + // Place your snippets for AsciiDoc here. Each snippet is defined under a snippet name and has a prefix, body and + // description. The prefix is what is used to trigger the snippet and the body will be expanded and inserted. 
Possible variables are: + // $1, $2 for tab stops, $0 for the final cursor position, and ${1:label}, ${2:another} for placeholders. Placeholders with the + // same ids are connected. + // Example: + // "Print to console": { + // "prefix": "log", + // "body": [ + // "console.log('$1');", + // "$2" + // ], + // "description": "Log output to console" + // } + // https://code.visualstudio.com/docs/editor/userdefinedsnippets + // https://www.freecodecamp.org/news/definitive-guide-to-snippets-visual-studio-code/ + "adoc tablist": { + "prefix": "tablist", + "body": [ + "====", + "[role=\"tablist\"]", + "${1:Fargate - [.noloc]`Linux`}::", + "+", + "[source,bash,subs=\"verbatim,attributes\"]", + "----", + "eksctl create cluster --name my-cluster --region region-code --fargate", + "----\n", + "${2:Managed nodes - [.noloc]`Linux`}::", + "+", + "[source,bash,subs=\"verbatim,attributes\"]", + "----", + "eksctl create cluster --name my-cluster --region region-code", + "----", + "eksctl create cluster --name my-cluster --region region-code", + "----\n", + "====" + ], + "description": "adoc tablist" + }, + "adoc step tablist": { + "prefix": "tab-step", + "body": [ + "====", + "[role=\"tablist\"]", + "${1:{aws-management-console}`}::", + "+", + "[source,bash,subs=\"verbatim,attributes\"]", + "----", + "eksctl create cluster --name my-cluster --region region-code --fargate", + "----\n", + "${2:[.noloc]`eksctl`}::", + "+", + "[source,bash,subs=\"verbatim,attributes\"]", + "----", + "eksctl create cluster --name my-cluster --region region-code", + "----\n", + "${3:{aws} CLI}::", + "+", + "[source,bash,subs=\"verbatim,attributes\"]", + "----", + "eksctl create cluster --name my-cluster --region region-code", + "----\n", + "${3:{aws} CloudFormation}::", + "+", + "[source,bash,subs=\"verbatim,attributes\"]", + "----", + "eksctl create cluster --name my-cluster --region region-code", + "----\n", + "====" + ], + "description": "adoc step tablist" + }, + "adoc region tablist": { + "prefix": "tab-region", + "body": [ + "====", + "[role=\"tablist\"]", + "${1:{aws}`}::", + "`eks-cluster.[.replaceable]``region``.api.aws`", + "\n", + "${2:{aws} GovCloud (US)}::", + "`eks-cluster.[.replaceable]``region``.api.aws`", + "\n", + "${3:{amazon-web-services} in China::", + "`eks-cluster.[.replaceable]``region``.api.amazonwebservices.com.cn`", + "\n", + "====" + ], + "description": "adoc region tablist" + }, + "Code Block": { + "prefix": "adoc-code", + "body": [ + "[source,${1|yaml,cli,json,java,python,javascript,ruby,go,bash|}]", + "----", + "${2:// Your code here}", + "----", + "$0" + ], + "description": "Code block with language selection" + }, + "AWS AsciiDoc Link": { + "prefix": "adoc-link", + "body": [ + "link:${1:path}[\"${2:link text}\",type=\"${3|documentation,marketing,console,blog,api|}\"]" + ], + "description": "Create an AWS documentation link in AsciiDoc format" + }, + "Admonition Block": { + "prefix": "adoc-admonition", + "body": [ + "[${1|NOTE,TIP,IMPORTANT,CAUTION,WARNING|}]", + "====", + "${2:Admonition content}", + "====", + "$0" + ], + "description": "Admonition block with type selection" + }, + "Table": { + "prefix": "adoc-table", + "body": [ + "[%header,cols=\"${1:1,1}\"]", + "|===", + "| ${2:Header 1} | ${3:Header 2}", + "", + "| ${4:Cell 1} | ${5:Cell 2}", + "|===", + "$0" + ], + "description": "Basic table with headers" + }, + "Cross Reference": { + "prefix": "adoc-xref", + "body": [ + "<<${1:anchor-id},${2:display text}>>$0" + ], + "description": "Cross-reference link" + }, + "Definition List": { + 
"prefix": "adoc-deflist", + "body": [ + "${1:Term 1}::", + "${2:Definition 1}", + "", + "${3:Term 2}::", + "${4:Definition 2}", + "$0" + ], + "description": "Definition list" + }, + "NODE_ROOT Section": { + "prefix": "adoc-topic", + "body": [ + "//!!NODE_ROOT
", + "[.topic]", + "[[${1:page-id},${1:page-id}.title]]", + "= ${2:page title goes here}", + ":info_doctype: section", + "", + "include::../attributes.txt[]", + "", + ], + "description": "Creates a NODE_ROOT section template with topic class and ID" + }, + "Include with Leveloffset": { + "prefix": "adoc-inc", + "body": [ + "include::${1:filename}.adoc[leveloffset=+1]$0" + ], + "description": "Creates an include statement with leveloffset=+1, automatically adding .adoc extension" + }, + "AsciiDoc Collapsible Section": { + "prefix": "adoc-collapse", + "body": [ + "[[${1:collapsible-section-id},${1:collapsible-section-id}.title]]", + ".${2:Section Title}", + "[%collapsible, expand-section=\"_collapse_all_\"]", + "====", + "${3:This is where the text to collapse goes.}", + "====" + ], + "description": "Creates a collapsible section in AsciiDoc" + } +} \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 00000000..269fa424 --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,35 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "label": "Convert and Open AsciiDoc", + "type": "shell", + "command": "bash", + "args": [ + "-c", + "asciidoctor ${file} && open ${fileDirname}/${fileBasenameNoExtension}.html" + ], + "group": { + "kind": "build", + "isDefault": true + }, + "presentation": { + "reveal": "always", + "panel": "shared", + "showReuseMessage": false, + "clear": true + }, + "problemMatcher": [], + "options": { + "cwd": "${workspaceFolder}" + } + } + ], + "inputs": [ + { + "id": "currentFile", + "type": "command", + "command": "extension.commandvariable.file.filePath" + } + ] +} \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000..f0fc6442 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,32 @@ +# default rule, unless a more specific rule applies +* @eks-admins + +# lower approval for specific non-content folders +/.vscode/ fincd@amazon.com + +# default rule for user guide +/latest/ug/ @eks-contributors + +# guide section assignments +/latest/ug/clusters/ gcline@amazon.com +/latest/ug/manage-access/ gcline@amazon.com +/latest/ug/workloads/ gcline@amazon.com +/latest/ug/networking/ fincd@amazon.com +/latest/ug/storage/ pgasca@amazon.com +/latest/ug/nodes/ pgasca@amazon.com +/latest/ug/connector/ fincd@amazon.com +/latest/ug/contribute/ gcline@amazon.com +/latest/ug/getting-started/ mcngs@amazon.com +/latest/ug/integrations/ gcline@amazon.com +/latest/ug/observability/ pgasca@amazon.com +/latest/ug/outposts/ fincd@amazon.com +/latest/ug/security/ fincd@amazon.com +/latest/ug/what-is/ mcngs@amazon.com + +# use default rule +# /latest/ug/troubleshooting/ +# /latest/ug/images/ +# /latest/ug/images_BJS/ +# /latest/ug/YAML +# /latest/ug/iam_policies/ +# /latest/ug/diagrams/ \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..3b644668 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,4 @@ +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..a0d95567 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,56 @@ +# Guidelines for contributing + +Thank you for your interest in contributing to AWS documentation! 
We greatly value feedback and contributions from our community. + +Please read through this document before you submit any pull requests or issues. It will help us work together more effectively. + +## What to expect when you contribute + +When you submit a pull request, our team is notified and will respond as quickly as we can. We'll do our best to work with you to ensure that your pull request adheres to our style and standards. If we merge your pull request, we might make additional edits later for style or clarity. + +The AWS documentation source files on GitHub aren't published directly to the official documentation website. If we merge your pull request, we'll publish your changes to the documentation website as soon as we can, but they won't appear immediately or automatically. + +We look forward to receiving your pull requests for: + +* New content you'd like to contribute (such as new code samples or tutorials) +* Inaccuracies in the content +* Information gaps in the content that need more detail to be complete +* Typos or grammatical errors +* Suggested rewrites that improve clarity and reduce confusion + +**Note:** We all write differently, and you might not like how we've written or organized something currently. We want that feedback. But please be sure that your request for a rewrite is supported by the previous criteria. If it isn't, we might decline to merge it. + +## How to contribute + +To contribute, send us a pull request. For small changes, such as fixing a typo or adding a link, you can use the [GitHub Edit Button](https://blog.github.com/2011-04-26-forking-with-the-edit-button/). For larger changes: + +1. [Fork the repository](https://help.github.com/articles/fork-a-repo/). +2. In your fork, make your change in a branch that's based on this repo's **main** branch. +3. Commit the change to your fork, using a clear and descriptive commit message. +4. [Create a pull request](https://help.github.com/articles/creating-a-pull-request-from-a-fork/), answering any questions in the pull request form. + +Before you send us a pull request, please be sure that: + +1. You're working from the latest source on the **main** branch. +2. You check [existing open](https://github.com/awsdocs/amazon-eks-user-guide/pulls), and [recently closed](https://github.com/awsdocs/amazon-eks-user-guide/pulls?q=is%3Apr+is%3Aclosed), pull requests to be sure that someone else hasn't already addressed the problem. +3. You [create an issue](https://github.com/awsdocs/amazon-eks-user-guide/issues/new) before working on a contribution that will take a significant amount of your time. + +For contributions that will take a significant amount of time, [open a new issue](https://github.com/awsdocs/amazon-eks-user-guide/issues/new) to pitch your idea before you get started. Explain the problem and describe the content you want to see added to the documentation. Let us know if you'll write it yourself or if you'd like us to help. We'll discuss your proposal with you and let you know whether we're likely to accept it. We don't want you to spend a lot of time on a contribution that might be outside the scope of the documentation or that's already in the works. + +## Finding contributions to work on + +If you'd like to contribute, but don't have a project in mind, look at the [open issues](https://github.com/awsdocs/amazon-eks-user-guide/issues) in this repository for some ideas. 
Any issues with the [help wanted](https://github.com/awsdocs/amazon-eks-user-guide/labels/help%20wanted) or [enhancement](https://github.com/awsdocs/amazon-eks-user-guide/labels/enhancement) labels are a great place to start. + +In addition to written content, we really appreciate new examples and code samples for our documentation, such as examples for different platforms or environments, and code samples in additional languages. + +## Code of conduct + +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information, see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact [opensource-codeofconduct@amazon.com](mailto:opensource-codeofconduct@amazon.com) with any additional questions or comments. + +## Security issue notifications + +If you discover a potential security issue, please notify AWS Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public issue on GitHub. + +## Licensing + +See the [LICENSE](https://github.com/awsdocs/amazon-eks-user-guide/blob/main/LICENSE) file for this project's licensing. We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. diff --git a/Config b/Config new file mode 100755 index 00000000..6a1d1ce1 --- /dev/null +++ b/Config @@ -0,0 +1,31 @@ +# -*-perl-*- + +package.AmazonEKSDocs = { + flavors = { + map = single; + generation = 1; + }; + + interfaces = (3.0); + deploy = { + generic = true; + }; + scope = webservices; + + build-system = happytrails; + build-environment = { + chroot = basic; + network-access = blocked; + }; + + build-tools = { + 3.0 = { + HappyTrails = 3.2; + AWSEC2ContainerChecklist = 1.0; + AWSDevDocsQuotasShare = 1.0; + JavaBuildAndTestMin = jdk8; + ZonBook = 4.0; + AWSDevDocsChecklistBJS = 2.0; + }; + }; +}; diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..7785b904 --- /dev/null +++ b/LICENSE @@ -0,0 +1,152 @@ +Creative Commons Attribution-ShareAlike 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. + +Section 1 – Definitions. + + a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. + + b. 
Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. + + e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. + + i. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights under this Public License. + + k. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. + +Section 2 – Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: + + A. reproduce and Share the Licensed Material, in whole or in part; and + + B. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. + + 3. Term. The term of this Public License is specified in Section 6(a). + + 4. Media and formats; technical modifications allowed. 
The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. + + 5. Downstream recipients. + + A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. + + B. Additional offer from the Licensor – Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply. + + C. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. + + 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this Public License. + + 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties. + +Section 3 – License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified form), You must: + + A. retain the following if it is supplied by the Licensor with the Licensed Material: + + i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of warranties; + + v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; + + B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and + + C. 
indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. + + 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. + + b. ShareAlike.In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. + + 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. + +Section 4 – Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database; + + b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and + + c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. +For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. + +Section 5 – Disclaimer of Warranties and Limitation of Liability. + + a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You. + + b. To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. 
Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You. + + c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. + +Section 6 – Term and Termination. + + a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or + + 2. upon express reinstatement by the Licensor. + + c. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. + + d. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. + + e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. + +Section 7 – Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. + +Section 8 – Interpretation. + + a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. + + c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. + + d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. diff --git a/LICENSE-SAMPLECODE b/LICENSE-SAMPLECODE new file mode 100644 index 00000000..14aabc34 --- /dev/null +++ b/LICENSE-SAMPLECODE @@ -0,0 +1,14 @@ +Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/LICENSE-SUMMARY b/LICENSE-SUMMARY new file mode 100644 index 00000000..56888df1 --- /dev/null +++ b/LICENSE-SUMMARY @@ -0,0 +1,5 @@ +Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +The documentation is made available under the Creative Commons Attribution-ShareAlike 4.0 International License. See the LICENSE file. + +The sample code within this documentation is made available under a modified MIT license. See the LICENSE-SAMPLECODE file. diff --git a/README.md b/README.md new file mode 100644 index 00000000..180508b6 --- /dev/null +++ b/README.md @@ -0,0 +1,37 @@ +## Amazon EKS User Guide + +Welcome to the Amazon EKS User Guide repository. This repository contains the open source version of the [Amazon EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/). + +## Important Update + +This repository will be temporarily taken down to prepare for a new contributor experience. The repository will return at the same url by mid-November. + +## New Contribution Experience Coming Soon + +We are temporarily taking down the current GitHub repository to prepare for an enhanced contribution experience. The new version will be available in mid-November with the following improvements: + +- **AsciiDoc-Powered Documentation**: The guide will use AsciiDoc, an intuitive yet powerful authoring language similar to Markdown that offers: + - Advanced formatting capabilities + - Robust cross-referencing + - Enhanced security controls + - Enterprise-grade documentation features + +- **Streamlined Contribution Process**: + - Direct editing of documentation source files + - Faster pull request processing + - Increased automation + - Reduced manual steps + +## Stay Tuned + +For more information about the new experience, see [Contribute](https://docs.aws.amazon.com/eks/latest/userguide/contribute.html) in the Amazon EKS User Guide. + +We look forward to your contributions when we launch the new GitHub experience. The improved platform will make it easier than ever to help us enhance the Amazon EKS documentation. + + + +## License Summary + +The documentation is made available under the Creative Commons Attribution-ShareAlike 4.0 International License. See the LICENSE file. + +The sample code within this documentation is made available under a modified MIT license. See the LICENSE-SAMPLECODE file. diff --git a/build-info.xml b/build-info.xml new file mode 100755 index 00000000..a0d5e5d5 --- /dev/null +++ b/build-info.xml @@ -0,0 +1,49 @@ + + + + + eks + Amazon EKS + 303 + Amazon EKS + + + + + enabled + 0 + + + + userguide + eks-ug + User Guide + eks + latest + latest + latest/ug + en_us + + mesh-gs-eks + + + awsdocs + amazon-eks-user-guide + mainline + latest/ug + + + + Amazon EKS Document History + doc-history + + + + 1 + + + + + + + diff --git a/build.xml b/build.xml new file mode 100755 index 00000000..d8c709f3 --- /dev/null +++ b/build.xml @@ -0,0 +1,6 @@ + + + This is the entry point for happy trails builds (package builder and eclipse). 
+ + + \ No newline at end of file diff --git a/eks-docs.code-workspace b/eks-docs.code-workspace new file mode 100644 index 00000000..9f126cbf --- /dev/null +++ b/eks-docs.code-workspace @@ -0,0 +1,32 @@ +{ + "folders": [ + { + "name": "📦 AmazonEKSDocs", + "path": "." + } + ], + "settings": { + "files.associations": { + "*.adoc": "asciidoc" + }, + "editor.wordWrap": "wordWrapColumn", + "editor.wordWrapColumn": 80, + "[asciidoc]": { + "editor.wordWrap": "wordWrapColumn", + "editor.wordWrapColumn": 80, + "editor.formatOnSave": true, + "editor.tabSize": 2, + "editor.insertSpaces": true, + "editor.rulers": [80] + }, + "asciidoc.preview.scrollPreviewWithEditor": true, + "asciidoc.preview.scrollEditorWithPreview": true, + "asciidoc.antora.showEnableAntoraPrompt": false + }, + "extensions": { + "recommendations": [ + "asciidoctor.asciidoctor-vscode" + ] + } +} + diff --git a/latest/ug/YAML/aws-auth-hybrid.yml b/latest/ug/YAML/aws-auth-hybrid.yml new file mode 100644 index 00000000..cae044ad --- /dev/null +++ b/latest/ug/YAML/aws-auth-hybrid.yml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: aws-auth + namespace: kube-system +data: + mapRoles: | + - groups: + - system:bootstrappers + - system:nodes + rolearn: + username: system:node:{{SessionName}} \ No newline at end of file diff --git a/latest/ug/YAML/network-policy-stars-demo/allow-ui-client.yaml b/latest/ug/YAML/network-policy-stars-demo/allow-ui-client.yaml new file mode 100644 index 00000000..9cf5f086 --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/allow-ui-client.yaml @@ -0,0 +1,13 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + namespace: client + name: allow-ui +spec: + podSelector: + matchLabels: {} + ingress: + - from: + - namespaceSelector: + matchLabels: + role: management-ui \ No newline at end of file diff --git a/latest/ug/YAML/network-policy-stars-demo/allow-ui.yaml b/latest/ug/YAML/network-policy-stars-demo/allow-ui.yaml new file mode 100644 index 00000000..a14d9128 --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/allow-ui.yaml @@ -0,0 +1,13 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + namespace: stars + name: allow-ui +spec: + podSelector: + matchLabels: {} + ingress: + - from: + - namespaceSelector: + matchLabels: + role: management-ui \ No newline at end of file diff --git a/latest/ug/YAML/network-policy-stars-demo/backend-policy.yaml b/latest/ug/YAML/network-policy-stars-demo/backend-policy.yaml new file mode 100644 index 00000000..2f9a9991 --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/backend-policy.yaml @@ -0,0 +1,17 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + namespace: stars + name: backend-policy +spec: + podSelector: + matchLabels: + role: backend + ingress: + - from: + - podSelector: + matchLabels: + role: frontend + ports: + - protocol: TCP + port: 6379 diff --git a/latest/ug/YAML/network-policy-stars-demo/backend.yaml b/latest/ug/YAML/network-policy-stars-demo/backend.yaml new file mode 100644 index 00000000..81ca7d52 --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/backend.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: + name: backend + namespace: stars +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + role: backend +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: backend + namespace: stars +spec: + replicas: 1 + selector: + matchLabels: + role: backend + template: + metadata: + labels: + role: backend + spec: + containers: 
+ - name: backend + image: calico/star-probe:v0.1.0 + imagePullPolicy: Always + command: + - probe + - --http-port=6379 + - --urls=http://frontend.stars:80/status,http://backend.stars:6379/status,http://client.client:9000/status + ports: + - containerPort: 6379 \ No newline at end of file diff --git a/latest/ug/YAML/network-policy-stars-demo/client.yaml b/latest/ug/YAML/network-policy-stars-demo/client.yaml new file mode 100644 index 00000000..556fb87e --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/client.yaml @@ -0,0 +1,43 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: client + labels: + role: client +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: client + namespace: client +spec: + replicas: 1 + selector: + matchLabels: + role: client + template: + metadata: + labels: + role: client + spec: + containers: + - name: client + image: calico/star-probe:v0.1.0 + imagePullPolicy: Always + command: + - probe + - --urls=http://frontend.stars:80/status,http://backend.stars:6379/status + ports: + - containerPort: 9000 +--- +apiVersion: v1 +kind: Service +metadata: + name: client + namespace: client +spec: + ports: + - port: 9000 + targetPort: 9000 + selector: + role: client \ No newline at end of file diff --git a/latest/ug/YAML/network-policy-stars-demo/default-deny.yaml b/latest/ug/YAML/network-policy-stars-demo/default-deny.yaml new file mode 100644 index 00000000..ead282c1 --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/default-deny.yaml @@ -0,0 +1,7 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: default-deny +spec: + podSelector: + matchLabels: {} \ No newline at end of file diff --git a/latest/ug/YAML/network-policy-stars-demo/default-deny.yaml.1 b/latest/ug/YAML/network-policy-stars-demo/default-deny.yaml.1 new file mode 100644 index 00000000..ead282c1 --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/default-deny.yaml.1 @@ -0,0 +1,7 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: default-deny +spec: + podSelector: + matchLabels: {} \ No newline at end of file diff --git a/latest/ug/YAML/network-policy-stars-demo/frontend-policy.yaml b/latest/ug/YAML/network-policy-stars-demo/frontend-policy.yaml new file mode 100644 index 00000000..34f8b0f6 --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/frontend-policy.yaml @@ -0,0 +1,17 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + namespace: stars + name: frontend-policy +spec: + podSelector: + matchLabels: + role: frontend + ingress: + - from: + - namespaceSelector: + matchLabels: + role: client + ports: + - protocol: TCP + port: 80 diff --git a/latest/ug/YAML/network-policy-stars-demo/frontend.yaml b/latest/ug/YAML/network-policy-stars-demo/frontend.yaml new file mode 100644 index 00000000..8d805718 --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/frontend.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: + name: frontend + namespace: stars +spec: + ports: + - port: 80 + targetPort: 80 + selector: + role: frontend +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend + namespace: stars +spec: + replicas: 1 + selector: + matchLabels: + role: frontend + template: + metadata: + labels: + role: frontend + spec: + containers: + - name: frontend + image: calico/star-probe:v0.1.0 + imagePullPolicy: Always + command: + - probe + - --http-port=80 + - --urls=http://frontend.stars:80/status,http://backend.stars:6379/status,http://client.client:9000/status + ports: 
+ - containerPort: 80 \ No newline at end of file diff --git a/latest/ug/YAML/network-policy-stars-demo/management-ui.yaml b/latest/ug/YAML/network-policy-stars-demo/management-ui.yaml new file mode 100644 index 00000000..82401f8c --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/management-ui.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: management-ui + labels: + role: management-ui +--- +apiVersion: v1 +kind: Service +metadata: + name: management-ui + namespace: management-ui +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 9001 + selector: + role: management-ui +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: management-ui + namespace: management-ui +spec: + replicas: 1 + selector: + matchLabels: + role: management-ui + template: + metadata: + labels: + role: management-ui + spec: + containers: + - name: management-ui + image: calico/star-collect:v0.1.0 + imagePullPolicy: Always + ports: + - containerPort: 9001 \ No newline at end of file diff --git a/latest/ug/YAML/network-policy-stars-demo/namespace.yaml b/latest/ug/YAML/network-policy-stars-demo/namespace.yaml new file mode 100644 index 00000000..2920a0c8 --- /dev/null +++ b/latest/ug/YAML/network-policy-stars-demo/namespace.yaml @@ -0,0 +1,4 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: stars \ No newline at end of file diff --git a/latest/ug/YAML/vpc-cni-nodeaffinity-hybrid.yml b/latest/ug/YAML/vpc-cni-nodeaffinity-hybrid.yml new file mode 100644 index 00000000..14bca5dd --- /dev/null +++ b/latest/ug/YAML/vpc-cni-nodeaffinity-hybrid.yml @@ -0,0 +1,22 @@ +spec: + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate + - hybrid \ No newline at end of file diff --git a/latest/ug/attributes.txt b/latest/ug/attributes.txt new file mode 100644 index 00000000..784e706d --- /dev/null +++ b/latest/ug/attributes.txt @@ -0,0 +1,230 @@ +// EKS-specific attributes + +:eksctl-min-version: 0.199.0 + +// EKS Auto Mode +:auto-cli-v2-version: 2.12.3 +:auto-cli-v1-version: 1.27.160 + +// Words Geoffrey often spells wrong or doesn't like to type + +:ret: retrieve +:resp: responsibility +:det: determine +:cap: capability +:caps: capabilites +:recd: recommended +:config: configuration +:ind: indicate +:ena: enable +:dis: disable + +// AWS shared content + +// Region specific +// Entities that differ depending on the Region build such as China +:arn-aws: pass:q[[.shared]``region.arn``] + +// Service names +:amazon-cloudwatch: pass:q[[.shared]``CWlong``] +:amazon-rds: pass:q[[.shared]``RDS``] +:amazon-route-53: pass:q[[.shared]``R53long``] +:amazon-route-53-resolver: pass:q[[.shared]``R53Rlong``] +:amazon-s3: pass:q[[.shared]``S3``] +:amazon-virtual-private-cloud: pass:q[[.shared]``VPClong``] +:amazon-vpc: pass:q[[.shared]``VPC``] +:amazon-elastic-block-store: pass:q[[.shared]``EBSlong``] +:amazon-elastic-file-system: pass:q[[.shared]``EFSlong``] +:amazon-efs: pass:q[[.shared]``EFS``] +:amazon-web-services: pass:q[[.shared]``AWSlong``] +:aws: pass:q[[.shared]``AWS``] +:application-load-balancer: pass:q[[.shared]``ALB``] +:application-load-balancers: pass:q[[.shared]``ALBs``] +:aws-account: pass:q[[.shared]``AWS-account``] +:aws-accounts: pass:q[[.shared]``AWS-accounts``] +:aws-always: 
pass:q[[.shared]``AWS-always``] +:aws-artifact: pass:q[[.shared]``ART``] +:aws-billing: pass:q[[.shared]``Billinglong``] +:aws-billing-cost-management: pass:q[[.shared]``ABlong``] +:aws-cloud: pass:q[[.shared]``AWS-Cloud``] +:aws-cloudtrail: pass:q[[.shared]``CTlong``] +:aws-command-line-interface: pass:q[[.shared]``CLIlong``] +:aws-config: pass:q[[.shared]``CC``] +:aws-cost-explorer: pass:q[[.shared]``AWSCostExplorerServicelong``] +:aws-direct-connect: pass:q[[.shared]``AWS-DC``] +:aws-identity-and-access-management: pass:q[[.shared]``IAMlong``] +:aws-kms: pass:q[[.shared]``KMS``] +:aws-key-management-service: pass:q[[.shared]``KMSlong``] +:aws-kms-key: pass:q[[.shared]``kms-key-long``] +:aws-kms-keys: pass:q[[.shared]``kms-keys-long``] +:aws-license-manager: pass:q[[.shared]``LIClong``] +:aws-management-console: pass:q[[.shared]``consolelong``] +:aws-organizations: pass:q[[.shared]``AOlong``] +:aws-marketplace: pass:q[[.shared]``MKT``] +:aws-region: pass:q[[.shared]``AWS-Region``] +:aws-regions: pass:q[[.shared]``AWS-Regions``] +:aws-security-token-service: pass:q[[.shared]``STSlong``] +:aws-service: pass:q[[.shared]``AWS-service``] +:aws-services: pass:q[[.shared]``AWS-services``] +:aws-service-quotas: pass:q[[.shared]``SQ``] +:aws-support: pass:q[[.shared]``SUP``] +:aws-sts: pass:q[[.shared]``STS``] +:aws-transit-gateway: pass:q[[.shared]``AWSTGlong``] +:aws-vpn: pass:q[[.shared]``VPN``] +:classic-load-balancer: pass:q[[.shared]``CLB``] +:classic-load-balancers: pass:q[[.shared]``CLBs``] +:cli: pass:q[[.shared]``CLI``] +:cloudtrail: pass:q[[.shared]``CT``] +:cloudwatch: pass:q[[.shared]``CW``] +:cluster: pass:q[[.shared]``cluster``] +:cluster-cap: pass:q[[.shared]``Cluster``] +:ebs: pass:q[[.shared]``EBS``] +:ec2: pass:q[[.shared]``EC2``] +:ec2-auto-scaling: pass:q[[.shared]``ASlong``] +:elastic-load-balancing: pass:q[[.shared]``ELB``] +:iam: pass:q[[.shared]``IAM``] +:kms-key: pass:q[[.shared]``kms-key``] +:kms-keys: pass:q[[.shared]``kms-keys``] +:license-manager: pass:q[[.shared]``LIC``] +:organizations: pass:q[[.shared]``AO``] +:privatelink: pass:q[[.shared]``privatelink``] +:rosa-service-name-long: pass:q[[.shared]``ROSAlong``] +:rosa-service-name-short: pass:q[[.shared]``ROSA``] +:route-53: pass:q[[.shared]``R53``] +:route-53-resolver: pass:q[[.shared]``R53R``] +:sts: pass:q[[.shared]``STSshort``] +:transit-gateway: pass:q[[.shared]``AWSSTG``] +:cloudformation: pass:q[[.shared]``CFN``] +:outposts: pass:q[[.shared]``OUTlong``] +:eks-a: pass:q[[.shared]``EKS-A``] + +//AWS Regions + +:us-east-1-name: US East (N. Virginia) Region +:us-east-1-region: US East (N. Virginia) +:us-east-1-code: us-east-1 + +:us-east-2-name: US East (Ohio) Region +:us-east-2-region: US East (Ohio) +:us-east-2-code: us-east-2 + +:us-west-1-name: US West (N. California) Region +:us-west-1-region: US West (N. 
California) +:us-west-1-code: us-west-1 + +:us-west-2-name: US West (Oregon) Region +:us-west-2-region: US West (Oregon) +:us-west-2-code: us-west-2 + +:af-capetown-name: Africa (Cape Town) Region +:af-capetown-region: Africa (Cape Town) +:af-capetown-code: af-south-1 + +:ap-hongkong-name: Asia Pacific (Hong Kong) Region +:ap-hongkong-region: Asia Pacific (Hong Kong) +:ap-hongkong-code: ap-east-1 + +:ap-hyderabad-name: Asia Pacific (Hyderabad) Region +:ap-hyderabad-region: Asia Pacific (Hyderabad) +:ap-hyderabad-code: ap-south-2 + +:ap-jakarta-name: Asia Pacific (Jakarta) Region +:ap-jakarta-region: Asia Pacific (Jakarta) +:ap-jakarta-code: ap-southeast-3 + +:ap-melbourne-name: Asia Pacific (Melbourne) Region +:ap-melbourne-region: Asia Pacific (Melbourne) +:ap-melbourne-code: ap-southeast-4 + +:ap-mumbai-name: Asia Pacific (Mumbai) Region +:ap-mumbai-region: Asia Pacific (Mumbai) +:ap-mumbai-code: ap-south-1 + +:ap-osaka-name: Asia Pacific (Osaka) Region +:ap-osaka-region: Asia Pacific (Osaka) +:ap-osaka-code: ap-northeast-3 + +:ap-seoul-name: Asia Pacific (Seoul) Region +:ap-seoul-region: Asia Pacific (Seoul) +:ap-seoul-code: ap-northeast-2 + +:ap-singapore-name: Asia Pacific (Singapore) Region +:ap-singapore-region: Asia Pacific (Singapore) +:ap-singapore-code: ap-southeast-1 + +:ap-sydney-name: Asia Pacific (Sydney) Region +:ap-sydney-region: Asia Pacific (Sydney) +:ap-sydney-code: ap-southeast-2 + +:ap-tokyo-name: Asia Pacific (Tokyo) Region +:ap-tokyo-region: Asia Pacific (Tokyo) +:ap-tokyo-code: ap-northeast-1 + +:ca-central-name: Canada (Central) Region +:ca-central-region: Canada (Central) +:ca-central-code: ca-central-1 + +:eu-frankfort-name: Europe (Frankfort) Region +:eu-frankfort-region: Europe (Frankfort) +:eu-frankfort-code: eu-central-1 + +:eu-ireland-name: Europe (Ireland) Region +:eu-ireland-region: Europe (Ireland) +:eu-ireland-code: eu-west-1 + +:eu-london-name: Europe (London) Region +:eu-london-region: Europe (London) +:eu-london-code: eu-west-2 + +:eu-milan-name: Europe (Milan) Region +:eu-milan-region: Europe (Milan) +:eu-milan-code: eu-south-1 + +:eu-paris-name: Europe (Paris) Region +:eu-paris-region: Europe (Paris) +:eu-paris-code: eu-west-3 + +:eu-spain-name: Europe (Spain) Region +:eu-spain-region: Europe (Spain) +:eu-spain-code: eu-south-2 + +:eu-stockholm-name: Europe (Stockholm) Region +:eu-stockholm-region: Europe (Stockholm) +:eu-stockholm-code: eu-north-1 + +:eu-zurich-name: Europe (Zurich) Region +:eu-zurich-region: Europe (Zurich) +:eu-zurich-code: eu-central-2 + +:me-bahrain-name: Middle East (Bahrain) Region +:me-bahrain-region: Middle East (Bahrain) +:me-bahrain-code: me-south-1 + +:me-uae-name: Middle East (UAE) Region +:me-uae-region: Middle East (UAE) +:me-uae-code: me-central-1 + +:sa-saopaulo-name: South America (São Paulo) Region +:sa-saopaulo-region: South America (São Paulo) +:sa-saopaulo-code: sa-east-1 + +:govcloud-us: {aws} GovCloud (US) + +:us-gov-east-1-name: {aws} GovCloud (US-East) Region +:us-gov-east-1-region: {aws} GovCloud (US-East) +:us-gov-east-1-code: us-gov-east-1 + +:us-gov-west-1-name: {aws} GovCloud (US-West) Region +:us-gov-east-1-region: {aws} GovCloud (US-West) +:us-gov-east-1-code: us-gov-west-1 + +// EKS Auto Mode attributes + +:yec: your EKS Auto Mode cluster +:yaa: your {aws} account +:emi: EC2 managed instance +:eam: EKS Auto Mode +:mng: managed node group +:e2i: EC2 Instance +:k8s: Kubernetes +:k8s-n: 1.31 \ No newline at end of file diff --git a/latest/ug/automode/adoc-windows.adoc 
b/latest/ug/automode/adoc-windows.adoc new file mode 100644 index 00000000..96228a29 --- /dev/null +++ b/latest/ug/automode/adoc-windows.adoc @@ -0,0 +1,98 @@
+= Configure an AsciiDoc Authoring Environment on Windows using Visual Studio Code
+
+== Prereqs
+
+* Visual Studio Code installed from the Microsoft website
+* Connected to Amazon VPN
+* Know your Cloud Dev Desktop hostname, such as `http://dev-dsk-mcngs-2a-f991a827.us-west-2.amazon.com/`
+* Amazon Windows Image
+
+== Step 1: Clear SSH Keys
+
+[IMPORTANT]
+====
+Do not complete this step if you use SSH to access remote devices not managed by Amazon Corporate.
+====
+
+. Open PowerShell:
+** Press Windows key + X
+** Select "Windows PowerShell (Admin)" or "Windows Terminal (Admin)"
+** If prompted by User Account Control, click "Yes"
+
+. Back up existing SSH keys:
+** Navigate to your SSH directory by typing: `cd ~\.ssh`
+** Create a backup folder: `mkdir ~\ssh_backup`
+** Copy all files to the backup location: `Copy-Item -Path ~\.ssh\* -Destination ~\ssh_backup -Recurse`
+** Verify the backup by checking the contents: `dir ~\ssh_backup`
+
+. Remove existing SSH keys:
+** Make sure you're in the SSH directory: `cd ~\.ssh`
+** Delete all files in the .ssh folder: `Remove-Item -Path ~\.ssh\* -Recurse -Force`
+** Verify the directory is empty: `dir`
+** If the .ssh directory itself is causing issues, you can remove it entirely: `Remove-Item ~\.ssh -Recurse -Force`
+
+[NOTE]
+====
+If you need to restore your keys later, you can copy them back from the backup folder using:
+`Copy-Item -Path ~\ssh_backup\* -Destination ~\.ssh -Recurse`
+====
+
+== Step 2: Create new ECDSA SSH Key
+
+. `ssh-keygen -t ecdsa`
+
+== Step 3: Use Midway to Sign SSH Key
+
+. `mwinit` -- WITHOUT options
+. You will need to run this step periodically from Windows PowerShell.
+** You need to run `mwinit` from each device, including your laptop and CDD.
+
+== Step 4: Test SSH
+
+. Run `ssh @git.amazon.com -v `
+. Review the output
+. If you don't see a success message, send the output to Geoffrey
+
+== Step 5: Configure VS Code
+
+. Open VS Code
+. Open the extension panel in the left sidebar (Tetris-style icon)
+. Search for the "SSH" remote extension and install it
+. Open the remote panel in the left sidebar (computer monitor icon with a circular icon in the bottom right)
+. Mouse over `SSH` and then press the plus icon
+. Insert the following value: `ssh @`
+. Press Enter and follow the prompts
+. If the VS Code window shows your CDD hostname in blue in the bottom left of the window, that VS Code window is connected to your CDD
+. Once it is connected, press Ctrl+` (backtick) to open a terminal window.
+
+== Step 6: Clone Package on Cloud Dev Desktop
+
+[source,bash]
+----
+brazil ws create --name eks
+cd eks
+brazil ws use -vs AWSDevDocsAlpha/alpha
+brazil ws use AmazonEKSDocs
+cd src/AmazonEKSDocs
+pwd
+----
+
+The AmazonEKSDocs package root contains a file called `eks-docs.code-workspace`. Use the output of `pwd` and this file name to determine the full path to the code workspace.
+
+== Step 7: Open Code Workspace
+
+From an SSH-connected VS Code window, go to "File" and then "Open Workspace from File". Use the path to the code workspace file.
+
+Using this file, VS Code will automatically configure itself for authoring AsciiDoc.
+
+== To reconnect
+
+. Connect to VPN
+. Use PowerShell to run `mwinit`
+. Open VS Code
+. Select the `eks-docs` workspace from recently opened workspaces, and verify that the hostname of your CDD is shown by the workspace name on the home screen
+.
Alternatively, open the Remote Explorer panel in the left sidebar +. Find your CDD and the eks-docs workspace under it, mouse over it +. Use the right arrow icon to open the workspace in the curent window, or the plus icon to open in a new window + + diff --git a/latest/ug/automode/api-reference.adoc b/latest/ug/automode/api-reference.adoc new file mode 100644 index 00000000..e0e73f46 --- /dev/null +++ b/latest/ug/automode/api-reference.adoc @@ -0,0 +1,191 @@ + +[.topic] += EKA Auto Mode API Reference WIP +:info_doctype: section + +:ind: indicate +:Ind: Indicate +:ena: enable +:dis: disable +:cap: capability +:caps: capabilities +:yec: your EKS Auto Mode cluster +:yaa: your {aws} account +:emi: EC2 Managed Instance +:eam: EKS Auto Mode +:lbi: load balancing +:bs: block storage + +:fmis: For more information, see +:in-guide: in the EKS User Guide + +:generic-update-request: For example, enable the capability. +:generic-status-request: For example, if the {cap} is {ena}d or {dis}d. +:generic-describe-cap: {Ind}s the current configuration of the {cap} on {yec}. {generic-status-request} +:generic-config-request: Request to update the configuration of the + +:comp-cap-desc: If the compute {cap} is enabled, {eam} will create and delete {emi}s in {yaa}. +:comp-cap-link: {fmis} {eam} compute {cap} {in-guide}. + +:lb-cap-desc: If the load balancing {cap} is enabled, {eam} will create and delete {emi}s in {yaa}. +:lb-cap-link: {fmis} {eam} load balancing {cap} {in-guide}. + +:ebs-cap-desc: If the {bs} {cap} is enabled, {eam} will create and delete EBS volumes in {yaa}. +:ebs-cap-link: {fmis} {eam} {bs} {cap} {in-guide}. + +:iam-link: {fmis} the IAM Reference {in-guide}. + +:launch-limitation: Currently, you cannot selectively enable or disable {eam} {caps}. The compute {cap}, {bs} {cap}, and {lbi} {cap} must all be enabled or disabled. You must enable or disable all three capabilities in the same API request. + +== Capabilities + + +// Load Balancing +* ElasticLoadBalancing +** {generic-describe-cap} {lb-cap-link} + + + +// Storage +* BlockStorage +** {generic-describe-cap} {ebs-cap-link} + +// Storage +* BlockStorage$controllerRole +** The IAM role used by {eam} to manage EBS volumes. {iam-link} + + +// missing compute cap? + + +== $enabled + + +// Load Balancing +* ElasticLoadBalancing$enabled +** {ind}s if the {lbi} {cap} is enabled on {yec}. {lb-cap-desc} + + +// Storage +* BlockStorage$enabled +** {ind}s if the {bs} {cap} is enabled on {yec}. {ebs-cap-desc} + + +//missing compute cap? + +== CreateClusterRequest + + +// Compute +* CreateClusterRequest$computeConfig +** Enable or disable the compute {cap} of {eam} when creating {yec}. {comp-cap-desc} + + +// Storage +* CreateClusterRequest$storageConfig +** Enable or disable the {bs} {cap} of {eam} when creating {yec}. {ebs-cap-desc} + + +== Cluster$ Config + + +// Compute +* Cluster$computeConfig +** {generic-describe-cap} {comp-cap-desc} {comp-cap-link} + + +// Storage +* Cluster$storageConfig +** {generic-describe-cap} {ebs-cap-desc} {ebs-cap-link} + + +== ConfigRequest + +* ComputeConfigRequest +** {generic-config-request} the compute {cap} of your {eam}. {generic-update-request} {comp-cap-link} + +* StorageConfigRequest +** {generic-config-request} the storage {cap} of your {eam}. {generic-update-request} {ebs-cap-link} + + +=== Load Balancing + + +* KubernetesNetworkConfigRequest$elasticLoadBalancing +** Request to {ena} or {dis} the {lbi} {cap} on {yec}. 
{lb-cap-link} + +=== Compute + + +* ComputeConfigRequest$enabled +** Request to {ena} or {dis} the compute {cap} on {yec}. {comp-cap-desc} + + +* ComputeConfigRequest$nodePools +** Configuration for node pools that defines the compute resources for {yec}. {fmis} {eam} Node Pools {in-guide}. + +* ComputeConfigRequest$nodeRoleArn +** The ARN of the IAM Role EKS will assign to {emi}s in {yec}. This value cannot be changed after the compute {cap} of {eam} is enabled. {iam-link} + +=== Storage + + +* StorageConfigRequest$blockStorage +** Request to configure EBS Block Storage settings for {yec}. + + +== ConfigResponse +:status-of-request: the status of the request to update + +// Compute +* ComputeConfigResponse +** {ind}s {status-of-request} the compute {cap} of {yec}. + + +// Storage +* StorageConfigResponse +** {ind}s {status-of-request} the {bs} {cap} of {yec}. + +=== Response pointers to objects + + +// Storage +* StorageConfigResponse$blockStorage +** {ind}s the current configuration of the {bs} {cap} on {yec}. {generic-status-request} + +// Load Balancing +* $elasticLoadBalancing +** {ind}s the current configuration of the {lbi} {cap} on {yec}. {generic-status-request} + + +=== Compute Details + + +// Compute +* ComputeConfigResponse$enabled +** {ind}s if the compute {cap} is enabled on {yec}. {comp-cap-desc} + + +// Compute +* ComputeConfigResponse$nodePools +** {ind}s the current configuration of node pools in {yec}. {fmis} {eam} Node Pools {in-guide}. + +// Compute +* ComputeConfigResponse$nodeRoleArn +** The ARN of the IAM Role EKS will assign to {emi}s in {yec}. + + +== UpdateClusterConfigRequest +:update-config: Update the configuration of + +// Storage +* UpdateClusterConfigRequest$storageConfig +** {update-config} the {bs} {cap} of {yec}. {generic-update-request} + +// Compute +* UpdateClusterConfigRequest$computeConfig +** {update-config} the compute {cap} of {yec}. {generic-update-request} + +//where is LB? + + diff --git a/latest/ug/automode/associate-workload.adoc b/latest/ug/automode/associate-workload.adoc new file mode 100644 index 00000000..232512ed --- /dev/null +++ b/latest/ug/automode/associate-workload.adoc @@ -0,0 +1,52 @@ +//!!NODE_ROOT
include::../attributes.txt[]

[.topic]
[[associate-workload,associate-workload.title]]
= Control if a workload is deployed on EKS Auto Mode nodes
:info_doctype: section
:info_title: Control if a workload is deployed on EKS Auto Mode nodes
:info_titleabbrev: Control workload deployment
:info_abstract: Control if a workload is deployed on EKS Auto Mode nodes

When running workloads in an EKS cluster with {eam}, you might need to control whether specific workloads run on {eam} nodes or other compute types. This topic describes how to use node selectors and affinity rules to ensure your workloads are scheduled on the intended compute infrastructure.

The examples in this topic demonstrate how to use the `eks.amazonaws.com/compute-type` label to either require or prevent workload deployment on {eam} nodes. This is particularly useful in mixed-mode clusters where you're running both {eam} and other compute types, such as self-managed Karpenter provisioners or EKS Managed Node Groups.

{eam} nodes set the value of the label `eks.amazonaws.com/compute-type` to `auto`. You can use this label to control whether a workload is deployed to nodes managed by {eam}.

== Require that a workload is deployed to {eam} nodes

[NOTE]
====
This `nodeSelector` value is not required for {eam}. It is only relevant if you are running a cluster in mixed mode, with node types not managed by {eam}. For example, you may have static compute capacity deployed to your cluster with EKS Managed Node Groups, and have dynamic compute capacity managed by {eam}.
====

You can add this `nodeSelector` to Deployments or other workloads to require that Kubernetes schedules them onto {eam} nodes. Note that the `nodeSelector` belongs in the Pod template of the Deployment:

[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
spec:
  template:
    spec:
      nodeSelector:
        eks.amazonaws.com/compute-type: auto
----

== Require that a workload is not deployed to {eam} nodes

You can add this `nodeAffinity` to Deployments or other workloads to require that Kubernetes does *not* schedule them onto {eam} nodes.

[source,yaml]
----
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: eks.amazonaws.com/compute-type
            operator: NotIn
            values:
            - auto
----
diff --git a/latest/ug/automode/auto-configure-alb.adoc b/latest/ug/automode/auto-configure-alb.adoc new file mode 100644 index 00000000..c5fbc1fe --- /dev/null +++ b/latest/ug/automode/auto-configure-alb.adoc @@ -0,0 +1,209 @@
//!!NODE_ROOT
[.topic]
[[auto-configure-alb,auto-configure-alb.title]]
= Create an IngressClass to configure an Application Load Balancer
:info_doctype: section
:info_titleabbrev: Create ingress class

include::../attributes.txt[]

EKS Auto Mode automates routine tasks for load balancing, including exposing cluster apps to the internet.

{aws} suggests using Application Load Balancers (ALB) to serve HTTP and HTTPS traffic. Application Load Balancers can route requests based on the content of the request. For more information on Application Load Balancers, see link:elasticloadbalancing/latest/userguide/what-is-load-balancing.html["What is Elastic Load Balancing?",type="documentation"].

EKS Auto Mode creates and configures Application Load Balancers (ALBs). For example, EKS Auto Mode creates a load balancer when you create an `Ingress` Kubernetes object and configures it to route traffic to your cluster workload.

**Overview**

. Create an `IngressClassParams` resource, specifying {aws}-specific configuration values such as the certificate to use for SSL/TLS and VPC subnets.
. Create an `IngressClass` resource, specifying that EKS Auto Mode will be the controller for the resource.
. Create an `Ingress` resource that associates an HTTP path and port with a cluster workload.
. EKS Auto Mode will create an Application Load Balancer that points to the workload specified in the `Ingress` resource, using the load balancer configuration specified in the `IngressClassParams` resource.

## Prerequisites

* EKS Auto Mode enabled on an Amazon EKS cluster
* `kubectl` configured to connect to your cluster
** You can use `kubectl apply -f ` to apply the sample configuration YAML files below to your cluster.

## Step 1: Create IngressClassParams

Create an `IngressClassParams` object to specify {aws}-specific configuration options for the Application Load Balancer. Use the reference below to update the sample YAML file.

Note the name you set for the `IngressClassParams` resource; you will need it in the next step.

```yaml
apiVersion: eks.amazonaws.com/v1
kind: IngressClassParams
metadata:
  name: alb
spec:
  scheme: internet-facing
```

## Step 2: Create IngressClass

Create an `IngressClass` that references the {aws}-specific configuration values set in the `IngressClassParams` resource. Note the name of the `IngressClass`. In this example, both the `IngressClass` and `IngressClassParams` are named `alb`.

Use the `is-default-class` annotation to control whether `Ingress` resources should use this class by default.

```yaml
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: alb
  annotations:
    # Use this annotation to set an IngressClass as Default
    # If an Ingress doesn't specify a class, it will use the Default
    ingressclass.kubernetes.io/is-default-class: "true"
spec:
  # Configures the IngressClass to use EKS Auto Mode
  controller: eks.amazonaws.com/alb
  parameters:
    apiGroup: eks.amazonaws.com
    kind: IngressClassParams
    # Use the name of the IngressClassParams set in the previous step
    name: alb
```

For more information on configuration options, see <>.

## Step 3: Create Ingress

Create an `Ingress` resource. The purpose of this resource is to associate paths and ports on the Application Load Balancer with workloads in your cluster.

For more information about configuring this resource, see https://kubernetes.io/docs/concepts/services-networking/ingress/[Ingress] in the Kubernetes Documentation.

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: 2048-ingress
spec:
  # this matches the name of IngressClass.
  # this can be omitted if you have a default ingressClass in cluster: the one with ingressclass.kubernetes.io/is-default-class: "true" annotation
  ingressClassName: alb
  rules:
  - http:
      paths:
      - path: /*
        pathType: ImplementationSpecific
        backend:
          service:
            name:
            port:
              number: 80
```

## Step 4: Check Status

Use `kubectl` to find the status of the `Ingress`. It can take a few minutes for the load balancer to become available.

Use the name of the `Ingress` resource you set in the previous step.

```
kubectl get ingress 
```

Once the resource is ready, retrieve the domain name of the load balancer.

```
kubectl get ingress 2048-ingress -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
```

To view the service in a web browser, review the port and path specified in the `Ingress` resource.

## Step 5: Cleanup

To clean up the load balancer, use the following command:

```
kubectl delete ingress 
```

EKS Auto Mode will automatically delete the associated load balancer in your {aws} account.

[[ingress-reference,ingress-reference.title]]
== IngressClassParams Reference

The table below is a quick reference for commonly used configuration options.

[cols="3*", options="header"]
|===
|Field |Description |Example Value

|`scheme`
|Defines whether the ALB is internal or internet-facing
|`internet-facing`

|`namespaceSelector`
|Restricts which namespaces can use this IngressClass
|`environment: prod`

|`group.name`
|Groups multiple Ingresses to share a single ALB
|`retail-apps`

|`ipAddressType`
|Sets IP address type for the ALB
|`dualstack`

|`subnets.ids`
|List of subnet IDs for ALB deployment
|`subnet-xxxx, subnet-yyyy`

|`subnets.tags`
|Tag filters to select subnets for ALB
|`Environment: prod`

|`certificateARNs`
|ARNs of SSL certificates to use
|`arn:aws:acm:region:account:certificate/id`

|`tags`
|Custom tags for {aws} resources
|`Environment: prod, Team: platform`

|`loadBalancerAttributes`
|Load balancer specific attributes
|`idle_timeout.timeout_seconds: 60`
|===

== Considerations

* You cannot use annotations on an IngressClass to configure load balancers with EKS Auto Mode.
* You must update the Cluster IAM Role to enable tag propagation from Kubernetes to {aws} Load Balancer resources. For more information, see <>.
* For information about associating resources with either EKS Auto Mode or the self-managed {aws} Load Balancer Controller, see <>.
* For information about fixing issues with load balancers, see <>.
* For more considerations about using the load balancing capability of EKS Auto Mode, see <>.

The following tables provide a detailed comparison of changes in IngressClassParams, Ingress annotations, and TargetGroupBinding configurations for EKS Auto Mode. These tables highlight the key differences between the load balancing capability of EKS Auto Mode and the open source load balancer controller, including API version changes, deprecated features, and updated parameter names.

+ +=== IngressClassParams + +[options="header"] +|=== +| Previous | New | Description +| `elbv2.k8s.aws/v1beta1` | `eks.amazonaws.com/v1` | API version change +| `spec.certificateArn` | `spec.certificateARNs` | Support for multiple certificate ARNs +| `spec.subnets.tags` | `spec.subnets.matchTags` | Changed subnet matching schema +| `spec.listeners.listenerAttributes` | `spec.listeners.attributes` | Simplified attribute naming +|=== + +=== Ingress annotations + +[options="header"] +|=== +| Previous | New | Description +| `kubernetes.io/ingress.class` | Not supported | Use `spec.ingressClassName` on Ingress objects +| `alb.ingress.kubernetes.io/group.name` | Not supported | Specify groups in IngressClass only +| `alb.ingress.kubernetes.io/waf-acl-id` | Not supported | Use WAF v2 instead +| `alb.ingress.kubernetes.io/web-acl-id` | Not supported | Use WAF v2 instead +| `alb.ingress.kubernetes.io/shield-advanced-protection` | Not supported | Shield integration disabled +|=== + +=== TargetGroupBinding + +[options="header"] +|=== +| Previous | New | Description +| `elbv2.k8s.aws/v1beta1` | `eks.amazonaws.com/v1` | API version change +| `spec.targetType` optional | `spec.targetType` required | Explicit target type specification +| `spec.networking.ingress.from` | Not supported | No longer supports NLB without security groups +|=== diff --git a/latest/ug/automode/auto-configure-nlb.adoc b/latest/ug/automode/auto-configure-nlb.adoc new file mode 100644 index 00000000..916fc693 --- /dev/null +++ b/latest/ug/automode/auto-configure-nlb.adoc @@ -0,0 +1,156 @@ +//!!NODE_ROOT
+[.topic] +[[auto-configure-nlb,auto-configure-nlb.title]] += Use Service Annotations to configure Network Load Balancers +:info_doctype: section +:info_titleabbrev: Create service + +include::../attributes.txt[] + +Learn how to configure Network Load Balancers (NLB) in Amazon EKS using Kubernetes service annotations. This topic explains the annotations supported by EKS Auto Mode for customizing NLB behavior, including internet accessibility, health checks, SSL/TLS termination, and IP targeting modes. + +When you create a Kubernetes service of type `LoadBalancer` in EKS Auto Mode, EKS automatically provisions and configures an {aws} Network Load Balancer based on the annotations you specify. This declarative approach allows you to manage load balancer configurations directly through your Kubernetes manifests, maintaining infrastructure as code practices. + +EKS Auto Mode handles Network Load Balancer provisioning by default for all services of type LoadBalancer - no additional controller installation or configuration is required. The `loadBalancerClass: eks.amazonaws.com/nlb `specification is automatically set as the cluster default, streamlining the deployment process while maintaining compatibility with existing Kubernetes workloads. + + +== Sample Service + +For more information about the Kubernetes `Service` resource, see https://kubernetes.io/docs/concepts/services-networking/service/[the Kubernetes Documentation]. + +Review the sample `Service` resource below: + +[source,yaml] +---- +apiVersion: v1 +kind: Service +metadata: + name: echoserver + annotations: + # Specify the load balancer scheme as internet-facing to create a public-facing Network Load Balancer (NLB) + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing +spec: + selector: + app: echoserver + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + type: LoadBalancer + # Specify the new load balancer class for NLB as part of EKS Auto Mode feature + # For clusters with Auto Mode enabled, this field can be omitted as it's the default + loadBalancerClass: eks.amazonaws.com/nlb +---- + +== Commonly used annotations + +The following table lists commonly used annotations supported by EKS Auto Mode. Note that EKS Auto Mode may not support all annotations. + +[TIP] +==== +All of the following annotations need to be prefixed with `service.beta.kubernetes.io/` +==== + + +[role="no-scroll"] +[cols="1,1,1", options="header"] +|=== +|Field |Description |Example + +|`aws-load-balancer-type` +|Specifies the load balancer type. Use `external` for new deployments. +|`external` + +|`aws-load-balancer-nlb-target-type` +|Specifies whether to route traffic to node instances or directly to pod IPs. Use `instance` for standard deployments or `ip` for direct pod routing. +|`instance` + +|`aws-load-balancer-scheme` +|Controls whether the load balancer is internal or internet-facing. +|`internet-facing` + +|`aws-load-balancer-healthcheck-protocol` +|Health check protocol for target group. Common options are `TCP` (default) or `HTTP`. +|`HTTP` + +|`aws-load-balancer-healthcheck-path` +|The HTTP path for health checks when using HTTP/HTTPS protocol. +|`/healthz` + +|`aws-load-balancer-healthcheck-port` +|Port used for health checks. Can be a specific port number or `traffic-port`. +|`traffic-port` + +|`aws-load-balancer-subnets` +|Specifies which subnets to create the load balancer in. Can use subnet IDs or names. 
+|`subnet-xxxx, subnet-yyyy` + +|`aws-load-balancer-ssl-cert` +|ARN of the SSL certificate from {aws} Certificate Manager for HTTPS/TLS. +|`arn:aws:acm:region:account:certificate/cert-id` + +|`aws-load-balancer-ssl-ports` +|Specifies which ports should use SSL/TLS. +|`443, 8443` + +|`load-balancer-source-ranges` +|CIDR ranges allowed to access the load balancer. +|`10.0.0.0/24, 192.168.1.0/24` + +|`aws-load-balancer-additional-resource-tags` +|Additional {aws} tags to apply to the load balancer and related resources. +|`Environment=prod,Team=platform` + +|`aws-load-balancer-ip-address-type` +|Specifies whether the load balancer uses IPv4 or dual-stack (IPv4 + IPv6). +|`ipv4` or `dualstack` +|=== + +== Considerations + +* You must update the Cluster IAM Role to enable tag propagation from Kubernetes to {aws} Load Balancer resources. For more information, see <>. +* For information about associating resources with either EKS Auto Mode or the self-managed {aws} Load Balancer Controller, see <>. +* For information about fixing issues with load balancers, see <>. +* For more considerations about using the load balancing capability of EKS Auto Mode, see <>. + +When migrating to EKS Auto Mode for load balancing, several changes in service annotations and resource configurations are necessary. The following tables outline key differences between previous and new implementations, including unsupported options and recommended alternatives. + +=== Service annotations + +[options="header"] +|=== +| Previous | New | Description +| `service.beta.kubernetes.io/load-balancer-source-ranges` | Not supported | Use `spec.loadBalancerSourceRanges` on Service +| `service.beta.kubernetes.io/aws-load-balancer-type` | Not supported | Use `spec.loadBalancerClass` on Service +| `service.beta.kubernetes.io/aws-load-balancer-internal` | Not supported | Use `service.beta.kubernetes.io/aws-load-balancer-scheme` +| Various load balancer attributes | Not supported | Use `service.beta.kubernetes.io/aws-load-balancer-attributes` +| `service.beta.kubernetes.io/aws-load-balancer-proxy-protocol` | Not supported | Use `service.beta.kubernetes.io/aws-load-balancer-attributes` instead +| `service.beta.kubernetes.io/aws-load-balancer-access-log-enabled` | Not supported | Use `service.beta.kubernetes.io/aws-load-balancer-attributes` instead +| `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name` | Not supported | Use `service.beta.kubernetes.io/aws-load-balancer-attributes` instead +| `service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix` | Not supported | Use `service.beta.kubernetes.io/aws-load-balancer-attributes` instead +| `service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled` | Not supported | Use `service.beta.kubernetes.io/aws-load-balancer-attributes` instead +|=== + +To migrate from deprecated load balancer attribute annotations, consolidate these settings into the `service.beta.kubernetes.io/aws-load-balancer-attributes` annotation. This annotation accepts a comma-separated list of key-value pairs for various load balancer attributes. 
For example, to specify proxy protocol, access logging, and cross-zone load balancing, use the following format: + +```yaml +service.beta.kubernetes.io/aws-load-balancer-attributes: | + proxy_protocol.v2.enabled=true + access_logs.s3.enabled=true + access_logs.s3.bucket=my-bucket + access_logs.s3.prefix=my-prefix + load_balancing.cross_zone.enabled=true + +``` + +This consolidated format provides a more consistent and flexible way to configure load balancer attributes while reducing the number of individual annotations needed. Review your existing Service configurations and update them to use this consolidated format. + +=== TargetGroupBinding + +[options="header"] +|=== +| Previous | New | Description +| `elbv2.k8s.aws/v1beta1` | `eks.amazonaws.com/v1` | API version change +| `spec.targetType` optional | `spec.targetType` required | Explicit target type specification +| `spec.networking.ingress.from` | Not supported | No longer supports NLB without security groups +|=== diff --git a/latest/ug/automode/auto-disable.adoc b/latest/ug/automode/auto-disable.adoc new file mode 100644 index 00000000..ce6df411 --- /dev/null +++ b/latest/ug/automode/auto-disable.adoc @@ -0,0 +1,63 @@ +//!!NODE_ROOT
[.topic]
[[auto-disable,auto-disable.title]]
= Disable EKS Auto Mode
:info_doctype: section

include::../attributes.txt[]

You can disable EKS Auto Mode on an existing EKS cluster. This is a destructive operation.

* EKS will terminate all EC2 instances operated by EKS Auto Mode.
* EKS will delete all Load Balancers operated by EKS Auto Mode.
* EKS will *not* delete EBS volumes provisioned by EKS Auto Mode.

EKS Auto Mode is designed to fully manage the resources that it creates.
Manual interventions could result in EKS Auto Mode failing to completely clean up those resources when it is disabled.
For example, if you refer to a managed Security Group from external Security Group rules and forget to remove that reference before you disable EKS Auto Mode for a cluster, the managed Security Group will leak (not be deleted).
The steps below describe how to remove a leaked Security Group if that should happen.

== Disable EKS Auto Mode ({aws} Console)

. Open your cluster overview page in the {aws} Management Console.
. Under *EKS Auto Mode*, select *Manage*.
. Toggle *EKS Auto Mode* to `off`.

If any managed Security Group is not deleted at the end of this process, you can delete it manually using the instructions in link:vpc/latest/userguide/deleting-security-groups.html[Delete a security group,type="documentation"].

== Disable EKS Auto Mode ({aws} CLI)

Use the following command to disable {eam} on an existing cluster.

You need to have the `aws` CLI installed, and be logged in with sufficient permissions to manage EKS clusters. For more information, see <>.

[NOTE]
====
The compute, block storage, and load balancing capabilities must all be enabled or disabled in the same request.
====

[source,cli]
----
aws eks update-cluster-config \
  --name $CLUSTER_NAME \
  --compute-config enabled=false \
  --kubernetes-network-config '{"elasticLoadBalancing":{"enabled": false}}' \
  --storage-config '{"blockStorage":{"enabled": false}}'
----

You can check whether a leaked EKS Auto Mode Security Group failed to be deleted after disabling EKS Auto Mode as follows:

[source,cli]
----
aws ec2 describe-security-groups \
  --filters Name=tag:eks:eks-cluster-name,Values= Name=tag-key,Values=ingress.eks.amazonaws.com/resource,service.eks.amazonaws.com/resource --query "SecurityGroups[*].[GroupName]"
----

To then delete the Security Group:

[source,cli]
----
aws ec2 delete-security-group --group-name=
----
diff --git a/latest/ug/automode/auto-elb-example.adoc b/latest/ug/automode/auto-elb-example.adoc new file mode 100644 index 00000000..10edb9ed --- /dev/null +++ b/latest/ug/automode/auto-elb-example.adoc @@ -0,0 +1,241 @@
//!!NODE_ROOT
+ +[.topic] +[[auto-elb-example,auto-elb-example.title]] += Deploy a Sample Load Balancer Workload to EKS Auto Mode +:info_doctype: section +:info_title: Deploy a sample load balancer workload to EKS Auto Mode +:info_titleabbrev: Deploy load balancer workload +:info_abstract: Deploy a sample load balancer workload to EKS Auto Mode + + +include::../attributes.txt[] + + +This guide walks you through deploying a containerized version of the 2048 game on Amazon EKS, complete with load balancing and internet accessibility. + +== Prerequisites + +* An EKS Auto Mode cluster +* `kubectl` configured to interact with your cluster +* Appropriate IAM permissions for creating ALB resources + +== Step 1: Create the Namespace + +First, create a dedicated namespace for the 2048 game application. + +Create a file named `01-namespace.yaml`: + +[source,yaml] +---- +apiVersion: v1 +kind: Namespace +metadata: + name: game-2048 +---- + +Apply the namespace configuration: + +[source,bash] +---- +kubectl apply -f 01-namespace.yaml +---- + +== Step 2: Deploy the Application + +The application runs multiple replicas of the 2048 game container. + +Create a file named `02-deployment.yaml`: + +[source,yaml] +---- +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: game-2048 + name: deployment-2048 +spec: + selector: + matchLabels: + app.kubernetes.io/name: app-2048 + replicas: 5 + template: + metadata: + labels: + app.kubernetes.io/name: app-2048 + spec: + containers: + - image: public.ecr.aws/l6m2t8p7/docker-2048:latest + imagePullPolicy: Always + name: app-2048 + ports: + - containerPort: 80 + resources: + requests: + cpu: "0.5" +---- + +**Key components:** + +- Deploys 5 replicas of the application +- Uses a public ECR image +- Requests 0.5 CPU cores per pod +- Exposes port 80 for HTTP traffic + +Apply the deployment: + +[source,bash] +---- +kubectl apply -f 02-deployment.yaml +---- + +== Step 3: Create the Service + +The service exposes the deployment to the cluster network. + +Create a file named `03-service.yaml`: + +[source,yaml] +---- +apiVersion: v1 +kind: Service +metadata: + namespace: game-2048 + name: service-2048 +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + type: NodePort + selector: + app.kubernetes.io/name: app-2048 +---- + +**Key components:** + +- Creates a NodePort service +- Maps port 80 to the container's port 80 +- Uses label selector to find pods + +Apply the service: + +[source,bash] +---- +kubectl apply -f 03-service.yaml +---- + +== Step 4: Configure Load Balancing + +You will set up an ingress to expose the application to the internet. + +First, create the `IngressClass`. Create a file named `04-ingressclass.yaml`: + +[source,yaml] +---- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + namespace: game-2048 + labels: + app.kubernetes.io/name: LoadBalancerController + name: alb +spec: + controller: eks.amazonaws.com/alb +---- + +Then create the Ingress resource. 
Create a file named `05-ingress.yaml`: + +[source,yaml] +---- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + namespace: game-2048 + name: ingress-2048 + annotations: + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: ip +spec: + ingressClassName: alb + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: service-2048 + port: + number: 80 +---- + +**Key components:** + +- Creates an internet-facing ALB +- Uses IP target type for direct pod routing +- Routes all traffic (/) to the game service + +Apply the ingress configurations: + +[source,bash] +---- +kubectl apply -f 04-ingressclass.yaml +kubectl apply -f 05-ingress.yaml +---- + +== Step 5: Verify the Deployment + +. Check that all pods are running: ++ +[source,bash] +---- +kubectl get pods -n game-2048 +---- +. Verify the service is created: ++ +[source,bash] +---- +kubectl get svc -n game-2048 +---- +. Get the ALB endpoint: ++ +[source,bash] +---- +kubectl get ingress -n game-2048 +---- + +The ADDRESS field in the ingress output will show your ALB endpoint. Wait 2-3 minutes for the ALB to provision and register all targets. + +== Step 6: Access the Game + +Open your web browser and browse to the ALB endpoint URL from the earlier step. You should see the 2048 game interface. + +== Step 7: Cleanup + +To remove all resources created in this tutorial: + +[source,bash] +---- +kubectl delete namespace game-2048 +---- + +This will delete all resources in the namespace, including the deployment, service, and ingress resources. + +== What's Happening Behind the Scenes + +. The deployment creates 5 pods running the 2048 game +. The service provides stable network access to these pods +. EKS Auto Mode: +** Creates an Application Load Balancer in {aws} +** Configures target groups for the pods +** Sets up routing rules to direct traffic to the service + +[[auto-elb-troubleshooting,auto-elb-troubleshooting.title]] +== Troubleshooting + +If the game doesn't load: + +- Ensure all pods are running: `kubectl get pods -n game-2048` +- Check ingress status: `kubectl describe ingress -n game-2048` +- Verify ALB health checks: Check the target group health in {aws} Console diff --git a/latest/ug/automode/auto-enable-existing.adoc b/latest/ug/automode/auto-enable-existing.adoc new file mode 100644 index 00000000..1e0dab07 --- /dev/null +++ b/latest/ug/automode/auto-enable-existing.adoc @@ -0,0 +1,150 @@ +//!!NODE_ROOT
[.topic]
[[auto-enable-existing,auto-enable-existing.title]]
= Enable EKS Auto Mode on an existing cluster
:info_doctype: section
:info_title: Enable EKS Auto Mode on an existing cluster
:info_titleabbrev: Enable on cluster
:info_abstract: Enable EKS Auto Mode on an existing cluster

include::../attributes.txt[]

This topic describes how to enable Amazon EKS Auto Mode on your existing Amazon EKS clusters. Enabling Auto Mode on an existing cluster requires updating IAM permissions and configuring core EKS Auto Mode settings. Once enabled, you can begin migrating your existing compute workloads to take advantage of Auto Mode's simplified operations and automated infrastructure management.

[IMPORTANT]
====
Verify that you have the minimum required version of certain Amazon EKS Add-ons installed before enabling EKS Auto Mode. For more information, see <>.
====

Before you begin, ensure you have administrator access to your Amazon EKS cluster and permissions to modify IAM roles. The steps in this topic guide you through enabling Auto Mode using either the {aws} Management Console or {aws} CLI.

== {aws} Management Console

You must be logged into the {aws} console with permission to manage IAM, EKS, and EC2 resources.

[NOTE]
====
The Cluster IAM role of an EKS Cluster cannot be changed after the cluster is created. {eam} requires additional permissions on this role. You must attach additional policies to the current role.
====

=== Update Cluster IAM Role

. Open your cluster overview page in the {aws} Management Console.
. Under *Cluster IAM role ARN*, select *View in IAM*.
. From the *Add Permissions* dropdown, select *Attach Policies*.
. Use the *Search* box to find and select the following policies:
** `AmazonEKSComputePolicy`
** `AmazonEKSBlockStoragePolicy`
** `AmazonEKSLoadBalancingPolicy`
** `AmazonEKSNetworkingPolicy`
** `AmazonEKSClusterPolicy`
. Select *Add permissions*
. From the *Trust relationships* tab, select *Edit trust policy*
. Insert the following Cluster IAM Role trust policy, and select *Update policy*

[source,json]
----
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "eks.amazonaws.com"
            },
            "Action": [
                "sts:AssumeRole",
                "sts:TagSession"
            ]
        }
    ]
}
----

=== Enable {eam}

. Open your cluster overview page in the {aws} Management Console.
. Under *EKS Auto Mode*, select *Manage*.
. Toggle *EKS Auto Mode* to on.
. From the *EKS Node Pool* dropdown, select the default node pools you want to create.
** Learn more about Node Pools in {eam}. For more information, see <>.
. If you have previously created an {eam} Node IAM role in this {aws} account, select it in the *Node IAM Role* dropdown. If you have not created this role before, select *Create {recd} Role* and follow the steps.

== {aws} CLI

=== Prerequisites
* The Cluster IAM Role of the existing EKS Cluster must include sufficient permissions for {eam}, such as the following policies (a sample of the CLI commands for attaching them follows this list):
** `AmazonEKSComputePolicy`
** `AmazonEKSBlockStoragePolicy`
** `AmazonEKSLoadBalancingPolicy`
** `AmazonEKSNetworkingPolicy`
** `AmazonEKSClusterPolicy`
* The Cluster IAM Role must have an updated trust policy including the `sts:TagSession` action. For more information on creating a Cluster IAM Role, see <>.
* `aws` CLI installed, logged in, and of a sufficient version. You must have permission to manage IAM, EKS, and EC2 resources. For more information, see <>.
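If you prefer to prepare the Cluster IAM Role from the command line, the following is a minimal sketch using the {aws} CLI. It assumes the role is named `AmazonEKSAutoClusterRole` (the name suggested elsewhere in this guide) and that the trust policy shown in the console steps above is saved locally as `trust-policy.json`; adjust both for your account.

[source,bash]
----
# Attach the AWS managed policies required by EKS Auto Mode to the Cluster IAM role.
for policy in AmazonEKSComputePolicy AmazonEKSBlockStoragePolicy \
  AmazonEKSLoadBalancingPolicy AmazonEKSNetworkingPolicy AmazonEKSClusterPolicy; do
  aws iam attach-role-policy \
    --role-name AmazonEKSAutoClusterRole \
    --policy-arn "arn:aws:iam::aws:policy/${policy}"
done

# Update the trust policy so EKS can assume the role and tag sessions (sts:TagSession).
aws iam update-assume-role-policy \
  --role-name AmazonEKSAutoClusterRole \
  --policy-document file://trust-policy.json
----
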
=== Procedure

Use the following commands to enable {eam} on an existing cluster.

[NOTE]
====
The compute, block storage, and load balancing capabilities must all be enabled or disabled in the same request.
====

[source,cli]
----
aws eks update-cluster-config \
  --name $CLUSTER_NAME \
  --compute-config enabled=true \
  --kubernetes-network-config '{"elasticLoadBalancing":{"enabled": true}}' \
  --storage-config '{"blockStorage":{"enabled": true}}'
----

[[auto-addons-required,auto-addons-required.title]]
== Required Add-on Versions

If you're planning to enable EKS Auto Mode on an existing cluster, you may need to update certain add-ons. Please note:

- This applies only to existing clusters transitioning to EKS Auto Mode.
- New clusters created with EKS Auto Mode enabled don't require these updates.

If you have any of the following add-ons installed, ensure they are at least at the specified minimum version:

[cols="1,1"]
|===
| Add-on Name | Minimum Required Version

| Amazon VPC CNI plugin for Kubernetes
| v1.19.0-eksbuild.1

| Kube-proxy
a|
* v1.25.16-eksbuild.22
* v1.26.15-eksbuild.19
* v1.27.16-eksbuild.14
* v1.28.15-eksbuild.4
* v1.29.10-eksbuild.3
* v1.30.6-eksbuild.3
* v1.31.2-eksbuild.3

| Amazon EBS CSI driver
| v1.37.0-eksbuild.1

| CSI snapshot controller
| v8.1.0-eksbuild.2

| EKS Pod Identity Agent
| v1.3.4-eksbuild.1

|===

For more information, see <>.

== Next Steps

* To migrate Managed Node Group workloads, see <>.
* To migrate from self-managed Karpenter, see <>.
diff --git a/latest/ug/automode/auto-glossary.adoc b/latest/ug/automode/auto-glossary.adoc new file mode 100644 index 00000000..d2bfcb7f --- /dev/null +++ b/latest/ug/automode/auto-glossary.adoc @@ -0,0 +1,77 @@
//!!NODE_ROOT
+[.topic] +[[auto-glossary,auto-glossary.title]] += Glossary +:info_doctype: section +:info_title: Glossary of terms for EKS Auto Mode +:info_titleabbrev: Glossary +:info_abstract: Glossary of terms for EKS Auto Mode + + +include::../attributes.txt[] + +IAM Role:: +An IAM identity that you can create in your {aws} account that has specific permissions. You can use IAM roles to delegate access to users, applications, or services that don't normally have access to your {aws} resources. + +* You can attach {aws} IAM Policies to the role. +* You can also use EKS access entries to attach Kubernetes RBAC permissions to the role. + +Kubernetes Role:: +A Kubernetes resource that defines a set of permission rules within a namespace. Roles specify which operations (verbs) are allowed on which resources in the Kubernetes API. + +{aws} Managed IAM Policy:: +A standalone policy that is created and administered by {aws}. {aws} managed policies are designed to provide permissions for many common use cases and are maintained by {aws}. + +EKS Access Policy:: +A set of Kubernetes permissions that are managed by Amazon EKS. You can associate an EKS access policy with an IAM role to control access to your Amazon EKS cluster. + +EKS Access Entry:: +The connection between an EKS access policy and an IAM role. An access entry gives an IAM role specific permissions on your Amazon EKS cluster. + +EKS Managed Node Group:: +//Find something from Alex? + +Fargate Pod:: +A pod that runs on {aws} Fargate. Each pod runs in its own isolated compute environment, and components such as the kernel, CPU resources, and memory resources are not shared with other pods. + +Self managed nodes:: +Amazon EC2 instances that you manage as part of your Amazon EKS cluster. You are responsible for security patches, system updates, and all maintenance tasks. + +Node Group:: +A collection of Amazon EC2 instances that are configured to work with Amazon EKS. Node groups contain instances with the same configuration. + +Node Class:: +A template that defines the compute infrastructure for your nodes. Node classes specify settings such as instance types, storage configuration, and networking options. + +Node Pool:: +A Kubernetes custom resource that connects your workloads to a node class. Node pools determine which pods run on which types of nodes. + +EKS Node Class for EKS Auto Mode:: +A node class that is optimized for Amazon EKS. This class uses Amazon EKS-optimized AMIs and integrates with {aws} services for security, monitoring, and maintenance. + +EC2 Node Class for Karpenter:: +A node class that works with Karpenter, an open-source node provisioning project. This class provides direct control over Amazon EC2 instance configuration without using managed node groups. + +Amazon EKS Auto Mode Managed Node:: +An Amazon EC2 instance that is configured and managed by Amazon EKS Auto Mode. These nodes receive automatic updates and are optimized for running Kubernetes workloads. + +EKS Auto Mode Managed Node Pools:: +The default node pools that are included with every Amazon EKS Auto Mode cluster. These pools are preconfigured for different workload types. + +EKS Auto Mode Managed Node Pool -- General Purpose:: +A default node pool that is suitable for most workloads. This pool provides balanced compute and memory resources. + +EKS Auto Mode Managed Node Pool -- System:: +A default node pool that is optimized for running system workloads and cluster components. 
+ +EKS Auto Mode Managed EC2 Instance:: +An Amazon EC2 instance that is managed as part of Amazon EKS Auto Mode. {aws} maintains these instances and automatically applies security patches and updates. + +EKS Auto Mode -- Compute Capability:: +A feature that automatically manages compute resources in Amazon EKS Auto Mode clusters. This capability handles tasks such as scaling nodes and selecting instance types based on workload requirements. + +EKS Auto Mode -- Block Storage Capability:: +A feature that manages Amazon EBS volumes in Amazon EKS Auto Mode clusters. This capability automatically handles volume provisioning and lifecycle management for pods that need persistent storage. + +EKS Auto Mode -- Load Balancing Capability:: +A feature that manages Application Load Balancers and Network Load Balancers in Amazon EKS Auto Mode clusters. This capability automatically configures load balancers based on your service requirements. diff --git a/latest/ug/automode/auto-learn-iam.adoc b/latest/ug/automode/auto-learn-iam.adoc new file mode 100644 index 00000000..020547d0 --- /dev/null +++ b/latest/ug/automode/auto-learn-iam.adoc @@ -0,0 +1,196 @@ +//!!NODE_ROOT
+ +[.topic] +[[auto-learn-iam,auto-learn-iam.title]] += Learn about identity and access in EKS Auto Mode +:info_titleabbrev: Identity & access +:info_doctype: section + +include::../attributes.txt[] + + +This topic describes the Identity and Access Management (IAM) roles and permissions required to use EKS Auto Mode. EKS Auto Mode uses two primary IAM roles: a Cluster IAM Role and a Node IAM Role. These roles work in conjunction with EKS Pod Identity and EKS access entries to provide comprehensive access management for your EKS clusters. + +When you configure EKS Auto Mode, you will need to set up these IAM roles with specific permissions that allow {aws} services to interact with your cluster resources. This includes permissions for managing compute resources, storage volumes, load balancers, and networking components. Understanding these role configurations is essential for proper cluster operation and security. + +In EKS Auto Mode, {aws} IAM roles are automatically mapped to Kubernetes permissions through EKS access entries, removing the need for manual configuration of `aws-auth` ConfigMaps or custom bindings. When you create a new auto mode cluster, EKS automatically creates the corresponding Kubernetes permissions using Access entries, ensuring that {aws} services and cluster components have the appropriate access levels within both the {aws} and Kubernetes authorization systems. This automated integration reduces configuration complexity and helps prevent permission-related issues that commonly occur when managing EKS clusters. + +[[auto-learn-cluster-iam-role,auto-learn-cluster-iam-role.title]] +== Cluster IAM role + +The Cluster IAM role is an {aws} Identity and Access Management (IAM) role used by Amazon EKS to manage permissions for Kubernetes clusters. This role grants Amazon EKS the necessary permissions to interact with other {aws} services on behalf of your cluster, and is automatically configured with Kubernetes permissions using EKS access entries. + +* You must attach {aws} IAM policies to this role. +* {eam} attaches Kubernetes permissions to this role automatically using EKS access entries. +* With {eam}, {aws} suggests creating a single Cluster IAM Role per {aws} account. +* {aws} suggests naming this role `AmazonEKSAutoClusterRole`. +* This role requires permissions for multiple {aws} services to manage resources including EBS volumes, Elastic Load Balancers, and EC2 instances. +* The suggested configuration for this role includes multiple {aws} managed IAM policies, related to the different {caps} of {eam}. +** `AmazonEKSComputePolicy` +** `AmazonEKSBlockStoragePolicy` +** `AmazonEKSLoadBalancingPolicy` +** `AmazonEKSNetworkingPolicy` +** `AmazonEKSClusterPolicy` + +For more information about the Cluster IAM Role and {aws} managed IAM policies, see: + +* <> +* <> + +For more information about Kubernetes access, see: + +* <> + +[[auto-learn-node-iam-role,auto-learn-node-iam-role.title]] +== Node IAM role + +The Node IAM role is an {aws} Identity and Access Management (IAM) role used by Amazon EKS to manage permissions for worker nodes in Kubernetes clusters. This role grants EC2 instances running as Kubernetes nodes the necessary permissions to interact with {aws} services and resources, and is automatically configured with Kubernetes RBAC permissions using EKS access entries. + +* You must attach {aws} IAM policies to this role. +* {eam} attaches Kubernetes RBAC permissions to this role automatically using EKS access entries. 
* {aws} suggests naming this role `AmazonEKSAutoNodeRole`.
* With {eam}, {aws} suggests creating a single Node IAM Role per {aws} account.
* This role has limited permissions. The key permissions include assuming a Pod Identity Role and pulling images from ECR.
* {aws} suggests the following {aws} managed IAM policies:
** `AmazonEKSWorkerNodeMinimalPolicy`
** `AmazonEC2ContainerRegistryPullOnly`

For more information about the Cluster IAM Role and {aws} managed IAM policies, see:

* <>
* <>

For more information about Kubernetes access, see:

* <>

== Service-linked role

Amazon EKS uses a service-linked role (SLR) for certain operations. A service-linked role is a unique type of IAM role that is linked directly to Amazon EKS. Service-linked roles are predefined by Amazon EKS and include all the permissions that the service requires to call other {aws} services on your behalf.

{aws} automatically creates and configures the SLR. You can delete an SLR only after first deleting its related resources. This protects your Amazon EKS resources because you can't inadvertently remove permission to access the resources.

The SLR policy grants Amazon EKS permissions to observe and delete core infrastructure components: EC2 resources (instances, network interfaces, security groups), ELB resources (load balancers, target groups), CloudWatch capabilities (logging and metrics), and IAM roles with an "eks" prefix. It also enables private endpoint networking through VPC/hosted zone association and includes permissions for EventBridge monitoring and cleanup of EKS-tagged resources.

For more information, see:

* <>
* <>

[[tag-prop,tag-prop.title]]
== Custom {aws} tags for EKS Auto resources

By default, the managed policies related to EKS Auto Mode do not permit applying user-defined tags to {aws} resources provisioned by Auto Mode. If you want to apply user-defined tags to {aws} resources, you must attach an additional policy to the Cluster IAM Role with sufficient permissions to create and modify tags on {aws} resources.
Below is an example of a policy that will allow unrestricted tagging access: + +[[auto-tag-policy,auto-tag-policy.title]] +.View custom tag policy example +[%collapsible, expand-section="_collapse_all_"] +==== +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Compute", + "Effect": "Allow", + "Action": [ + "ec2:CreateFleet", + "ec2:RunInstances", + "ec2:CreateLaunchTemplate" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "aws:RequestTag/eks:eks-cluster-name": "${aws:PrincipalTag/eks:eks-cluster-name}" + }, + "StringLike": { + "aws:RequestTag/eks:kubernetes-node-class-name": "*", + "aws:RequestTag/eks:kubernetes-node-pool-name": "*" + } + } + }, + { + "Sid": "Storage", + "Effect": "Allow", + "Action": [ + "ec2:CreateVolume", + "ec2:CreateSnapshot" + ], + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:snapshot/*" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/eks:eks-cluster-name": "${aws:PrincipalTag/eks:eks-cluster-name}" + } + } + }, + { + "Sid": "Networking", + "Effect": "Allow", + "Action": "ec2:CreateNetworkInterface", + "Resource": "*", + "Condition": { + "StringEquals": { + "aws:RequestTag/eks:eks-cluster-name": "${aws:PrincipalTag/eks:eks-cluster-name}" + }, + "StringLike": { + "aws:RequestTag/eks:kubernetes-cni-node-name": "*" + } + } + }, + { + "Sid": "LoadBalancer", + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateRule", + "ec2:CreateSecurityGroup" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "aws:RequestTag/eks:eks-cluster-name": "${aws:PrincipalTag/eks:eks-cluster-name}" + } + } + }, + { + "Sid": "ShieldProtection", + "Effect": "Allow", + "Action": [ + "shield:CreateProtection" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "aws:RequestTag/eks:eks-cluster-name": "${aws:PrincipalTag/eks:eks-cluster-name}" + } + } + }, + { + "Sid": "ShieldTagResource", + "Effect": "Allow", + "Action": [ + "shield:TagResource" + ], + "Resource": "arn:aws:shield::*:protection/*", + "Condition": { + "StringEquals": { + "aws:RequestTag/eks:eks-cluster-name": "${aws:PrincipalTag/eks:eks-cluster-name}" + } + } + } + ] +} +---- +==== + + +== Access Policy Reference + +For more information about the Kubernetes permissions used by EKS Auto Mode, see <>. diff --git a/latest/ug/automode/auto-migrate-karpenter.adoc b/latest/ug/automode/auto-migrate-karpenter.adoc new file mode 100644 index 00000000..081d5253 --- /dev/null +++ b/latest/ug/automode/auto-migrate-karpenter.adoc @@ -0,0 +1,137 @@ +//!!NODE_ROOT
[.topic]
[[auto-migrate-karpenter,auto-migrate-karpenter.title]]
= Migrate from Karpenter to EKS Auto Mode using kubectl
:info_doctype: section
:info_title: Migrate from Karpenter to EKS Auto Mode using kubectl
:info_titleabbrev: Migrate from Karpenter
:info_abstract: Migrate from Karpenter to EKS Auto Mode using kubectl

include::../attributes.txt[]

This topic walks you through the process of migrating workloads from Karpenter to Amazon EKS Auto Mode using kubectl. The migration can be performed gradually, allowing you to move workloads at your own pace while maintaining cluster stability and application availability throughout the transition.

The step-by-step approach outlined below enables you to run Karpenter and EKS Auto Mode side by side during the migration period. This dual-operation strategy helps ensure a smooth transition by allowing you to validate workload behavior on EKS Auto Mode before completely decommissioning Karpenter. You can migrate applications individually or in groups, providing flexibility to accommodate your specific operational requirements and risk tolerance.

## Prerequisites

Before beginning the migration, ensure you have:

* Karpenter v1.1 or later installed on your cluster. For more information, see https://karpenter.sh/docs/upgrading/upgrade-guide/#upgrading-to-110[Upgrading to 1.1.0+] in the Karpenter docs.
* `kubectl` installed and connected to your cluster. For more information, see <>.

This topic assumes you are familiar with Karpenter and NodePools. For more information, see the https://karpenter.sh/[Karpenter Documentation].

## Step 1: Enable EKS Auto Mode on the cluster

Enable EKS Auto Mode on your existing cluster using the {aws} CLI or Management Console. For more information, see <>.

[NOTE]
====
While enabling EKS Auto Mode, don't enable the `general purpose` nodepool at this stage of the transition. This node pool is not selective, so existing workloads could be scheduled onto it before you are ready to migrate them.

For more information, see <>.
====

## Step 2: Create a tainted EKS Auto Mode NodePool

Create a new NodePool for EKS Auto Mode with a taint. This ensures that existing pods won't automatically schedule on the new EKS Auto Mode nodes. This node pool uses the `default` `NodeClass` built into EKS Auto Mode. For more information, see <>.

Example node pool with taint:

```yaml
apiVersion: karpenter.sh/v1
kind: NodePool
metadata:
  name: eks-auto-mode
spec:
  template:
    spec:
      requirements:
        - key: "eks.amazonaws.com/instance-category"
          operator: In
          values: ["c", "m", "r"]
      nodeClassRef:
        group: eks.amazonaws.com
        kind: NodeClass
        name: default
      taints:
        - key: "eks-auto-mode"
          effect: "NoSchedule"
```

Update the requirements for the node pool to match the Karpenter configuration you are migrating from. You need at least one requirement.

## Step 3: Update workloads for migration

Identify and update the workloads you want to migrate to EKS Auto Mode. Add both tolerations and node selectors to these workloads:

```yaml
apiVersion: apps/v1
kind: Deployment
spec:
  template:
    spec:
      tolerations:
      - key: "eks-auto-mode"
        effect: "NoSchedule"
      nodeSelector:
        eks.amazonaws.com/compute-type: auto
```

This change allows the workload to be scheduled on the new EKS Auto Mode nodes.

EKS Auto Mode uses different labels than Karpenter. Labels related to EC2 managed instances start with `eks.amazonaws.com`. For more information, see <>.

## Step 4: Gradually migrate workloads

Repeat Step 3 for each workload you want to migrate.
This allows you to move workloads individually or in groups, based on your requirements and risk tolerance. + +## Step 5: Remove the original Karpenter NodePool + +Once all workloads have been migrated, you can remove the original Karpenter NodePool: + +``` +kubectl delete nodepool +``` + +## Step 6: Remove taint from EKS Auto Mode NodePool (Optional) + +If you want EKS Auto Mode to become the default for new workloads, you can remove the taint from the EKS Auto Mode NodePool: + +```yaml +apiVersion: karpenter.sh/v1 +kind: NodePool +metadata: + name: eks-auto-mode +spec: + template: + spec: + nodeClassRef: + group: eks.amazonaws.com + kind: NodeClass + name: default + # Remove the taints section +``` + +## Step 7: Remove node selectors from workloads (Optional) + +If you've removed the taint from the EKS Auto Mode NodePool, you can optionally remove the node selectors from your workloads, as EKS Auto Mode is now the default: + +```yaml +apiVersion: apps/v1 +kind: Deployment +spec: + template: + spec: + # Remove the nodeSelector section + tolerations: + - key: "eks-auto-mode" + effect: "NoSchedule" +``` + +## Step 8: Uninstall Karpenter from your cluster + +The steps to remove Karpenter depend on how you installed it. For more information, see the https://karpenter.sh/docs/getting-started/getting-started-with-karpenter/#create-a-cluster-and-add-karpenter[Karpenter install instructions] and the https://helm.sh/docs/helm/helm_uninstall/[Helm Uninstall command]. + diff --git a/latest/ug/automode/auto-migrate-mng.adoc b/latest/ug/automode/auto-migrate-mng.adoc new file mode 100644 index 00000000..10fe6d8b --- /dev/null +++ b/latest/ug/automode/auto-migrate-mng.adoc @@ -0,0 +1,30 @@ +//!!NODE_ROOT
+ +[.topic] +[[auto-migrate-mng,auto-migrate-mng.title]] += Migrate from EKS Managed Node Groups to EKS Auto Mode +:info_doctype: section +:info_title: Migrate from EKS Managed Node Groups to EKS Auto Mode +:info_titleabbrev: Migrate from Managed Node Groups +:info_abstract: Migrate from EKS Managed Node Groups + +include::../attributes.txt[] + +When transitioning your Amazon EKS cluster to use EKS auto mode, you can smoothly migrate your existing workloads from managed node groups using the eksctl CLI tool. This process ensures continuous application availability while EKS auto mode optimizes your compute resources. The migration can be performed with minimal disruption to your running applications. + +This topic walks you through the steps to safely drain pods from your existing managed node groups and allow EKS auto mode to reschedule them on newly provisioned instances. By following this procedure, you can take advantage of EKS auto mode's intelligent workload consolidation while maintaining your application's availability throughout the migration. + +== Prerequisites + +* Cluster with {eam} enabled +* `eksctl` CLI installed and connected to your cluster. For more information, see <>. +* Karpenter is not installed on the cluster. + +== Procedure + +Use the following `eksctl` CLI command to initiate draining pods from the existing {mng} instances. {eam} will create new nodes to back the displaced pods. + +[source,cli] +---- +eksctl update auto-mode-config --drain-all-nodegroups +---- diff --git a/latest/ug/automode/auto-mng.adoc b/latest/ug/automode/auto-mng.adoc new file mode 100644 index 00000000..34af935c --- /dev/null +++ b/latest/ug/automode/auto-mng.adoc @@ -0,0 +1,153 @@ + +//!!NODE_ROOT
+[.topic] +[[auto-mng,auto-mng.title]] += Compare EKS Auto Mode with EKS managed node groups +:info_doctype: section +:info_title: Compare EKS Auto Mode with EKS managed node groups +:info_titleabbrev: Compare with Managed Node Groups +:info_abstract: Compare EKS Auto Mode with EKS managed node groups + + +include::../attributes.txt[] + +[IMPORTANT] +==== +*{aws} Internal:* The table below will be used to update the larger table at <> +==== + + + +[cols="3*", options="header"] +|=== +|Criteria |EKS managed node groups |EKS Auto Mode + +|Can be deployed to {aws} Outposts +|No +|No + +|Can be deployed to an {aws} Local Zone +|Yes +|No + +|Can run containers that require Windows +|Yes +|No + +|Can run containers that require Linux +|Yes +|Yes + +|Can run workloads that require the Inferentia chip +|Yes – Amazon Linux nodes only +|Yes + +|Can run workloads that require a GPU +|Yes – Amazon Linux nodes only +|Yes + +|Can run workloads that require Arm processors +|Yes +|Yes + +|Can run {aws} Bottlerocket +|Yes +|Yes - Required + +|Pods share a kernel runtime environment with other Pods +|Yes – All of your Pods on each of your nodes +|Yes + +|Pods share CPU, memory, storage, and network resources with other Pods. +|Yes – Can result in unused resources on each node +|Yes + +|Pods can use more hardware and memory than requested in Pod specs +|Yes – If the Pod requires more resources than requested, and resources are available on the node, the Pod can use additional resources. +|Yes + +|Must deploy and manage Amazon EC2 instances +|Yes +|No + +|Supports Custom Machine Images (AMIs) +|Yes +|No + +|Must secure, maintain, and patch the operating system of Amazon EC2 instances +|Yes +|No + +|Can provide bootstrap arguments at deployment of a node, such as extra kubelet arguments. +|Yes – Using eksctl or a launch template with a custom AMI +|No - Use a `NodeClass` to configure nodes + +|Can assign IP addresses to Pods from a different CIDR block than the IP address assigned to the node. +|Yes – Using a launch template with a custom AMI. For more information, see Customize managed nodes with launch templates. +|No + +|Can SSH into node +|Yes +|No + +|Can deploy your own custom AMI to nodes +|Yes – Using a launch template +|No + +|Can deploy your own custom CNI to nodes +|Yes – Using a launch template with a custom AMI +|No + +|Must update node AMI on your own +|Yes – If you deployed an Amazon EKS optimized AMI, you're notified in the Amazon EKS console when updates are available. You can perform the update with one-click in the console. If you deployed a custom AMI, you're not notified in the Amazon EKS console when updates are available. You must perform the update on your own. +|No + +|Must update node Kubernetes version on your own +|Yes – If you deployed an Amazon EKS optimized AMI, you're notified in the Amazon EKS console when updates are available. You can perform the update with one-click in the console. If you deployed a custom AMI, you're not notified in the Amazon EKS console when updates are available. You must perform the update on your own. 
+|No + +|Can use Amazon EBS storage with Pods +|Yes +|Yes + +|Can use Amazon EFS storage with Pods +|Yes +|Yes + +|Can use Amazon FSx for Lustre storage with Pods +|Yes +|Yes + +|Can use Network Load Balancer for services +|Yes +|Yes + +|Pods can run in a public subnet +|Yes +|Yes + +|Can assign different VPC security groups to individual Pods +|Yes – Linux nodes only +|No + +|Can run Kubernetes DaemonSets +|Yes +|Yes + +|Support HostPort and HostNetwork in the Pod manifest +|Yes +|Yes + +|{aws} Region availability +|All Amazon EKS supported regions +|All Amazon EKS supported regions + +|Can run containers on Amazon EC2 dedicated hosts +|Yes +|No + +|Pricing +|Cost of Amazon EC2 instance that runs multiple Pods. For more information, see Amazon EC2 pricing. +| When EKS Auto Mode is enabled in your cluster, you pay a separate fee, in addition to the standard EC2 instance charges, for the instances launched using Auto Mode's compute capability. The amount varies with the instance type launched and the {aws} region where your cluster is located. For more information, see link:eks/pricing/["Amazon EKS pricing",type="marketing"]. + +|=== diff --git a/latest/ug/automode/auto-net-pol.adoc b/latest/ug/automode/auto-net-pol.adoc new file mode 100644 index 00000000..4194fc26 --- /dev/null +++ b/latest/ug/automode/auto-net-pol.adoc @@ -0,0 +1,74 @@ +//!!NODE_ROOT
+[.topic] +[[auto-net-pol,auto-net-pol.title]] += Use Network Policies with EKS Auto Mode +:info_doctype: section +:info_titleabbrev: Use network policies + +include::../attributes.txt[] + +//add to ToC + +Network policies allow you to control traffic flow at the IP address or port level within your Amazon EKS cluster. This topic explains how to enable and use network policies with EKS Auto Mode. + +## Prerequisites + +* An Amazon EKS cluster with EKS Auto Mode enabled +* kubectl configured to connect to your cluster + +## Step 1: Enable Network Policy Controller + +To use network policies with EKS Auto Mode, you first need to enable the Network Policy Controller by applying a ConfigMap to your cluster. + +. Create a file named `enable-network-policy.yaml` with the following content: ++ +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: amazon-vpc-cni + namespace: kube-system +data: + enable-network-policy-controller: "true" +``` +. Apply the ConfigMap to your cluster: ++ +``` +kubectl apply -f enable-network-policy.yaml +``` + +## Step 2: Enable Network Policies in Node Class + +Before you can use network policies, you need to ensure that your Node Class is configured to support them. Follow these steps: + +. Create or edit a Node Class YAML file (e.g., `nodeclass-network-policy.yaml`) with the following content: ++ +```yaml +apiVersion: eks.amazonaws.com/v1 +kind: NodeClass +metadata: + name: network-policy-enabled +spec: + # Enables network policy support + networkPolicy: DefaultAllow + # Optional: Enables logging for network policy events + networkPolicyEventLogs: Enabled + # Include other Node Class configurations as needed +``` +. Apply the Node Class configuration to your cluster: ++ +``` +kubectl apply -f nodeclass-network-policy.yaml +``` +. Verify that the Node Class has been created: ++ +``` +kubectl get nodeclass network-policy-enabled +``` +. Update your Node Pool to use this Node Class. For more information, see <>. + +Once your nodes are using this Node Class, they will be able to enforce network policies. You can now proceed to create and apply network policies to control traffic within your cluster. For all the node class configuration options, see <>. + +## Step 3: Create and test network policies + +Your EKS Auto Mode cluster is now configured to support Kubernetes network policies. You can test this with the <>. diff --git a/latest/ug/automode/auto-networking.adoc b/latest/ug/automode/auto-networking.adoc new file mode 100644 index 00000000..711f7bd3 --- /dev/null +++ b/latest/ug/automode/auto-networking.adoc @@ -0,0 +1,70 @@ +//!!NODE_ROOT
+[.topic] +[[auto-networking,auto-networking.title]] += Learn about VPC Networking and Load Balancing in EKS Auto Mode +:info_doctype: section +:info_title: Learn about VPC networking and load balancing in EKS Auto Mode +:info_titleabbrev: Networking +:info_abstract: Learn about VPC networking and load balancing in EKS Auto Mode. + + +include::../attributes.txt[] + + +This topic explains how to configure Virtual Private Cloud (VPC) networking and load balancing features in EKS Auto Mode. While EKS Auto Mode manages most networking components automatically, you can still customize certain aspects of your cluster's networking configuration through NodeClass resources and load balancer annotations. + +When you use EKS Auto Mode, {aws} manages the VPC Container Network Interface (CNI) configuration and load balancer provisioning for your cluster. You can influence networking behaviors by defining NodeClass objects and applying specific annotations to your Service and Ingress resources, while maintaining the automated operational model that EKS Auto Mode provides. + +== VPC CNI networking + +With {eam}, you do not directly configure the {aws} VPC CNI. {aws} manages node and pod networking. Instead, you create a `NodeClass` Kubernetes object. + +=== Configure VPC CNI with NodeClass + +The NodeClass resource in EKS Auto Mode allows you to customize certain aspects of the VPC Container Network Interface (CNI) configuration without directly managing the CNI plugin. Through NodeClass, you can specify security group selections, control node placement across VPC subnets, set SNAT policies, configure network policies, and enable network event logging. This approach maintains the automated operational model of EKS Auto Mode while providing flexibility for network customization. + +You can use a NodeClass to: + +* Select a Security Group for Nodes +* Control how nodes are placed on VPC Subnets +* Set the Node SNAT Policy to `random` or `disabled` +* Set the Network Policy to Default Deny or Default Allow +* Enable Network Event Logging to a file. + +Learn how to xref:create-node-class[Create an Amazon EKS NodeClass]. + +=== Considerations + +{eam} supports: + +* EKS Network Policies. +* The `HostPort` and `HostNetwork` options for Kubernetes Pods. +* Pods in public or private subnets. + +{eam} does *not* support: + +* Security Groups per Pod (SGPP). +* Custom Networking. The IP Addresses of Pods and Nodes must be from the same CIDR Block. +* Warm IP, warm prefix, and warm ENI configurations. +* Minimum IP targets configuration. +* Enabling or disabling prefix delegation. +* Other configurations supported by the open-source {aws} CNI. +* Network Policy configurations such as conntrack timer customization (default is 300s). +* Exporting network event logs to CloudWatch. + + +[[auto-lb-consider,auto-lb-consider.title]] +== Load balancing + +You configure {aws} Elastic Load Balancers provisioned by {eam} using annotations on Service and Ingress resources. + +For more information, see <> or <>. + +=== Considerations for load balancing with {eam} + +* The default targeting mode is IP Mode, not Instance Mode. +* {eam} only supports Security Group Mode for Network Load Balancers. +* {aws} does not support migrating load balancers from the self managed {aws} load balancer controller to management by {eam}. +* The `networking.ingress.ipBlock` field in `TargetGroupBinding` spec is not supported. 
+* If your worker nodes use custom security groups (not `+eks-cluster-sg-*+` naming pattern), your cluster role needs additional IAM permissions. The default EKS-managed policy only allows EKS to modify security groups named `+eks-cluster-sg-*+`. Without permission to modify your custom security groups, EKS cannot add the required ingress rules that allow ALB/NLB traffic to reach your pods. +* You cannot bring your own target groups. diff --git a/latest/ug/automode/auto-reference.adoc b/latest/ug/automode/auto-reference.adoc new file mode 100644 index 00000000..25be1606 --- /dev/null +++ b/latest/ug/automode/auto-reference.adoc @@ -0,0 +1,38 @@ +//!!NODE_ROOT +[.topic] +include::../attributes.txt[] +[[auto-reference,auto-reference.title]] += Learn how EKS Auto Mode works +:info_doctype: section +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . +:info_title: Learn how EKS Auto Mode works +:info_titleabbrev: How it works +:info_abstract: Learn how EKS Auto Mode works + +[abstract] +-- +Reference information for EKS Auto Mode +-- + +Use this chapter to learn how the components of Amazon EKS Auto Mode clusters work. + +[.topiclist] +[[Topic List]] + +include::automode-learn-instances.adoc[leveloffset=+1] + +include::auto-learn-iam.adoc[leveloffset=+1] + +include::auto-networking.adoc[leveloffset=+1] + +//include::auto-mng.adoc[leveloffset=+1] + +//include::auto-security.adoc[leveloffset=+1] + +//include::term-reference.adoc[leveloffset=+1] + +//include::auto-glossary.adoc[leveloffset=+1] diff --git a/latest/ug/automode/auto-troubleshoot.adoc b/latest/ug/automode/auto-troubleshoot.adoc new file mode 100644 index 00000000..ee9897bf --- /dev/null +++ b/latest/ug/automode/auto-troubleshoot.adoc @@ -0,0 +1,88 @@ +//!!NODE_ROOT
+
+[.topic]
+[[auto-troubleshoot,auto-troubleshoot.title]]
+= Troubleshoot EKS Auto Mode
+:info_doctype: section
+:info_title: Troubleshoot EKS Auto Mode
+:info_titleabbrev: Troubleshoot
+:info_abstract: Troubleshoot EKS Auto Mode
+
+include::../attributes.txt[]
+
+
+With {eam}, {aws} assumes more {resp} for {e2i}s in {yaa}. EKS assumes {resp} for the container runtime on nodes, the operating system on the nodes, and certain controllers. This includes a block storage controller, a load balancing controller, and a compute controller.
+
+You must use {aws} and {k8s} APIs to troubleshoot nodes. You can:
+
+* Use a Kubernetes `NodeDiagnostic` resource to {ret} node logs.
+* Use the {aws} EC2 CLI command `get-console-output` to {ret} console output from nodes.
+
+[NOTE]
+====
+{eam} uses {emi}s. You cannot directly access {emi}s, including by SSH.
+====
+
+If you have a problem with a controller, you should research:
+
+* If the resources associated with that controller are properly formatted and valid.
+* If the {aws} IAM and Kubernetes RBAC resources are properly configured for your cluster. For more information, see <>.
+
+[[auto-node-monitoring-agent,auto-node-monitoring-agent.title]]
+== Node monitoring agent
+
+{eam} includes the Amazon EKS node monitoring agent. You can use this agent to view troubleshooting and debugging information about nodes. The node monitoring agent publishes Kubernetes `events` and node `conditions`. For more information, see <>.
+
+== Get console output from an {emi} by using the {aws} EC2 CLI
+
+This procedure helps with troubleshooting boot-time or kernel-level issues.
+
+First, you need to {det} the EC2 instance ID of the instance associated with your workload. Second, use the {aws} CLI to {ret} the console output.
+
+. Confirm that you have `kubectl` installed and connected to your cluster.
+. (Optional) Use the name of a Kubernetes Deployment to list the associated pods.
++
+[source,cli]
+----
+kubectl get pods -l app=
+----
+. Use the name of the Kubernetes Pod to determine the EC2 instance ID of the associated node.
++
+[source,cli]
+----
+kubectl get pod -o wide
+----
+. Use the EC2 instance ID to {ret} the console output.
++
+[source,cli]
+----
+aws ec2 get-console-output --instance-id --latest --output text
+----
+
+== Get node logs by using the kubectl CLI
+
+For information about getting node logs, see <>.
+
+== View resources associated with {eam} in the {aws} Console
+
+You can use the {aws} console to view the status of resources associated with {yec}.
+
+* link:ec2/home#Volumes["EBS Volumes",type="console"]
+** View EKS Auto Mode volumes by searching for the tag key `eks:eks-cluster-name`
+* link:ec2/home#LoadBalancers["Load Balancers",type="console"]
+** View EKS Auto Mode load balancers by searching for the tag key `eks:eks-cluster-name`
+* link:ec2/home#Instances["EC2 Instances",type="console"]
+** View EKS Auto Mode instances by searching for the tag key `eks:eks-cluster-name`
+
+== View IAM Errors in {yaa}
+
+. Navigate to the CloudTrail console
+. Select "Event History" from the left navigation pane
+. Apply error code filters:
+** AccessDenied
+** UnauthorizedOperation
+** InvalidClientTokenId
+
+Look for errors related to your EKS cluster. Use the error messages to update your EKS access entries, Cluster IAM Role, or Node IAM Role. You may need to attach a new policy to these roles that grants the permissions required for {eam}.
+
+//Ensure you are running the latest version of the {aws} CLI, eksctl, etc.
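+
+For example, the following {aws} CLI commands are one way to script these checks. This is a sketch: it assumes a cluster named `my-cluster` and relies on the `eks:eks-cluster-name` tag described above, so adjust the values for your environment.
+
+[source,cli]
+----
+# List the EC2 managed instances that EKS Auto Mode launched for the cluster
+aws ec2 describe-instances \
+    --filters "Name=tag:eks:eks-cluster-name,Values=my-cluster" \
+    --query "Reservations[].Instances[].[InstanceId,State.Name]" \
+    --output table
+
+# Review recent EKS API activity in CloudTrail when investigating IAM errors
+aws cloudtrail lookup-events \
+    --lookup-attributes AttributeKey=EventSource,AttributeValue=eks.amazonaws.com \
+    --max-results 20
+----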
diff --git a/latest/ug/automode/auto-upgrade.adoc b/latest/ug/automode/auto-upgrade.adoc new file mode 100644 index 00000000..8e3d503e --- /dev/null +++ b/latest/ug/automode/auto-upgrade.adoc @@ -0,0 +1,39 @@ +//!!NODE_ROOT
+[.topic] +[[auto-upgrade,auto-upgrade.title]] += Update the Kubernetes Version of an EKS Auto Mode cluster +:info_doctype: section +:info_titleabbrev: Update Kubernetes Version + +include::../attributes.txt[] + +This topic explains how to update the Kubernetes version of your Auto Mode cluster. Auto Mode simplifies the version update process by handling the coordination of control plane updates with node replacements, while maintaining workload availability through pod disruption budgets. + +When upgrading an Auto Mode cluster, many components that traditionally required manual updates are now managed as part of the service. Understanding the automated aspects of the upgrade process and your responsibilities helps ensure a smooth version transition for your cluster. + +== Learn about updates with EKS Auto Mode + +After you initiate a control plane upgrade, {eam} begins replacing nodes in your cluster. The new nodes have the corresponding new Kubernetes version. {eam} observes pod disruption budgets when upgrading nodes. + +Additionally, you no longer need to update components like: + +* CoreDNS +* KubeProxy +* {aws} Load Balancer Controller +* Karpenter +* {aws} EBS CSI Driver + +{eam} replaces these components with service functionality. + +You are still responsible for updating: + +* Apps and workloads deployed to your cluster +* Self-managed add-ons and controllers +* Amazon EKS Add-ons +** Learn how to <> + +Learn link:eks/latest/best-practices/cluster-upgrades.html["Best Practices for Cluster Upgrades",type="documentation"] + +== Start Cluster Update + +To start a cluster update, see <>. diff --git a/latest/ug/automode/auto-workloads.adoc b/latest/ug/automode/auto-workloads.adoc new file mode 100644 index 00000000..7a99b60c --- /dev/null +++ b/latest/ug/automode/auto-workloads.adoc @@ -0,0 +1,32 @@ +//!!NODE_ROOT +[.topic] +include::../attributes.txt[] +[[auto-workloads,auto-workloads.title]] += Run sample workloads in EKS Auto Mode clusters +:info_doctype: section +:info_title: Run workloads in EKS Auto Mode clusters +:info_titleabbrev: Run workloads +:info_abstract: Run workloads in EKS Auto Mode clusters + +[abstract] +-- +Run workloads in EKS Auto Mode clusters +-- + +This chapter provides examples of how to deploy different types of workloads to Amazon EKS clusters running in Auto Mode. The examples demonstrate key workload patterns including sample applications, load-balanced web applications, stateful workloads using persistent storage, and workloads with specific node placement requirements. Each example includes complete manifests and step-by-step deployment instructions that you can use as templates for your own applications. + +Before proceeding with the examples, ensure that you have an EKS cluster running in Auto Mode and that you have installed the {aws} CLI and kubectl. For more information, see <>. The examples assume basic familiarity with Kubernetes concepts and kubectl commands. + +You can use these use case-based samples to run workloads in EKS Auto Mode clusters. + +<>:: Shows how to deploy a sample workload to an EKS Auto Mode cluster using `kubectl` commands. +<>:: Shows how to deploy a containerized version of the 2048 game on Amazon EKS. +<>:: Shows how to deploy a sample stateful application to an EKS Auto Mode cluster. +<>:: Shows how to use an annotation to control if a workload is deployed to nodes managed by EKS Auto Mode. 
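+
+For example, the inflate sample later in this chapter pins its Pods to nodes managed by EKS Auto Mode with a node selector. The following excerpt is a minimal sketch of that Pod template setting; the complete manifest appears in the sample itself.
+
+[source,yaml]
+----
+# Pod template excerpt: schedule only on EKS Auto Mode nodes
+spec:
+  nodeSelector:
+    eks.amazonaws.com/compute-type: auto
+----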
+ + +include::automode-workload.adoc[leveloffset=+1] + +include::auto-elb-example.adoc[leveloffset=+1] + +include::sample-storage-workload.adoc[leveloffset=+1] diff --git a/latest/ug/automode/automode-attributes.txt b/latest/ug/automode/automode-attributes.txt new file mode 100644 index 00000000..e69de29b diff --git a/latest/ug/automode/automode-get-started-cli.adoc b/latest/ug/automode/automode-get-started-cli.adoc new file mode 100644 index 00000000..99eec8c1 --- /dev/null +++ b/latest/ug/automode/automode-get-started-cli.adoc @@ -0,0 +1,330 @@ +//!!NODE_ROOT
+
+include::../attributes.txt[]
+
+[.topic]
+[[automode-get-started-cli,automode-get-started-cli.title]]
+= Create an EKS Auto Mode Cluster with the {aws} CLI
+:info_doctype: section
+:config: configuration
+:info_title: Create an EKS Auto Mode Cluster with the {aws} CLI
+:info_titleabbrev: {aws} CLI
+:info_abstract: Create an EKS Auto Mode cluster with the {aws} CLI
+
+EKS Auto Mode Clusters automate routine cluster management tasks for compute, storage, and networking. For example, EKS Auto Mode Clusters automatically detect when additional nodes are required and provision new EC2 instances to meet workload demands.
+
+This topic guides you through creating a new EKS Auto Mode Cluster using the {aws} CLI and optionally deploying a sample workload.
+
+## Prerequisites
+
+* The latest version of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device. To check your current version, use `aws --version`. To install the latest version, see link:cli/latest/userguide/getting-started-install.html["Installing",type="documentation"] and link:cli/latest/userguide/cli-chap-configure.html#cli-configure-quickstart-config["Quick configuration",type="documentation"] with `aws configure` in the {aws} Command Line Interface User Guide.
+** Log in to the {aws} CLI with sufficient IAM permissions to create {aws} resources, including IAM Policies, IAM Roles, and EKS Clusters.
+* The kubectl command line tool installed on your device. {aws} suggests you use the same kubectl version as the Kubernetes version of your EKS Cluster. To install or upgrade kubectl, see <>.
+
+## Specify VPC subnets
+
+Amazon EKS Auto Mode deploys nodes to VPC subnets. When creating an EKS cluster, you must specify the VPC subnets where the nodes will be deployed. You can use the default VPC subnets in your {aws} account or create a dedicated VPC for critical workloads.
+
+* {aws} suggests creating a dedicated VPC for your cluster. Learn how to <>.
+* The EKS Console assists with creating a new VPC. Learn how to <>.
+* Alternatively, you can use the default VPC of your {aws} account. Use the following instructions to find the Subnet IDs.
+
+[[auto-find-subnet,auto-find-subnet.title]]
+.To find the Subnet IDs of your default VPC
+[%collapsible, expand-section="_collapse_all_"]
+====
+
+*Using the {aws} CLI:*
+
+. Run the following command to list the default VPC and its subnets:
++
+```
+aws ec2 describe-subnets --filters "Name=vpc-id,Values=$(aws ec2 describe-vpcs --query 'Vpcs[?IsDefault==`true`].VpcId' --output text)" --query 'Subnets[*].{ID:SubnetId,AZ:AvailabilityZone}' --output table
+```
++
+. Save the output and note the **Subnet IDs**.
++
+Sample output:
++
+```
+----------------------------------------
+|            DescribeSubnets           |
+----------------------------------------
+|  SubnetId          |  AvailabilityZone |
+|--------------------|-------------------|
+| subnet-012345678   | us-west-2a        |
+| subnet-234567890   | us-west-2b        |
+| subnet-345678901   | us-west-2c        |
+----------------------------------------
+```
+
+====
+
+[[auto-mode-create-roles,auto-mode-create-roles.title]]
+== IAM Roles for EKS Auto Mode Clusters
+
+[[auto-roles-cluster-iam-role,auto-roles-cluster-iam-role.title]]
+=== Cluster IAM Role
+
+EKS Auto Mode requires a Cluster IAM Role to perform actions in your {aws} account, such as provisioning new EC2 instances. You must create this role to grant EKS the necessary permissions.
{aws} recommends attaching the following {aws} managed policies to the Cluster IAM Role: + +* xref:security-iam-awsmanpol-AmazonEKSComputePolicy[AmazonEKSComputePolicy] +* xref:security-iam-awsmanpol-AmazonEKSBlockStoragePolicy[AmazonEKSBlockStoragePolicy] +* xref:security-iam-awsmanpol-AmazonEKSLoadBalancingPolicy[AmazonEKSLoadBalancingPolicy] +* xref:security-iam-awsmanpol-AmazonEKSNetworkingPolicy[AmazonEKSNetworkingPolicy] +* xref:security-iam-awsmanpol-amazoneksclusterpolicy[AmazonEKSClusterPolicy] + +[[auto-roles-node-iam-role,auto-roles-node-iam-role.title]] +=== Node IAM Role + +When you create an EKS Auto Mode cluster, you specify a Node IAM Role. When EKS Auto Mode creates nodes to process pending workloads, each new EC2 instance node is assigned the Node IAM Role. This role allows the node to communicate with EKS but is generally not accessed by workloads running on the node. + +If you want to grant permissions to workloads running on a node, use EKS Pod Identity. For more information, see <>. + +You must create this role and attach the following {aws} managed policy: + +* xref:security-iam-awsmanpol-AmazonEKSWorkerNodeMinimalPolicy[AmazonEKSWorkerNodeMinimalPolicy] +* link:AmazonECR/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-AmazonEC2ContainerRegistryPullOnly["AmazonEC2ContainerRegistryPullOnly",type="documentation"] + +[discrete] +#### **Service-Linked Role** + +EKS Auto Mode also requires a Service-Linked Role, which is automatically created and configured by {aws}. For more information, see xref:using-service-linked-roles-eks[AWSServiceRoleForAmazonEKS]. + +## **Create an EKS Auto Mode Cluster IAM Role** + +### Step 1: Create the Trust Policy + +Create a trust policy that allows the Amazon EKS service to assume the role. 
Save the policy as `trust-policy.json`: + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "eks.amazonaws.com" + }, + "Action": [ + "sts:AssumeRole", + "sts:TagSession" + ] + } + ] +} +``` + +### Step 2: Create the IAM Role + +Use the trust policy to create the Cluster IAM Role: + +``` +aws iam create-role \ + --role-name AmazonEKSAutoClusterRole \ + --assume-role-policy-document file://trust-policy.json +``` + +### Step 3: Note the Role ARN + +Retrieve and save the ARN of the new role for use in subsequent steps: + +``` +aws iam get-role --role-name AmazonEKSAutoClusterRole --query "Role.Arn" --output text +``` + +### Step 4: Attach Required Policies + +Attach the following {aws} managed policies to the Cluster IAM Role to grant the necessary permissions: + +**AmazonEKSClusterPolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoClusterRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSClusterPolicy +``` + +**AmazonEKSComputePolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoClusterRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSComputePolicy +``` + +**AmazonEKSBlockStoragePolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoClusterRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy +``` + +**AmazonEKSLoadBalancingPolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoClusterRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy +``` + +**AmazonEKSNetworkingPolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoClusterRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy +``` + +## **Create an EKS Auto Mode Node IAM Role** + +### Step 1: Create the Trust Policy + +Create a trust policy that allows the Amazon EKS service to assume the role. Save the policy as `node-trust-policy.json`: + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} +``` + +#### Step 2: Create the Node IAM Role + +Use the **node-trust-policy.json** file from the previous step to define which entities can assume the role. Run the following command to create the Node IAM Role: + +``` +aws iam create-role \ + --role-name AmazonEKSAutoNodeRole \ + --assume-role-policy-document file://node-trust-policy.json +``` + +#### Step 3: Note the Role ARN + +After creating the role, retrieve and save the ARN of the Node IAM Role. You will need this ARN in subsequent steps. Use the following command to get the ARN: + +``` +aws iam get-role --role-name AmazonEKSAutoNodeRole --query "Role.Arn" --output text +``` + +#### Step 4: Attach Required Policies + +Attach the following {aws} managed policies to the Node IAM Role to provide the necessary permissions: + +**AmazonEKSWorkerNodeMinimalPolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoNodeRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy +``` + +**AmazonEC2ContainerRegistryPullOnly**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoNodeRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly +``` + +## **Create an EKS Auto Mode Cluster** + +### Overview + +To create an EKS Auto Mode Cluster using the {aws} CLI, you will need the following parameters: + +* `cluster-name`: The name of the cluster. +* `k8s-version`: The Kubernetes version (e.g., 1.31). 
+* `subnet-ids`: Subnet IDs identified in the previous steps.
+* `cluster-role-arn`: ARN of the Cluster IAM Role.
+* `node-role-arn`: ARN of the Node IAM Role.
+
+#### Default Cluster Configurations
+
+Review these default values and features before creating the cluster:
+
+* `nodePools`: EKS Auto Mode includes general-purpose and system default Node Pools. Learn more about xref:create-node-pool[Node Pools].
+
+**Note:** Node Pools in EKS Auto Mode differ from Amazon EKS Managed Node Groups but can coexist in the same cluster.
+
+* `computeConfig.enabled`: Automates routine compute tasks, such as creating and deleting EC2 instances.
+* `kubernetesNetworkConfig.elasticLoadBalancing.enabled`: Automates load balancing tasks, including creating and deleting Elastic Load Balancers.
+* `storageConfig.blockStorage.enabled`: Automates storage tasks, such as creating and deleting Amazon EBS volumes.
+* `accessConfig.authenticationMode`: Requires EKS access entries. Learn more about xref:grant-k8s-access[EKS authentication modes].
+
+#### Run the Command
+
+Use the following command to create the cluster:
+
+```
+aws eks create-cluster \
+  --region ${AWS_REGION} \
+  --cli-input-json \
+  "{
+    \"name\": \"${CLUSTER_NAME}\",
+    \"version\": \"${K8S_VERSION}\",
+    \"roleArn\": \"${CLUSTER_ROLE_ARN}\",
+    \"resourcesVpcConfig\": {
+      \"subnetIds\": ${SUBNETS_JSON},
+      \"endpointPublicAccess\": true,
+      \"endpointPrivateAccess\": true
+    },
+    \"computeConfig\": {
+      \"enabled\": true,
+      \"nodeRoleArn\":\"${NODE_ROLE_ARN}\",
+      \"nodePools\": [\"general-purpose\", \"system\"]
+    },
+    \"kubernetesNetworkConfig\": {
+      \"elasticLoadBalancing\": {
+        \"enabled\": true
+      }
+    },
+    \"storageConfig\": {
+      \"blockStorage\": {
+        \"enabled\": true
+      }
+    },
+    \"accessConfig\": {
+      \"authenticationMode\": \"API\"
+    }
+  }"
+```
+
+### **Check Cluster Status**
+
+#### Step 1: Verify Cluster Creation
+
+Run the following command to check the status of your cluster. Cluster creation typically takes about 15 minutes:
+
+```
+aws eks describe-cluster --name "${CLUSTER_NAME}" --output json
+```
+
+#### Step 2: Update kubeconfig
+
+Once the cluster is ready, update your local kubeconfig file to enable `kubectl` to communicate with the cluster. This configuration uses the {aws} CLI for authentication.
+
+```
+aws eks update-kubeconfig --name "${CLUSTER_NAME}"
+```
+
+#### Step 3: Verify Node Pools
+
+List the Node Pools in your cluster using the following command:
+
+```
+kubectl get nodepools
+```
+
+== Next Steps
+
+* Learn how to xref:automode-workload[deploy a sample workload] to your new EKS Auto Mode cluster.
diff --git a/latest/ug/automode/automode-get-started-console.adoc b/latest/ug/automode/automode-get-started-console.adoc
new file mode 100644
index 00000000..714f2b2a
--- /dev/null
+++ b/latest/ug/automode/automode-get-started-console.adoc
@@ -0,0 +1,54 @@
+//!!NODE_ROOT
+
+include::../attributes.txt[]
+
+[.topic]
+[[automode-get-started-console,automode-get-started-console.title]]
+= Create an EKS Auto Mode Cluster with the {aws} Management Console
+:info_doctype: section
+:info_title: Create an EKS Auto Mode Cluster with the {aws} Management Console
+:info_titleabbrev: Management console
+:info_abstract: Create an EKS Auto Mode cluster with the {aws} Management Console
+
+Creating an {eam} cluster in the {aws} Management Console requires less {config} than other options. EKS integrates with {aws} IAM and VPC Networking to help you create the resources associated with an EKS cluster.
+
+You have two options to create a cluster in the console:
+
+* Quick {config} (with EKS Auto Mode)
+* Custom {config}
+
+In this topic, you will learn how to create an {eam} cluster using the Quick {config} option.
+
+== Create an EKS Auto Mode cluster using the quick {config} option
+
+You must be logged in to the {aws} Management Console with sufficient permissions to manage {aws} resources, including EC2 instances, EC2 networking, EKS clusters, and IAM roles.
+
+. Navigate to the EKS Console
+. Click *Create cluster*
+. Confirm the *Quick {config}* option is selected
+. Determine the following values, or use the defaults for a test cluster.
+** Cluster *Name*
+** Kubernetes Version
+. Select the Cluster IAM Role. If this is your first time creating an {eam} cluster, use the *Create {recd} role* option.
+** Optionally, you can reuse a single Cluster IAM Role in {yaa} for all {eam} clusters.
+** The Cluster IAM Role includes required permissions for {eam} to manage resources including EC2 instances, EBS volumes, and EC2 load balancers.
+** The *Create {recd} role* option pre-fills all fields with {recd} values. Select *Next* and then *Create*. The role will use the suggested `AmazonEKSAutoClusterRole` name.
+** If you recently created a new role, use the *Refresh* icon to reload the role selection dropdown.
+. Select the Node IAM Role. If this is your first time creating an {eam} cluster, use the *Create {recd} role* option.
+** Optionally, you can reuse a single Node IAM Role in {yaa} for all {eam} clusters.
+** The Node IAM Role includes required permissions for Auto Mode nodes to connect to the cluster. The Node IAM Role must include permissions to {ret} ECR images for your containers.
+** The *Create {recd} role* option pre-fills all fields with {recd} values. Select *Next* and then *Create*. The role will use the suggested `AmazonEKSAutoNodeRole` name.
+** If you recently created a new role, use the *Refresh* icon to reload the role selection dropdown.
+. Select the VPC for {yec}. Choose *Create VPC* to create a new VPC for EKS, or choose a VPC you previously created for EKS.
+** If you use the VPC Console to create a new VPC, {aws} suggests you create at least one NAT Gateway per Availability Zone. Otherwise, you can use all other defaults.
+** For more information and details of IPv6 cluster requirements, see <>.
+. (optional) {eam} automatically populates the private subnets for your selected VPC. You can remove unwanted subnets.
+** EKS automatically selects private subnets from the VPC following best practices. You can optionally select additional subnets from the VPC, such as public subnets.
+. (optional) Select *View quick configuration defaults* to review all {config} values for the new cluster. The table indicates some values are not editable after the cluster is created.
+. Select *Create cluster*.
Note it may take fifteen minutes for cluster creation to complete. + +== Next Steps + +* Learn how to xref:sample-storage-workload[Deploy a Sample Workload to {yec}] + +//call out refactored IAM diff --git a/latest/ug/automode/automode-get-started-eksctl.adoc b/latest/ug/automode/automode-get-started-eksctl.adoc new file mode 100644 index 00000000..8677b35b --- /dev/null +++ b/latest/ug/automode/automode-get-started-eksctl.adoc @@ -0,0 +1,83 @@ +//!!NODE_ROOT
+
+[.topic]
+[[automode-get-started-eksctl,automode-get-started-eksctl.title]]
+= Create an EKS Auto Mode Cluster with the eksctl CLI
+:info_doctype: section
+:config: configuration
+:info_title: Create an EKS Auto Mode Cluster with the eksctl CLI
+:info_titleabbrev: eksctl CLI
+:info_abstract: Create an EKS Auto Mode cluster with the eksctl CLI
+
+
+include::../attributes.txt[]
+
+This topic shows you how to create an Amazon EKS Auto Mode cluster using the eksctl command line interface (CLI). You can create an Auto Mode cluster either by running a single CLI command or by applying a YAML configuration file. Both methods provide the same functionality, with the YAML approach offering more granular control over cluster settings.
+
+The eksctl CLI simplifies the process of creating and managing EKS Auto Mode clusters by handling the underlying {aws} resource creation and configuration. Before proceeding, ensure you have the necessary {aws} credentials and permissions configured on your local machine. This guide assumes you're familiar with basic Amazon EKS concepts and have already installed the required CLI tools.
+
+[NOTE]
+====
+You must install version `0.195.0` or greater of eksctl. For more information, see https://github.com/eksctl-io/eksctl/releases[eksctl releases] on GitHub.
+====
+
+
+== Create an {eam} cluster with a CLI command
+
+You must have the `aws` and `eksctl` tools installed. You must be logged in to the {aws} CLI with sufficient permissions to manage {aws} resources, including EC2 instances, EC2 networking, EKS clusters, and IAM roles. For more information, see <>.
+
+Run the following command to create a new {eam} cluster:
+
+[source,cli]
+----
+eksctl create cluster --name= --enable-auto-mode
+----
+
+//Cluster IAM Role?
+//Update kubeconfig?
+
+== Create an {eam} cluster with a YAML file
+:enai: enabling
+
+You must have the `aws` and `eksctl` tools installed. You must be logged in to the {aws} CLI with sufficient permissions to manage {aws} resources, including EC2 instances, EC2 networking, EKS clusters, and IAM roles. For more information, see <>.
+
+Review the {eam} configuration options in the sample ClusterConfig resource below. For the full ClusterConfig specification, see the https://eksctl.io/usage/creating-and-managing-clusters/[eksctl documentation].
+
+{aws} suggests {enai} {eam}. If this is your first time creating an {eam} cluster, leave the `nodeRoleARN` unspecified to create a Node IAM Role for {eam}. If you already have a Node IAM Role in {yaa}, {aws} suggests reusing it.
+
+{aws} suggests not specifying any value for `nodePools`. {eam} will create default node pools. You can use the Kubernetes API to create additional node pools.
+
+[source,yaml]
+----
+# cluster.yaml
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+
+metadata:
+  name:
+  region:
+
+iam:
+  # ARN of the Cluster IAM Role
+  # optional, eksctl creates a new role if not supplied
+  # suggested to use one Cluster IAM Role per account
+  serviceRoleARN:
+
+autoModeConfig:
+  # defaults to false
+  enabled: boolean
+  # optional, defaults to [general-purpose, system].
+  # suggested to leave unspecified
+  # To disable creation of nodePools, set it to the empty array ([]).
+  nodePools: []string
+  # optional, eksctl creates a new role if this is not supplied
+  # and nodePools are present.
+ nodeRoleARN: string +---- + +Save the `ClusterConfig` file as `cluster.yaml`, and use the following command to create the cluster: + +[source,cli] +---- +eksctl create cluster -f cluster.yaml +---- diff --git a/latest/ug/automode/automode-learn-instances.adoc b/latest/ug/automode/automode-learn-instances.adoc new file mode 100644 index 00000000..57220f30 --- /dev/null +++ b/latest/ug/automode/automode-learn-instances.adoc @@ -0,0 +1,112 @@ +//!!NODE_ROOT
+ +[.topic] +[[automode-learn-instances,automode-learn-instances.title]] += Learn about Amazon EKS Auto Mode Managed instances +:info_doctype: section +:am: EKS Auto Mode +:aam: Amazon {am} +:ec2i: EC2 Instance +:mi: managed instance +:emi: EC2 managed instance +:emi: EC2 {mi} +:emng: EKS Managed Node Group +:info_title: Learn about Amazon EKS Auto Mode managed instances +:info_titleabbrev: Managed instances +:info_abstract: Learn about Amazon EKS Auto Mode managed instances + +include::../attributes.txt[] + + +This topic explains how {aam} manages Amazon EC2 instances in your EKS cluster. When you enable {am}, your cluster's compute resources are automatically provisioned and managed by EKS, changing how you interact with the EC2 instances that serve as nodes in your cluster. + +Understanding how {aam} manages instances is essential for planning your workload deployment strategy and operational procedures. Unlike traditional EC2 instances or managed node groups, these instances follow a different lifecycle model where EKS assumes responsibility for many operational aspects, while restricting certain types of access and customization. + +{aam} automates routine tasks for creating new {ec2i}s, and attaches them as nodes to your EKS cluster. {am} detects when a workload can't fit onto existing nodes, and creates a new {ec2i}. + +{aam} is responsible for creating, deleting, and patching {ec2i}s. You are responsible for the containers and pods deployed on the instance. + +{ec2i}s created by {am} are different from other {ec2i}s, they are {mi}s. These {mi}s are owned by EKS and are more restricted. You can't directly access or install software on instances managed by {am}. + +{aws} suggests running either {eam} or self-managed Karpenter. You can install both during a migration or in an advanced configuration. If you have both installed, configure your node pools so that workloads are associated with either Karpenter or {eam}. + +For more information, see link:AWSEC2/latest/UserGuide/amazon-ec2-managed-instances.html["Amazon EC2 managed instances",type="documentation"] in the Amazon EC2 user guide. + +== Comparison table + +[cols="1,1", options="header"] +|=== + +| Standard {ec2i} +| {am} {mi} + +| You are responsible for patching and updating the instance. +| {aws} automatically patches and updates the instance. + +| EKS is not responsible for the software on the instance. +| EKS is responsible for certain software on the instance, such as `kubelet`, the container runtime, and the operating system. + +| You can delete the {ec2i} using the EC2 API. +| EKS determines the number of instances deployed in your account. If you delete a workload, EKS will reduce the number of instances in your account. + +| You can use SSH to access the {ec2i}. +| You can deploy pods and containers to the {mi}. + +| You determine the operating system and image (AMI). +| {aws} determines the operating system and image. + +| You can deploy workloads that rely on Windows or Ubuntu functionality. +| You can deploy containers based on Linux, but without specific OS dependencies. + +| You determine what instance type and family to launch. +| {aws} determines what instance type and family to launch. You can use a Node Pool to limit the instance types {eam} selects from. + +|=== + + +The following functionality works for both Managed instances and Standard EC2 instances: + +* You can view the instance in the {aws} console. +* You can use instance storage as ephemeral storage for workloads. 
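+
+As a quick check (a sketch, not an official procedure), you can see which of your nodes are backed by {emi}s by filtering on the `eks.amazonaws.com/compute-type: auto` label that the workload examples in this guide select on:
+
+[source,cli]
+----
+# List the nodes provisioned by EKS Auto Mode, including the backing EC2 instance details
+kubectl get nodes -l eks.amazonaws.com/compute-type=auto -o wide
+----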
+ +== Supported instance reference + +// Source: https://code.amazon.com/packages/EKSKarpenterController/blobs/a56aeb0ddc3e8a54406421e8f3a091e8e13abea1/--/pkg/providers/instancetype/instancetype.go#L43-L49 + +EKS Auto Mode supports the following instance types: + +[cols="1,4",options="header"] +|=== +|Family |Instance Types + +|Compute Optimized (C ) +|c8g, c7a, c7g, c7gn, c7gd, c7i, c7i-flex, c6a, c6g, c6i, c6gn, c6id, c6in, c6gd, c5, c5a, c5d, c5ad, c5n, c4 + +|General Purpose (M) +|m8g, m7i, m7a, m7g, m7gd, m7i-flex, m6a, m6i, m6in, m6g, m6idn, m6id, m6gd, m5, m5a, m5ad, m5n, m5dn, m5d, m5zn, m4 + +|Memory Optimized (R ) +|r8g, r7a, r7iz, r7gd, r7i, r7g, r6a, r6i, r6id, r6in, r6idn, r6g, r6gd, r5, r5n, r5a, r5dn, r5b, r5ad, r5d, r4 + +|Burstable (T) +|t4g, t3, t3a, t2 + +|High Memory (Z/X) +|z1d, x8g, x2gd + +|Storage Optimized (I/D) +|i4g, i4i, i3, i3en, is4gen, d3, d3en, im4gn + +|Accelerated Computing (P/G/Inf/Trn) +|p5, p4d, p3, p3dn, gr6, g6, g6e, g5g, g5, g4dn, inf2, inf1, trn1, trn1n + +|High Performance Computing (X2) +|x2iezn, x2iedn, x2idn +|=== + +== Considerations + +* EKS Auto Mode automatically formats and configures NVMe local storage on supported instance types. For nodes with multiple NVMe drives, EKS sets up a RAID 0 array. This automation eliminates the need for manual formatting and RAID configuration of local NVMe storage in EKS clusters. +* Amazon EKS Auto Mode does not support {aws} Fault Injection Service. For more information, see link:resilience-hub/latest/userguide/testing.html["Managing Fault Injection Service experiments",type="documentation"] in the {aws} Resilience Hub User Guide. +* You do not need to install the `Neuron Device Plugin` on EKS Auto Mode nodes. +** If you have other types of nodes in your cluster, you need to configure the Neuron Device plugin to not run on auto mode nodes. For more information, see <>. diff --git a/latest/ug/automode/automode-workload.adoc b/latest/ug/automode/automode-workload.adoc new file mode 100644 index 00000000..015a1856 --- /dev/null +++ b/latest/ug/automode/automode-workload.adoc @@ -0,0 +1,134 @@ +//!!NODE_ROOT
+
+[.topic]
+[[automode-workload,automode-workload.title]]
+= Deploy a sample inflate workload to an Amazon EKS Auto Mode cluster
+:info_doctype: section
+:info_title: Deploy a sample inflate workload to an Amazon EKS Auto Mode cluster
+:info_titleabbrev: Deploy inflate workload
+:info_abstract: Deploy a sample inflate workload to an Amazon EKS Auto Mode cluster
+
+include::../attributes.txt[]
+
+In this tutorial, you'll learn how to deploy a sample workload to an EKS Auto Mode cluster and observe how it automatically provisions the required compute resources. You'll use `kubectl` commands to watch the cluster's behavior and see firsthand how Auto Mode simplifies Kubernetes operations on {aws}. By the end of this tutorial, you'll understand how EKS Auto Mode responds to workload deployments by automatically managing the underlying compute resources, without requiring manual node group configuration.
+
+== Prerequisites
+
+* An Amazon EKS Auto Mode cluster with the compute capability enabled. Note the name and {aws} region of the cluster.
+* An IAM principal, such as a user or role, with sufficient permissions to manage networking, compute, and EKS resources.
+** For more information, see link:IAM/latest/UserGuide/access_policies_job-functions_create-policies.html["Creating roles and attaching policies",type="documentation"] in the IAM User Guide.
+* `aws` CLI installed and configured with an IAM identity.
+* `kubectl` CLI installed and connected to your cluster.
+** For more information, see <>.
+
+
+== Step 1: Review existing compute resources (optional)
+
+First, use `kubectl` to list the node pools on your cluster.
+
+[source,cli]
+----
+kubectl get nodepools
+----
+
+Sample Output:
+[source,shell]
+----
+general-purpose
+----
+
+In this tutorial, we will deploy a workload configured to use the `general-purpose` node pool. This node pool is built into EKS Auto Mode, and includes reasonable defaults for general workloads, such as microservices and web apps. You can create your own node pool. For more information, see <>.
+
+Second, use `kubectl` to list the nodes connected to your cluster.
+
+[source,cli]
+----
+kubectl get nodes
+----
+
+If you just created an EKS Auto Mode cluster, you will have no nodes.
+
+In this tutorial you will deploy a sample workload. If you have no nodes, or the workload cannot fit on existing nodes, EKS Auto Mode will provision a new node.
+
+== Step 2: Deploy a sample application to the cluster
+
+Review the following Kubernetes Deployment and save it as `inflate.yaml`.
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: inflate
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: inflate
+  template:
+    metadata:
+      labels:
+        app: inflate
+    spec:
+      terminationGracePeriodSeconds: 0
+      nodeSelector:
+        eks.amazonaws.com/compute-type: auto
+      securityContext:
+        runAsUser: 1000
+        runAsGroup: 3000
+        fsGroup: 2000
+      containers:
+        - name: inflate
+          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
+          resources:
+            requests:
+              cpu: 1
+          securityContext:
+            allowPrivilegeEscalation: false
+----
+
+Note that the `eks.amazonaws.com/compute-type: auto` selector requires the workload to be deployed on an Amazon EKS Auto Mode node.
+
+Apply the Deployment to your cluster.
+
+[source,cli]
+----
+kubectl apply -f inflate.yaml
+----
+
+== Step 3: Watch Kubernetes events
+
+Use the following command to watch Kubernetes events, including the creation of a new node. Use `ctrl+c` to stop watching events.
+
+[source,cli]
+----
+kubectl get events -w --sort-by '.lastTimestamp'
+----
+
+Use `kubectl` to list the nodes connected to your cluster again. Note the newly created node.
+
+[source,cli]
+----
+kubectl get nodes
+----
+
+== Step 4: View nodes and instances in the {aws} console
+
+You can view EKS Auto Mode nodes in the EKS console, and the associated EC2 instances in the EC2 console.
+
+EC2 instances deployed by EKS Auto Mode are restricted. You cannot run arbitrary commands on EKS Auto Mode nodes.
+
+== Step 5: Delete the deployment
+
+Use `kubectl` to delete the sample deployment.
+
+[source,cli]
+----
+kubectl delete -f inflate.yaml
+----
+
+If you have no other workloads deployed to your cluster, the node created by EKS Auto Mode will be empty.
+
+In the default configuration, EKS Auto Mode detects nodes that have been empty for thirty seconds and terminates them.
+
+Use `kubectl` or the EC2 console to confirm the associated instance has been deleted.
diff --git a/latest/ug/automode/automode.adoc b/latest/ug/automode/automode.adoc
new file mode 100644
index 00000000..a9a6353a
--- /dev/null
+++ b/latest/ug/automode/automode.adoc
@@ -0,0 +1,99 @@
+//!!NODE_ROOT
+
+[[automode,automode.title]]
+= Automate cluster infrastructure with EKS Auto Mode
+:info_doctype: chapter
+:toclevels: 2
+:toc:
+:info_title: Automate cluster infrastructure with EKS Auto Mode
+:info_titleabbrev: EKS Auto Mode
+:info_abstract: Automate cluster infrastructure with EKS Auto Mode
+
+[abstract]
+--
+Automate cluster infrastructure with EKS Auto Mode
+--
+
+include::../attributes.txt[]
+
+EKS Auto Mode extends {aws} management of Kubernetes clusters beyond the cluster itself, to allow {aws} to also set up and manage the infrastructure that enables the smooth operation of your workloads.
+You can delegate key infrastructure decisions and leverage the expertise of {aws} for day-to-day operations.
+Cluster infrastructure managed by {aws} includes many Kubernetes capabilities as core components, as opposed to add-ons, such as compute autoscaling, pod and service networking, application load balancing, cluster DNS, block storage, and GPU support.
+
+To get started, you can deploy a new EKS Auto Mode cluster or enable EKS Auto Mode on an existing cluster.
+You can deploy, upgrade, or modify your EKS Auto Mode clusters using eksctl, the {aws} CLI, the {aws} Management Console, EKS APIs, or your preferred infrastructure-as-code tools.
+
+With EKS Auto Mode, you can continue using your preferred Kubernetes-compatible tools. EKS Auto Mode integrates with {aws} services like Amazon EC2, Amazon EBS, and ELB, leveraging {aws} cloud resources that follow best practices. These resources are automatically scaled, cost-optimized, and regularly updated to help minimize operational costs and overhead.
+
+## Features
+
+EKS Auto Mode provides the following high-level features:
+
+**Streamline Kubernetes Cluster Management**: EKS Auto Mode streamlines EKS management by providing production-ready clusters with minimal operational overhead. With EKS Auto Mode, you can run demanding, dynamic workloads confidently, without requiring deep EKS expertise.
+
+**Application Availability**: EKS Auto Mode dynamically adds or removes nodes in your EKS cluster based on the demands of your Kubernetes applications. This minimizes the need for manual capacity planning and ensures application availability.
+
+**Efficiency**: EKS Auto Mode is designed to optimize compute costs while adhering to the flexibility defined by your NodePool and workload requirements. It also terminates unused instances and consolidates workloads onto other nodes to improve cost efficiency.
+
+**Security**: EKS Auto Mode uses AMIs that are treated as immutable for your nodes. These AMIs enforce locked-down software, enable SELinux mandatory access controls, and provide read-only root file systems. Additionally, nodes launched by EKS Auto Mode have a maximum lifetime of 21 days (which you can reduce), after which they are automatically replaced with new nodes. This approach enhances your security posture by regularly cycling nodes, aligning with best practices already adopted by many customers.
+
+**Automated Upgrades**: EKS Auto Mode keeps your Kubernetes cluster, nodes, and related components up to date with the latest patches, while respecting your configured Pod Disruption Budgets (PDBs) and NodePool Disruption Budgets (NDBs). Up to the 21-day maximum lifetime, intervention might be required if blocking PDBs or other configurations prevent updates.
+
+**Managed Components**: EKS Auto Mode includes Kubernetes and {aws} cloud features as core components that would otherwise have to be managed as add-ons. This includes built-in support for Pod IP address assignments, Pod network policies, local DNS services, GPU plug-ins, health checkers, and EBS CSI storage.
+
+**Customizable NodePools and NodeClasses**: If your workload requires changes to storage, compute, or networking configurations, you can create custom NodePools and NodeClasses using EKS Auto Mode. While default NodePools and NodeClasses can't be edited, you can add new custom NodePools or NodeClasses alongside the default configurations to meet your specific requirements.
+
+
+## Automated Components
+
+EKS Auto Mode streamlines the operation of your Amazon EKS clusters by automating key infrastructure components. Enabling EKS Auto Mode further reduces the tasks required to manage your EKS clusters.
+
+The following is a list of data plane components that are automated:
+
+* *Compute*: For many workloads, with EKS Auto Mode you can forget about many aspects of compute for your EKS clusters. These include:
+** *Nodes*: EKS Auto Mode nodes are designed to be treated like appliances. EKS Auto Mode does the following:
+*** Chooses an appropriate AMI that's configured with many services needed to run your workloads without intervention.
+*** Locks down those features using SELinux enforcing mode and a read-only root file system.
+*** Prevents direct access to the nodes by disallowing SSH or SSM access.
+*** Includes GPU support, with separate kernel drivers and plugins for NVIDIA and Neuron GPUs, enabling high-performance workloads.
+** *Auto scaling*: Relying on https://karpenter.sh/docs/[Karpenter] auto scaling, EKS Auto Mode monitors for unschedulable Pods and makes it possible for new nodes to be deployed to run those pods. As workloads are terminated, EKS Auto Mode dynamically disrupts and terminates nodes when they are no longer needed, optimizing resource usage.
+** *Upgrades*: Taking control of your nodes streamlines EKS Auto Mode's ability to provide security patches and operating system and component upgrades as needed. Those upgrades are designed to provide minimal disruption of your workloads. EKS Auto Mode enforces a 21-day maximum node lifetime to ensure up-to-date software and APIs.
+* *Load balancing*: EKS Auto Mode streamlines load balancing by integrating with Amazon's Elastic Load Balancing service, automating the provisioning and configuration of load balancers for Kubernetes Services and Ingress resources. It supports advanced features for both Application and Network Load Balancers, manages their lifecycle, and scales them to match cluster demands. This integration provides a production-ready load balancing solution adhering to {aws} best practices, allowing you to focus on applications rather than infrastructure management. +* *Storage*: EKS Auto Mode configures ephemeral storage for you by setting up volume types, volume sizes, encryption policies, and deletion policies upon node termination. +* *Networking*: EKS Auto Mode automates critical networking tasks for Pod and service connectivity. This includes IPv4/IPv6 support and the use of secondary CIDR blocks for extending IP address spaces. +* *Identity and Access Management*: You do not have to install the EKS Pod Identity Agent on EKS Auto Mode clusters. + +For more information about these components, see <>. + +## Configuration + +While EKS Auto Mode will effectively manage most of your data plane services without your intervention, there might be times when you want to change the behavior of some of those services. You can modify the configuration of your EKS Auto Mode clusters in the following ways: + +* *Kubernetes DaemonSets*: Rather than modify services installed on your nodes, you can instead use Kubernetes daemonsets. Daemonsets are designed to be managed by Kubernetes, but run on every node in the cluster. In this way, you can add special services for monitoring or otherwise watching over your nodes. + +* *Custom NodePools and NodeClasses*: Default NodePools and NodeClasses are configured by EKS Auto Mode and can't be edited. To customize node behavior, you can create additional NodePools or NodeClasses for use cases such as: +** Selecting specific instance types (for example, accelerated processors or EC2 Spot instances). + +** Isolating workloads for security or cost-tracking purposes. +** Configuring ephemeral storage settings like IOPS, size, and throughput. + +* *Load Balancing*: Some services, such as load balancing, that EKS Auto Mode runs as Kubernetes objects, can be configured directly on your EKS Auto Mode clusters. + +For more information about options for configuring EKS Auto Mode, see <>. + + + +include::create-auto.adoc[leveloffset=+1] + +include::migrate-auto.adoc[leveloffset=+1] + +include::auto-workloads.adoc[leveloffset=+1] + +include::settings-auto.adoc[leveloffset=+1] + +include::auto-reference.adoc[leveloffset=+1] + +include::auto-troubleshoot.adoc[leveloffset=+1] + +//include::wip.adoc[leveloffset=+1] diff --git a/latest/ug/automode/create-auto.adoc b/latest/ug/automode/create-auto.adoc new file mode 100644 index 00000000..10221587 --- /dev/null +++ b/latest/ug/automode/create-auto.adoc @@ -0,0 +1,45 @@ +//!!NODE_ROOT +[.topic] +include::../attributes.txt[] +[[create-auto,create-auto.title]] += Create a cluster with Amazon EKS Auto Mode +:info_doctype: section +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . +:info_title: Create cluster with EKS Auto Mode +:info_titleabbrev: Create cluster +:info_abstract: Learn about the tools needed for creating and working with an Amazon EKS cluster in EKS Auto Mode. 
+:keywords: getting, started, tutorials, quick, start + +[abstract] +-- +Learn about the tools needed for creating and working with an Amazon EKS cluster in EKS Auto Mode. +-- + + + +This chapter explains how to create an Amazon EKS cluster with Auto Mode enabled using various tools and interfaces. Auto Mode simplifies cluster creation by automatically configuring and managing the cluster's compute, networking, and storage infrastructure. You'll learn how to create an Auto Mode cluster using the {aws} CLI, {aws} Management Console, or the eksctl command line tool. + +[NOTE] +==== +EKS Auto Mode requires Kubernetes version 1.29 or greater. +==== + + +Choose your preferred tool based on your needs: The {aws} Management Console provides a visual interface ideal for learning about EKS Auto Mode features and creating individual clusters. The {aws} CLI is best suited for scripting and automation tasks, particularly when integrating cluster creation into existing workflows or CI/CD pipelines. The eksctl CLI offers a Kubernetes-native experience and is recommended for users familiar with Kubernetes tooling who want simplified command line operations with sensible defaults. + +Before you begin, ensure you have the necessary prerequisites installed and configured, including appropriate IAM permissions to create EKS clusters. To learn how to install CLI tools such as `kubectl`, `aws`, and `eksctl`, see <>. + +You can use the {aws} CLI, {aws} Management Console, or eksctl CLI to create a cluster with Amazon EKS Auto Mode. + +[.topiclist] +[[Topic List]] + +include::automode-get-started-eksctl.adoc[leveloffset=+1] + +include::automode-get-started-cli.adoc[leveloffset=+1] + +include::automode-get-started-console.adoc[leveloffset=+1] diff --git a/latest/ug/automode/create-node-class.adoc b/latest/ug/automode/create-node-class.adoc new file mode 100644 index 00000000..cf1bb913 --- /dev/null +++ b/latest/ug/automode/create-node-class.adoc @@ -0,0 +1,105 @@ +//!!NODE_ROOT
+ +[.topic] +[[create-node-class,create-node-class.title]] += Create a Node Class for Amazon EKS +:info_doctype: section +:info_titleabbrev: Create node class + +include::../attributes.txt[] + +Amazon EKS Node Classes provide granular control over the configuration of your {eam} managed nodes. A Node Class defines infrastructure-level settings that apply to groups of nodes in your EKS cluster, including network configuration, storage settings, and resource tagging. This topic explains how to create and configure a Node Class to meet your specific operational requirements. + +When you need to customize how {eam} provisions and configures EC2 instances beyond the default settings, creating a Node Class gives you precise control over critical infrastructure parameters. For example, you can specify private subnet placement for enhanced security, configure instance ephemeral storage for performance-sensitive workloads, or apply custom tagging for cost allocation. + +## Create a Node Class + +To create a Node Class, follow these steps: + +. Create a YAML file (for example, `nodeclass.yaml`) with your Node Class configuration +. Apply the configuration to your cluster using `kubectl` +. Reference the Node Class in your Node Pool configuration. For more information, see <>. + +You need `kubectl` installed and configured. For more information, see <>. + +### Basic Node Class Example + +Here's an example Node Class: + +//GDC: need to update this with IAM role and subnet selector + +```yaml +apiVersion: eks.amazonaws.com/v1 +kind: NodeClass +metadata: + name: private-compute +spec: + ephemeralStorage: + size: "160Gi" +``` + +This NodeClass increases the amount of ephemeral storage on the node. + +Apply this configuration using: + +```bash +kubectl apply -f nodeclass.yaml +``` + +Next, reference the Node Class in your Node Pool configuration. For more information, see <>. + +== Node Class Specification + +[source,yaml] +---- +apiVersion: eks.amazonaws.com/v1 +kind: NodeClass +metadata: + name: default +spec: + + # Required: Name of IAM Role for Nodes + role: "MyNodeRole" + + # Required: Subnet selection for node placement + subnetSelectorTerms: + - tags: + Name: "" + kubernetes.io/role/internal-elb: "1" + # Alternative using direct subnet ID + # - id: "subnet-0123456789abcdef0" + + # Required: Security group selection for nodes + securityGroupSelectorTerms: + - tags: + Name: "eks-cluster-node-sg" + # Alternative approaches: + # - id: "sg-0123456789abcdef0" + # - name: "eks-cluster-node-security-group" + + # Optional: Configure SNAT policy (defaults to Random) + snatPolicy: Random # or Disabled + + # Optional: Network policy configuration (defaults to DefaultAllow) + networkPolicy: DefaultAllow # or DefaultDeny + + # Optional: Network policy event logging (defaults to Disabled) + networkPolicyEventLogs: Disabled # or Enabled + + # Optional: Configure ephemeral storage (shown with default values) + ephemeralStorage: + size: "80Gi" # Range: 1-59000Gi or 1-64000G or 1-58Ti or 1-64T + iops: 3000 # Range: 3000-16000 + throughput: 125 # Range: 125-1000 + + # Optional: Additional EC2 tags + tags: + Environment: "production" + Team: "platform" +---- + +*Considerations:* + +* If you change the Node IAM Role associated with a NodeClass, you will need to create a new Access Entry. EKS automatically creates an Access Entry for the Node IAM Role during cluster creation. The Node IAM Role requires the `AmazonEKSAutoNodePolicy` EKS Access Policy. For more information, see <>. 
+* EKS limits the maximum number of pods on a node to 110. This limit is applied after the existing max pods calculation. For more information, see <>. +* If you want to propagate tags from Kubernetes to EC2, you need to configure additional IAM permissions. For more information, see <>. diff --git a/latest/ug/automode/create-node-pool.adoc b/latest/ug/automode/create-node-pool.adoc new file mode 100644 index 00000000..74ec9097 --- /dev/null +++ b/latest/ug/automode/create-node-pool.adoc @@ -0,0 +1,179 @@ +//!!NODE_ROOT
+[.topic] +[[create-node-pool,create-node-pool.title]] += Create a Node Pool for EKS Auto Mode +:info_doctype: section +:info_titleabbrev: Create node pool + +include::../attributes.txt[] + +Amazon EKS node pools provide a flexible way to manage compute resources in your Kubernetes cluster. This topic demonstrates how to create and configure node pools using Karpenter, a node provisioning tool that helps optimize cluster scaling and resource utilization. With Karpenter's NodePool resource, you can define specific requirements for your compute resources, including instance types, availability zones, architectures, and capacity types. + +The NodePool specification allows for fine-grained control over your EKS cluster's compute resources through various supported labels and requirements. These include options for specifying EC2 instance categories, CPU configurations, availability zones, architectures (ARM64/AMD64), and capacity types (spot/on-demand). You can also set resource limits for CPU and memory usage, ensuring your cluster stays within desired operational boundaries. + +EKS Auto Mode leverages well-known Kubernetes labels to provide consistent and standardized ways of identifying node characteristics. These labels, such as `topology.kubernetes.io/zone` for availability zones and `kubernetes.io/arch` for CPU architecture, follow established Kubernetes conventions. Additionally, EKS-specific labels (prefixed with `eks.amazonaws.com/`) extend this functionality with {aws}-specific attributes like instance types, CPU manufacturers, GPU capabilities, and networking specifications. This standardized labeling system enables seamless integration with existing Kubernetes tooling while providing deep {aws} infrastructure integration. + +## Create a NodePool + +Follow these steps to create a NodePool for your Amazon EKS cluster: + +. Create a YAML file named `nodepool.yaml` with your desired NodePool configuration. You can use the sample configuration below. +. Apply the NodePool to your cluster: ++ +```bash +kubectl apply -f nodepool.yaml +``` +. Verify that the NodePool was created successfully: ++ +```bash +kubectl get nodepools +``` +. (Optional) Monitor the NodePool status: ++ +```bash +kubectl describe nodepool default +``` + +Ensure that your NodePool references a valid NodeClass that exists in your cluster. The NodeClass defines {aws}-specific configurations for your compute resources. For more information, see <>. + +== Sample NodePool + +[source,yaml] +---- +apiVersion: karpenter.sh/v1 +kind: NodePool +metadata: + name: default +spec: + template: + metadata: + labels: + billing-team: my-team + spec: + nodeClassRef: + group: eks.amazonaws.com + kind: NodeClass + name: default + + requirements: + - key: "eks.amazonaws.com/instance-category" + operator: In + values: ["c", "m", "r"] + - key: "eks.amazonaws.com/instance-cpu" + operator: In + values: ["4", "8", "16", "32"] + - key: "topology.kubernetes.io/zone" + operator: In + values: ["us-west-2a", "us-west-2b"] + - key: "kubernetes.io/arch" + operator: In + values: ["arm64", "amd64"] + + limits: + cpu: "1000" + memory: 1000Gi +---- + + +== {eam} Supported + +{eam} supports the following well known labels. 
+
+[role="no-scroll"]
+[cols="3,1,4",options="header"]
+|===
+|Label |Example |Description
+
+|topology.kubernetes.io/zone
+|us-east-2a
+|{aws} Availability Zone
+
+|node.kubernetes.io/instance-type
+|g4dn.8xlarge
+|Amazon EC2 instance type
+
+|kubernetes.io/arch
+|amd64
+|Architectures are defined by link:https://github.com/golang/go/blob/master/src/go/build/syslist.go#L50[GOARCH values] on the instance
+
+|karpenter.sh/capacity-type
+|spot
+|Capacity types include `spot` and `on-demand`
+
+|eks.amazonaws.com/instance-hypervisor
+|nitro
+|Instance types that use a specific hypervisor
+
+|eks.amazonaws.com/compute-type
+|auto
+|Identifies EKS Auto Mode managed nodes
+
+|eks.amazonaws.com/instance-encryption-in-transit-supported
+|true
+|Whether the instance type supports encryption in transit
+
+|eks.amazonaws.com/instance-category
+|g
+|Instance types of the same category, usually the string before the generation number
+
+|eks.amazonaws.com/instance-generation
+|4
+|Instance type generation number within an instance category
+
+|eks.amazonaws.com/instance-family
+|g4dn
+|Instance types of similar properties but different resource quantities
+
+|eks.amazonaws.com/instance-size
+|8xlarge
+|Instance types of similar resource quantities but different properties
+
+|eks.amazonaws.com/instance-cpu
+|32
+|Number of CPUs on the instance
+
+|eks.amazonaws.com/instance-cpu-manufacturer
+|aws
+|Name of the CPU manufacturer
+
+|eks.amazonaws.com/instance-memory
+|131072
+|Number of mebibytes of memory on the instance
+
+|eks.amazonaws.com/instance-ebs-bandwidth
+|9500
+|Number of link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html#ebs-optimization-performance[maximum megabits] of EBS bandwidth available on the instance
+
+|eks.amazonaws.com/instance-network-bandwidth
+|131072
+|Number of link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html[baseline megabits] of network bandwidth available on the instance
+
+|eks.amazonaws.com/instance-gpu-name
+|t4
+|Name of the GPU on the instance, if available
+
+|eks.amazonaws.com/instance-gpu-manufacturer
+|nvidia
+|Name of the GPU manufacturer
+
+|eks.amazonaws.com/instance-gpu-count
+|1
+|Number of GPUs on the instance
+
+|eks.amazonaws.com/instance-gpu-memory
+|16384
+|Number of mebibytes of memory on the GPU
+
+|eks.amazonaws.com/instance-local-nvme
+|900
+|Number of gibibytes of local NVMe storage on the instance
+|===
+
+== {eam} Not Supported
+
+{eam} does not support the following labels.
+
+* {eam} only supports Linux, so these operating system labels are not supported:
+** `node.kubernetes.io/windows-build`
+** `kubernetes.io/os`
diff --git a/latest/ug/automode/create-storage-class.adoc b/latest/ug/automode/create-storage-class.adoc
new file mode 100644
index 00000000..8e28c3a8
--- /dev/null
+++ b/latest/ug/automode/create-storage-class.adoc
@@ -0,0 +1,240 @@
+//!!NODE_ROOT
+[.topic] +[[create-storage-class,create-storage-class.title]] += Create a Storage Class +:info_doctype: section +:info_titleabbrev: Create storage class + + +include::../attributes.txt[] + +A StorageClass in Amazon EKS Auto Mode defines how Amazon EBS volumes are automatically provisioned when applications request persistent storage. This page explains how to create and configure a StorageClass that works with the Amazon EKS Auto Mode to provision EBS volumes. + +By configuring a StorageClass, you can specify default settings for your EBS volumes including volume type, encryption, IOPS, and other storage parameters. You can also configure the StorageClass to use {aws} KMS keys for encryption management. + +{eam} does not create a StorageClass for you. You must create a StorageClass referencing `ebs.csi.eks.amazonaws.com` to use the storage capability of {eam}. + +First, create a file named `storage-class.yaml`: + +[source,yaml] +---- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: auto-ebs-sc + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: ebs.csi.eks.amazonaws.com +volumeBindingMode: WaitForFirstConsumer +parameters: + type: gp3 + encrypted: "true" +---- + +Second, apply the storage class to your cluster. + +[source,bash] +---- +kubectl apply -f storage-class.yaml +---- + +*Key components:* + +- `provisioner: ebs.csi.eks.amazonaws.com` - Uses {eam} +- `volumeBindingMode: WaitForFirstConsumer` - Delays volume creation until a pod needs it +- `type: gp3` - Specifies the EBS volume type +- `encrypted: "true"` - EBS will encrypt any volumes created using the StorageClass. EBS will use the default `aws/ebs` key alias. For more information, see link:ebs/latest/userguide/how-ebs-encryption-works.html["How Amazon EBS encryption works",type="documentation"] in the Amazon EBS User Guide. This value is optional but suggested. +- `storageclass.kubernetes.io/is-default-class: "true"` - Kubernetes will use this storage class by default, unless you specify a different volume class on a persistent volume claim. This value is optional. Use caution when setting this value if you are migrating from a different storage controller. + +== Use self-managed KMS key to encrypt EBS volumes + +To use a self-managed KMS key to encrypt EBS volumes automated by EKS Auto Mode, you need to: + +. Create a self-managed KMS key. +** For more information, see link:kms/latest/developerguide/create-symmetric-cmk.html["Create a symmetric encryption KMS key",type="documentation"] or link:kms/latest/developerguide/services-ebs.html["How Amazon Elastic Block Store (Amazon EBS) uses KMS",type="documentation"] in the KMS User Guide. +. Create a new policy that permits access to the KMS key. +** Use the sample IAM policy below to create the policy. Insert the ARN of the new self-managed KMS key. For more information, see +link:IAM/latest/UserGuide/access_policies_job-functions_create-policies.html["Creating roles and attaching policies (console)",type="documentation"] in the {aws} IAM User Guide. +. Attach the policy to the EKS Cluster Role. +** Use the {aws} console to find the ARN of the EKS Cluster Role. The role information is visible in the *Overview* section. For more information, see <>. +. Update the `StorageClass` to reference the KMS Key ID at the `parameters.kmsKeyId` field. 
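+
+For example, after the key and permissions are in place, a complete `StorageClass` that encrypts volumes with a self-managed key might look like the following. This is a minimal sketch: the class name and the key ARN are illustrative placeholders, and the `kmsKeyId` parameter is described in the parameters reference later in this topic.
+
+[source,yaml]
+----
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: auto-ebs-sc-kms    # Illustrative name
+provisioner: ebs.csi.eks.amazonaws.com
+volumeBindingMode: WaitForFirstConsumer
+parameters:
+  type: gp3
+  encrypted: "true"
+  # Replace with the ARN of your self-managed KMS key
+  kmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+----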
+ +=== Sample self-managed KMS IAM Policy + +Update the following values in the policy below: + +* `` -- Your {aws} account ID, such as `111122223333` +* `` -- The {aws} region of your cluster, such as `us-west-2` + +[source,json] +---- +{ + "Version": "2012-10-17", + "Id": "key-auto-policy-3", + "Statement": [ + { + "Sid": "Enable IAM User Permissions", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::root" + }, + "Action": "kms:*", + "Resource": "*" + }, + { + "Sid": "Allow access through EBS for all principals in the account that are authorized to use EBS", + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:CreateGrant", + "kms:DescribeKey" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "kms:CallerAccount": "", + "kms:ViaService": "ec2..amazonaws.com" + } + } + } + ] +} +---- + +=== Sample self-managed KMS StorageClass + +[source,yaml] +---- +parameters: + type: gp3 + encrypted: "true" + kmsKeyId: +---- + + +== StorageClass Parameters Reference + +For general information on the Kubernetes `StorageClass` resources, see https://kubernetes.io/docs/concepts/storage/storage-classes/[Storage Classes] in the Kubernetes Documentation. + +THe `parameters` section of the `StorageClass` resource is specific to {aws}. Use the following table to review available options. + +[role="no-scroll"] +[cols="4*", options="header"] +|=== +|Parameters |Values |Default |Description +|"csi.storage.k8s.io/fstype" +|xfs, ext2, ext3, ext4 +|ext4 +|File system type that will be formatted during volume creation. This parameter is case sensitive! +|"type" +|io1, io2, gp2, gp3, sc1, st1, standard, sbp1, sbg1 +|gp3 +|EBS volume type. +|"iopsPerGB" +| +| +|I/O operations per second per GiB. Can be specified for IO1, IO2, and GP3 volumes. +|"allowAutoIOPSPerGBIncrease" +|true, false +|false +|When `"true"`, the CSI driver increases IOPS for a volume when `iopsPerGB * ` is too low to fit into IOPS range supported by {aws}. This allows dynamic provisioning to always succeed, even when user specifies too small PVC capacity or `iopsPerGB` value. On the other hand, it may introduce additional costs, as such volumes have higher IOPS than requested in `iopsPerGB`. +|"iops" +| +| +|I/O operations per second. Can be specified for IO1, IO2, and GP3 volumes. +|"throughput" +| +|125 +|Throughput in MiB/s. Only effective when gp3 volume type is specified. +|"encrypted" +|true, false +|false +|Whether the volume should be encrypted or not. Valid values are "true" or "false". +|"blockExpress" +|true, false +|false +|Enables the creation of io2 Block Express volumes. +|"kmsKeyId" +| +| +|The full ARN of the key to use when encrypting the volume. If not specified, {aws} will use the default KMS key for the region the volume is in. This will be an auto-generated key called `/aws/ebs` if not changed. +|"blockSize" +| +| +|The block size to use when formatting the underlying filesystem. Only supported on linux nodes and with fstype `ext2`, `ext3`, `ext4`, or `xfs`. +|"inodeSize" +| +| +|The inode size to use when formatting the underlying filesystem. Only supported on linux nodes and with fstype `ext2`, `ext3`, `ext4`, or `xfs`. +|"bytesPerInode" +| +| +|The `bytes-per-inode` to use when formatting the underlying filesystem. Only supported on linux nodes and with fstype `ext2`, `ext3`, `ext4`. +|"numberOfInodes" +| +| +|The `number-of-inodes` to use when formatting the underlying filesystem. 
Only supported on linux nodes and with fstype `ext2`, `ext3`, `ext4`. +|"ext4BigAlloc" +|true, false +|false +|Changes the `ext4` filesystem to use clustered block allocation by enabling the `bigalloc` formatting option. Warning: `bigalloc` may not be fully supported with your node's Linux kernel. +|"ext4ClusterSize" +| +| +|The cluster size to use when formatting an `ext4` filesystem when the `bigalloc` feature is enabled. Note: The `ext4BigAlloc` parameter must be set to true. +|=== + +For more information, see the https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/parameters.md[{aws} EBS CSI Driver] on GitHub. + +== Considerations + +The block storage capability of EKS Auto Mode is different from the EBS CSI Driver. + +* Static Provisioning +** If you want to use externally-created EBS volumes with EKS Auto Mode, you need to manually add an {aws} tag with the key `eks:eks-cluster-name` and the value of the cluster name. +* Node Startup Taint +** You cannot use the node startup taint feature to prevent pod scheduling before storage capability readiness +* Custom Tags on Dynamically Provisioned Volumes +** You cannot use the extra-tag CLI flag to configure custom tags on dynamically provisioned EBS volumes +** You can use StorageClass Tagging to add custom tags. EKS Auto Mode will add tags to the associated {aws} resources. You will need to update the Cluster IAM Role for custom tags. For more information, see <>. +* EBS Detailed Performance Metrics +** You cannot access Prometheus metrics for EBS detailed performance + +== Install CSI Snapshot Controller add-on + +EKS Auto Mode is compatible with the CSI Snapshot Controller Amazon EKS add-on. + +{aws} suggests you configure this add-on to run on the built-in `system` node pool. + +For more information, see: + +* <> +* <> +* <> + + +[[auto-install-snapshot-controller,auto-install-snapshot-controller.title]] +=== To install snapshot controller in system node pool + +. Open your EKS cluster in the {aws} console +. From the *Add-ons* tab, select *Get more add-ons* +. Select the *CSI Snapshot Controller* and then *Next* +. On the *Configure selected add-ons settings* page, select *Optional configuration settings* to view the *Add-on configuration schema* +.. Insert the following yaml to associate the snapshot controller with the `system` node pool. The snapshot controller includes a toleration for the `CriticalAddonsOnly` taint. ++ +[source,yaml] +---- +{ + "nodeSelector": { + "karpenter.sh/nodepool": "system" + } +} +---- +.. Select *Next* +. Review the add-on configuration and then select *Create* diff --git a/latest/ug/automode/critical-workload.adoc b/latest/ug/automode/critical-workload.adoc new file mode 100644 index 00000000..75c3668e --- /dev/null +++ b/latest/ug/automode/critical-workload.adoc @@ -0,0 +1,70 @@ +//!!NODE_ROOT
+[.topic] +[[critical-workload,critical-workload.title]] += Run critical add-ons on dedicated instances +:info_doctype: section +:info_titleabbrev: Run critical add-ons + +include::../attributes.txt[] + +In this topic, you will learn how to deploy a workload with a `CriticalAddonsOnly` toleration so EKS Auto Mode will schedule it onto the `system` node pool. + +EKS Auto Mode's built-in `system` node pool is designed for running critical add-ons on dedicated instances. This segregation ensures essential components have dedicated resources and are isolated from general workloads, enhancing overall cluster stability and performance. + +This guide demonstrates how to deploy add-ons to the `system` node pool by utilizing the `CriticalAddonsOnly` toleration and appropriate node selectors. By following these steps, you can ensure that your critical applications are scheduled onto the dedicated `system` nodes, leveraging the isolation and resource allocation benefits provided by EKS Auto Mode's specialized node pool structure. + +EKS Auto Mode has two built-in node pools: `general-purpose` and `system`. For more information, see <>. + +The purpose of the `system` node pool is to segregate critical add-ons onto different nodes. Nodes provisioned by the `system` node pool have a `CriticalAddonsOnly` Kubernetes taint. Kubernetes will only schedule pods onto these nodes if they have a corresponding toleration. For more information, see https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/[Taints and Tolerations] in the Kubernetes documentation. + + +== Prerequisites + +* EKS Auto Mode Cluster with the built-in `system` node pool enabled. For more information, see <> +* `kubectl` installed and configured. For more information, see <>. + +== Procedure + +Review the example yaml below. Note the following configurations: + +* `nodeSelector` -- This associates the workload with the built-in `system` node pool. This node pool must be enabled with the {aws} API. For more information, see <>. +* `tolerations` -- This toleration overcomes the `CriticalAddonsOnly` taint on nodes in the `system` node pool. + +[source,yaml] +---- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sample-app +spec: + replicas: 3 + selector: + matchLabels: + app: sample-app + template: + metadata: + labels: + app: sample-app + spec: + nodeSelector: + karpenter.sh/nodepool: system + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + containers: + - name: app + image: nginx:latest + resources: + requests: + cpu: "500m" + memory: "512Mi" +---- + +To update a workload to run on the `system` node pool, you need to: + +. Update the existing workload to add the following configurations described above: +** `nodeSelector` +** `tolerations` +. Deploy the updated workload to your cluster with `kubectl apply` + +After updating the workload, it will run on dedicated nodes. diff --git a/latest/ug/automode/migrate-auto.adoc b/latest/ug/automode/migrate-auto.adoc new file mode 100644 index 00000000..fe53704b --- /dev/null +++ b/latest/ug/automode/migrate-auto.adoc @@ -0,0 +1,79 @@ +//!!NODE_ROOT
+[.topic]
+include::../attributes.txt[]
+[[migrate-auto,migrate-auto.title]]
+= Enable EKS Auto Mode on existing EKS clusters
+:info_doctype: section
+:icons: font
+:experimental:
+:idprefix:
+:idseparator: -
+:sourcedir: .
+:info_title: Enable EKS Auto Mode on existing EKS clusters
+:info_titleabbrev: Enable existing clusters
+:info_abstract: Learn about the tools needed to migrate an Amazon EKS cluster to EKS Auto Mode.
+
+[abstract]
+--
+Learn about the tools needed to migrate an existing Amazon EKS cluster to EKS Auto Mode.
+--
+
+You can enable EKS Auto Mode on existing EKS clusters.
+
+[NOTE]
+====
+EKS Auto Mode requires Kubernetes version 1.29 or greater.
+====
+
+*{aws} supports the following migrations:*
+
+* Migrating from Karpenter to EKS Auto Mode Nodes
+** Learn how to <>
+* Migrating from EKS Managed Node Groups to EKS Auto Mode Nodes
+** Learn how to <>
+* Migrating from EKS Fargate to EKS Auto Mode Nodes
+
+*{aws} does not support the following migrations:*
+
+* Migrating volumes from the EBS CSI Controller to EKS Auto Mode Block Storage
+** You can install the EBS CSI Controller on an Amazon EKS Auto Mode cluster. Use a `StorageClass` to associate volumes with either the EBS CSI Controller or EKS Auto Mode.
+* Migrating load balancers from the {aws} Load Balancer Controller to EKS Auto Mode
+** You can install the {aws} Load Balancer Controller on an Amazon EKS Auto Mode cluster. Use the `IngressClass` or `loadBalancerClass` options to associate Service and Ingress resources with either the Load Balancer Controller or EKS Auto Mode.
+* Migrating EKS clusters with alternative CNIs or other unsupported networking configurations
+
+== Migration Reference
+
+Use the following migration reference to configure Kubernetes resources to be owned by either self-managed controllers or EKS Auto Mode.
+
+[%header,cols="1,1,1,1,1"]
+|===
+
+| Capability | Resource | Field | Self Managed | EKS Auto Mode
+
+| Block Storage | StorageClass | provisioner | kubernetes.io/aws-ebs | ebs.csi.eks.amazonaws.com
+
+| Load Balancing | Service | loadBalancerClass | service.k8s.aws/nlb | eks.amazonaws.com/nlb
+
+| Load Balancing | IngressClass | controller | ingress.k8s.aws/alb | eks.amazonaws.com/alb
+
+| Load Balancing | IngressClassParams | apiVersion | elbv2.k8s.aws/v1beta1 | eks.amazonaws.com/v1
+
+| Load Balancing | TargetGroupBinding | apiVersion | elbv2.k8s.aws/v1beta1 | eks.amazonaws.com/v1
+
+| Compute | NodeClass | apiVersion | karpenter.sh/v1alpha5 | eks.amazonaws.com/v1
+
+|===
+
+== Load Balancer Migration
+
+You cannot directly transfer existing load balancers from the self-managed {aws} Load Balancer Controller to EKS Auto Mode. Instead, you must implement a blue-green deployment strategy. This involves maintaining your existing load balancer configuration while creating new load balancers under the managed controller.
+
+To minimize service disruption, we recommend a DNS-based traffic shifting approach. First, create new load balancers using EKS Auto Mode while keeping your existing configuration operational. Then, use DNS routing (such as Route 53) to gradually shift traffic from the old load balancers to the new ones. Once traffic has been successfully migrated and you've verified the new configuration, you can decommission the old load balancers and the self-managed controller.
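+
+For example, as part of the blue-green cutover you might create a new Service that EKS Auto Mode owns by setting `loadBalancerClass` to the value shown in the migration reference above. This is a minimal sketch; the Service name, selector, and ports are illustrative:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-app-green    # Illustrative name for the new (green) Service
+spec:
+  type: LoadBalancer
+  # Value from the migration reference table; EKS Auto Mode provisions the load balancer
+  loadBalancerClass: eks.amazonaws.com/nlb
+  selector:
+    app: my-app
+  ports:
+    - port: 80
+      targetPort: 8080
+----
+
+Once the new load balancer is healthy, you can shift DNS weight toward it and eventually remove the Service owned by the self-managed controller.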
+ +include::auto-enable-existing.adoc[leveloffset=+1] + +include::auto-migrate-karpenter.adoc[leveloffset=+1] + +include::auto-migrate-mng.adoc[leveloffset=+1] diff --git a/latest/ug/automode/old/hpa_scaling.adoc b/latest/ug/automode/old/hpa_scaling.adoc new file mode 100644 index 00000000..5e0fe70c --- /dev/null +++ b/latest/ug/automode/old/hpa_scaling.adoc @@ -0,0 +1,242 @@ +//!!NODE_ROOT
+ +[.topic] +[[auto-hpa-scaling,auto-hpa-scaling.title]] += End-to-end Compute Autoscaling with HPA +:info_doctype: section + +include::../attributes.txt[] + + +This guide shows you how Karpenter autoscales nodes in conjunction with HPA scaling your applications. + +== What we'll do? + +* Install the Metrics Server +* Deploy an application with 1 replica to start with +* Deploy an HPA autoscaling rule with a target CPU average utilization of 70% +* Simulate sending some real traffic to the UI service to stress the application for 5 mins +* See autoscaling in action, both for the workload with HPA and the nodes with Karpenter + +== Prerequisites + +* watch (https://formulae.brew.sh/formula/watch[Mac], https://www.powershellgallery.com/packages/Watch-Command/0.1.3[Windows]) +* https://kubernetes.io/docs/tasks/tools/#kubectl[kubectl] +* https://helm.sh/docs/intro/install/[Helm] + +== 1. Deploy Metrics Server + +In order for HPA to collect metrics from your application, you need to deploy the metrics server: + +---- +helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/ +helm repo update +helm install metrics-server metrics-server/metrics-server \ +--set 'tolerations[0].key=CriticalAddonsOnly,tolerations[0].operator=Exists,tolerations[0].effect=NoSchedule' \ +--set 'nodeSelector.karpenter\.sh/nodepool=system' \ + -n kube-system +---- + +Your EKS cluster has no nodes at the moment. Therefore, Karpenter will launch a node to run the metrics server pod on it. Notice that we've added a toleration to launch a node using the `system` NodePool as this is a critical addon, as well as a `nodeSelector`. + +Wait around one minute for the node to be ready. Then, run this command to confirm the metrics server pod is `Running`: + +---- +kubectl get pods -n kube-system | grep metrics-server +---- + +== 2. Deploy an application + +To see autoscaling in action, let's deploy the following application: + +---- +cat < + +[.topic] +[[sample-storage-workload,sample-storage-workload.title]] += Deploy a sample stateful workload to EKS Auto Mode +:info_doctype: section +:info_title: Deploy a sample stateful workload to EKS Auto Mode +:info_titleabbrev: Deploy stateful workload +:info_abstract: Deploy a sample stateful workload to EKS Auto Mode + +include::../attributes.txt[] + +This tutorial will guide you through deploying a sample stateful application to your EKS Auto Mode cluster. The application writes timestamps to a persistent volume, demonstrating EKS Auto Mode's automatic EBS volume provisioning and persistence capabilities. + +## Prerequisites + +* An EKS Auto Mode cluster +* The {aws} CLI configured with appropriate permissions +* `kubectl` installed and configured +** For more information, see <>. + +## Step 1: Configure your environment + +. Set your environment variables: ++ +[source,bash] +---- +export CLUSTER_NAME=my-auto-cluster +export AWS_REGION="us-west-2" +---- +. Update your kubeconfig: ++ +[source,bash] +---- +aws eks update-kubeconfig --name "${CLUSTER_NAME}" +---- + +## Step 2: Create the storage class + +The StorageClass defines how EKS Auto Mode will provision EBS volumes. + +{eam} does not create a StorageClass for you. You must create a StorageClass referencing `ebs.csi.eks.amazonaws.com` to use the storage capability of {eam}. + +. 
Create a file named `storage-class.yaml`: ++ +[source,yaml] +---- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: auto-ebs-sc + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: ebs.csi.eks.amazonaws.com +volumeBindingMode: WaitForFirstConsumer +parameters: + type: gp3 + encrypted: "true" +---- +. Apply the StorageClass: ++ +[source,bash] +---- +kubectl apply -f storage-class.yaml +---- + +*Key components:* + +- `provisioner: ebs.csi.eks.amazonaws.com` - Uses {eam} +- `volumeBindingMode: WaitForFirstConsumer` - Delays volume creation until a pod needs it +- `type: gp3` - Specifies the EBS volume type +- `encrypted: "true"` - EBS will use the default `aws/ebs` key to encrypt volumes created with this class. This is optional, but reccomended. +- `storageclass.kubernetes.io/is-default-class: "true"` - Kubernetes will use this storage class by default, unless you specify a different volume class on a persistent volume claim. Use caution when setting this value if you are migrating from another storage controller. (optional) + +## Step 3: Create the persistent volume claim + +The PVC requests storage from the StorageClass. + +. Create a file named `pvc.yaml`: ++ +[source,yaml] +---- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: auto-ebs-claim +spec: + accessModes: + - ReadWriteOnce + storageClassName: auto-ebs-sc + resources: + requests: + storage: 8Gi +---- +. Apply the PVC: ++ +[source,bash] +---- +kubectl apply -f pvc.yaml +---- + +*Key components:* + +- `accessModes: ReadWriteOnce` - Volume can be mounted by one node at a time +- `storage: 8Gi` - Requests an 8 GiB volume +- `storageClassName: auto-ebs-sc` - References the StorageClass we created + +## Step 4: Deploy the Application + +The Deployment runs a container that writes timestamps to the persistent volume. + +. Create a file named `deployment.yaml`: ++ +[source,yaml] +---- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: inflate-stateful +spec: + replicas: 1 + selector: + matchLabels: + app: inflate-stateful + template: + metadata: + labels: + app: inflate-stateful + spec: + terminationGracePeriodSeconds: 0 + nodeSelector: + eks.amazonaws.com/compute-type: auto + containers: + - name: bash + image: public.ecr.aws/docker/library/bash:4.4 + command: ["/usr/local/bin/bash"] + args: ["-c", "while true; do echo $(date -u) >> /data/out.txt; sleep 60; done"] + resources: + requests: + cpu: "1" + volumeMounts: + - name: persistent-storage + mountPath: /data + volumes: + - name: persistent-storage + persistentVolumeClaim: + claimName: auto-ebs-claim +---- +. Apply the Deployment: ++ +[source,bash] +---- +kubectl apply -f deployment.yaml +---- + +*Key components:* + +- Simple bash container that writes timestamps to a file +- Mounts the PVC at `/data` +- Requests 1 CPU core +- Uses node selector for EKS managed nodes + +## Step 5: Verify the Setup + +. Check that the pod is running: ++ +[source,bash] +---- +kubectl get pods -l app=inflate-stateful +---- +. Verify the PVC is bound: ++ +[source,bash] +---- +kubectl get pvc auto-ebs-claim +---- +. Check the EBS volume: ++ +[source,bash] +---- +# Get the PV name +PV_NAME=$(kubectl get pvc auto-ebs-claim -o jsonpath='{.spec.volumeName}') +# Describe the EBS volume +aws ec2 describe-volumes \ + --filters Name=tag:CSIVolumeName,Values=${PV_NAME} +---- +. 
Verify data is being written: ++ +[source,bash] +---- +kubectl exec "$(kubectl get pods -l app=inflate-stateful \ + -o=jsonpath='{.items[0].metadata.name}')" -- \ + cat /data/out.txt +---- + +## Step 6: Cleanup + +Run the following command to remove all resources created in this tutorial: + +[source,bash] +---- +# Delete all resources in one command +kubectl delete deployment/inflate-stateful pvc/auto-ebs-claim storageclass/auto-ebs-sc +---- + +## What's Happening Behind the Scenes + +. The PVC requests storage from the StorageClass +. When the Pod is scheduled: +.. EKS Auto Mode provisions an EBS volume +.. Creates a PersistentVolume +.. Attaches the volume to the node +. The Pod mounts the volume and begins writing timestamps + +== Snapshot Controller + +{eam} is compatible with the Kubernetes CSI Snapshotter, also known as the snapshot controller. However, {eam} does not include the snapshot controller. You are responsible for installing and configuring the snapshot controller. For more information, see <>. + +Review the following `VolumeSnapshotClass` that references the storage capability of {eam}. + +[source,yaml] +---- +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: auto-ebs-vsclass +driver: ebs.csi.eks.amazonaws.com +deletionPolicy: Delete +---- + +https://github.com/kubernetes-csi/external-snapshotter/blob/master/README.md#usage[Learn more about the Kubernetes CSI Snapshotter.] diff --git a/latest/ug/automode/set-builtin-node-pools.adoc b/latest/ug/automode/set-builtin-node-pools.adoc new file mode 100644 index 00000000..0082f30a --- /dev/null +++ b/latest/ug/automode/set-builtin-node-pools.adoc @@ -0,0 +1,59 @@ +//!!NODE_ROOT
+[.topic] +[[set-builtin-node-pools,set-builtin-node-pools.title]] += Enable or Disable Built-in NodePools +:info_doctype: section +:info_titleabbrev: Review built-in node pools + +include::../attributes.txt[] + +{eam} has two built-in NodePools. You can enable or disable these NodePools using the {aws} console, CLI, or API. + +== Built-in NodePool Reference + +* `system` +** This NodePool has a `CriticalAddonsOnly` taint. Many EKS addons, such as CoreDNS, tolerate this taint. Use this system node pool to segregate cluster-critical applications. +** Supports both `amd64` and `arm64` architectures. +* `general-purpose` +** This NodePool provides support for launching nodes for general purpose workloads in your cluster. +** Uses only `amd64` architecture. + +Both built-in NodePools: + +* Use the default EKS NodeClass +* Use only on-demand EC2 capacity +* Use the C, M, and R EC2 instance families +* Require generation 5 or newer EC2 instances + +## Prerequisites + +* The latest version of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device. To check your current version, use `aws --version`. To install the latest version, see link:cli/latest/userguide/getting-started-install.html["Installing",type="documentation"] and link:cli/latest/userguide/cli-chap-configure.html#cli-configure-quickstart-config["Quick configuration",type="documentation"] with aws configure in the {aws} Command Line Interface User Guide. +** Login to the CLI with sufficent IAM permissions to create {aws} resources including IAM Policies, IAM Roles, and EKS Clusters. + +== Enable with {aws} CLI + +Use the following command to enable both built-in NodePools: + +[source,cli] +---- +aws eks update-cluster-config \ + --name \ + --compute-config '{ + "nodeRoleArn": "", + "nodePools": ["general-purpose", "system"] + }' + +---- + +You can modify the command to selectively enable the NodePools. + +== Disable with {aws} CLI + +Use the following command to disable both built-in NodePools: + +[source,cli] +---- +aws eks update-cluster-config \ + --name \ + --compute-config '{"nodePools": []}' +---- diff --git a/latest/ug/automode/settings-auto.adoc b/latest/ug/automode/settings-auto.adoc new file mode 100644 index 00000000..23f7f2bf --- /dev/null +++ b/latest/ug/automode/settings-auto.adoc @@ -0,0 +1,113 @@ +//!!NODE_ROOT +[.topic] +include::../attributes.txt[] +[[settings-auto,settings-auto.title]] += Configure EKS Auto Mode settings +:info_doctype: section +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . +:info_title: Change EKS Auto cluster settings +:info_titleabbrev: Configure +:info_abstract: Change EKS Auto cluster settings + +[abstract] +-- +Change EKS Auto cluster settings +-- + +This chapter describes how to configure specific aspects of your Amazon Elastic Kubernetes Service (EKS) Auto Mode clusters. While EKS Auto Mode manages most infrastructure components automatically, you can customize certain features to meet your workload requirements. + +Using the configuration options described in this topic, you can modify networking settings, compute resources, and load balancing behaviors while maintaining the benefits of automated infrastructure management. Before making any configuration changes, review the available options in the following sections to determine which approach best suits your needs. + +[role="no-scroll"] +[cols="1,1"] +|=== +|What features do you want to configure? 
|Configuration option + +a| +*Node networking and storage* + +- Configure node placement across public and private subnets +- Define custom security groups for node access control +- Customize network address translation (SNAT) policies +- Enable detailed network policy logging and monitoring +- Set ephemeral storage parameters (size, IOPS, throughput) +- Configure encrypted ephemeral storage with custom KMS keys +|<> + +a| +*Node compute resources* + +- Select specific EC2 instance types and families +- Define CPU architectures (x86_64, ARM64) +- Configure capacity types (On-Demand, Spot) +- Specify Availability Zones +- Configure node taints and labels +- Set minimum and maximum node counts +|<> + +a| +*Application Load Balancer settings* + +- Deploy internal or internet-facing load balancers +- Configure cross-zone load balancing +- Set idle timeout periods +- Enable HTTP/2 and WebSocket support +- Configure health check parameters +- Specify TLS certificate settings +- Define target group attributes +- Set IP address type (IPv4, dual-stack) +|<> + +a| +*Network Load Balancer settings* + +- Configure direct pod IP routing +- Enable cross-zone load balancing +- Set connection idle timeout +- Configure health check parameters +- Specify subnet placement +- Set IP address type (IPv4, dual-stack) +- Configure preserve client source IP +- Define target group attributes +|<> + + +a| +*Storage Class settings* + +- Define EBS volume types (gp3, io1, io2, etc.) +- Configure volume encryption and KMS key usage +- Set IOPS and throughput parameters +- Set as default storage class +- Define custom tags for provisioned volumes +|<> + +|=== + + + +include::create-node-class.adoc[leveloffset=+1] + +include::create-node-pool.adoc[leveloffset=+1] + +include::auto-configure-alb.adoc[leveloffset=+1] + +include::auto-configure-nlb.adoc[leveloffset=+1] + +include::create-storage-class.adoc[leveloffset=+1] + +include::auto-disable.adoc[leveloffset=+1] + +include::auto-upgrade.adoc[leveloffset=+1] + +include::set-builtin-node-pools.adoc[leveloffset=+1] + +include::associate-workload.adoc[leveloffset=+1] + +include::critical-workload.adoc[leveloffset=+1] + +include::auto-net-pol.adoc[leveloffset=+1] diff --git a/latest/ug/automode/troubleshoot-lbc.adoc b/latest/ug/automode/troubleshoot-lbc.adoc new file mode 100644 index 00000000..c9918fde --- /dev/null +++ b/latest/ug/automode/troubleshoot-lbc.adoc @@ -0,0 +1,105 @@ +//!!NODE_ROOT
+[.topic] +[[troubleshoot-lbc,troubleshoot-lbc.title]] +# Troubleshooting Amazon EKS Auto Mode Load Balancer Controller +:info_doctype: section + +[NOTE] +==== +This resource is not ready for publication. +==== + + +include::../attributes.txt[] + +This guide helps you troubleshoot issues with the {aws} Load Balancer Controller when using Amazon EKS Auto Mode. + +## Verify Ingress Resources + +Check the status of your Ingress resources: + +```bash +kubectl get ingress --all-namespaces +kubectl describe ingress -n +``` + +Look for: + +- Proper annotations +- ALB DNS name in the Address field +- Events indicating any issues + +## Check {aws} Resources + +Verify these resources in the {aws} Management Console or using {aws} CLI: + +- Application Load Balancers +- Target Groups +- Security Groups + +## Common Issues + +### Ingress Not Creating ALB + +1. Verify Ingress annotations +2. Check security group configuration +3. Validate subnet configuration +4. Review target group settings + +### Target Group Health Check Failures + +1. Ensure security group allows health check traffic +2. Verify application endpoints are responding +3. Check health check path and settings + +### Networking Issues + +1. Verify subnet tagging: + - Public subnets: `kubernetes.io/role/elb: 1` + - Private subnets: `kubernetes.io/role/internal-elb: 1` +2. Check VPC internet connectivity for public ALBs +3. Review route tables and NAT gateway configuration + +### Security Group Problems + +1. Verify inbound rules allow traffic on required ports +2. Ensure outbound rules allow health check traffic +3. Check security group associations + +## Advanced Troubleshooting + +### Version Compatibility + +Ensure compatibility between: +- Kubernetes version +- EKS version +- {aws} SDK version + +### Resource Cleanup + +For stuck resources: + +1. Remove finalizers: + ```bash + kubectl patch ingress -p '{"metadata":{"finalizers":[]}}' --type=merge + ``` +2. Check for orphaned {aws} resources (ALBs, target groups, listener rules) + +## Best Practices + +1. Regularly review Ingress events and {aws} resource synchronization +2. Document custom annotations and maintain troubleshooting runbooks +3. Test changes in non-production environments first + +## Considerations for EKS Auto Mode + +- Limited customization options +- {aws} manages controller updates +- IAM permissions are handled automatically +- Focus on monitoring {aws} resources and Ingress events + +## Additional Resources + +- link:https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html[{aws} EKS Troubleshooting] +- link:https://console.aws.amazon.com/support/home[{aws} Support Center] +- link:https://status.aws.amazon.com/[{aws} Service Health Dashboard] diff --git a/latest/ug/automode/wip.adoc b/latest/ug/automode/wip.adoc new file mode 100644 index 00000000..3ae07b14 --- /dev/null +++ b/latest/ug/automode/wip.adoc @@ -0,0 +1,21 @@ +//!!NODE_ROOT +[.topic] +[[auto-wip,auto-wip.title]] += EKS Auto Mode: Revisions to existing pages WIP +:info_doctype: section +:toc: left + + +include::../attributes.txt[] + +This section contains in-progress revisions for existing pages in other chapters. 
+ +//include::wip/quickstart-v2.adoc[leveloffset=+1] + +//include::wip/create-cluster-v2.adoc[leveloffset=+1] + +//include::wip/auto-migrate-karpenter-v2.adoc[leveloffset=+1] + +//include::wip/auto-cluster-iam-role.adoc[leveloffset=+1] + +//include::wip/auto-create-node-role.adoc[leveloffset=+1] diff --git a/latest/ug/automode/wip/create-vpc-console.adoc b/latest/ug/automode/wip/create-vpc-console.adoc new file mode 100644 index 00000000..f96c7cff --- /dev/null +++ b/latest/ug/automode/wip/create-vpc-console.adoc @@ -0,0 +1,148 @@ +//!!NODE_ROOT
+[.topic] +[[create-vpc-console,create-vpc-console.title]] += Create a VPC for Amazon EKS with the web console +:info_doctype: section + +include::../../attributes.txt[] + +This guide walks you through creating a Virtual Private Cloud (VPC) that's optimized for Amazon Elastic Kubernetes Service (EKS) clusters using the {aws} Management Console. + +== Overview + +When creating a VPC for EKS, you'll need to configure specific networking requirements to ensure proper cluster functionality, including: +- Public and private subnets across multiple Availability Zones +- NAT gateways for outbound internet access from private subnets +- Appropriate CIDR ranges to accommodate pod and service IP addresses +- Required VPC endpoints for EKS functionality + +== Prerequisites + +- An {aws} account +- Access to the {aws} Management Console +- IAM permissions for VPC and EKS resource creation +- Planned CIDR ranges for your VPC, pods, and services + +== Creation Steps + +=== Step 1: Access the VPC Creation Page +1. Sign in to the {aws} Management Console +2. Navigate to the VPC Dashboard +3. Click "Create VPC" + +=== Step 2: Configure Basic VPC Settings +1. Under "VPC settings", select "VPC and more" +2. Enter a name tag in the "Name tag auto-generation" field + - Example: "eks-cluster-vpc" + - This will auto-generate names for all resources with the prefix + +=== Step 3: Configure Network Settings +1. IPv4 CIDR block: + - Enter your VPC CIDR (recommended: 10.0.0.0/16) + - Ensure the CIDR is large enough to accommodate: + - EKS nodes + - Pods (default CNI: up to 250 pods per node) + - Services (default: 10.100.0.0/16) + +2. Set Availability Zones: + - Select at least two AZs (recommended: 3 for production) + - EKS requires a minimum of two AZs for high availability + +3. Configure Subnets: + - Public subnets: Set to 2 or 3 (matching your AZ count) + - Required for public-facing load balancers + - Used for NAT gateways + - Private subnets: Set to 2 or 3 (matching your AZ count) + - Will host your EKS nodes + - Must have outbound internet access via NAT + +=== Step 4: Configure NAT Gateways +1. Choose "1 per AZ" for production environments + - While more expensive, this provides high availability for node outbound traffic + - For dev/test, you can use "In 1 AZ" to reduce costs + +=== Step 5: Configure VPC Endpoints +1. Select "S3 Gateway" + - Required for pulling container images from Amazon ECR +2. Consider adding these endpoints later for enhanced security: + - com.amazonaws.[region].ecr.api + - com.amazonaws.[region].ecr.dkr + - com.amazonaws.[region].eks + - com.amazonaws.[region].elasticloadbalancing + - com.amazonaws.[region].logs + +=== Step 6: Configure DNS Options +1. Enable both options: + - DNS hostnames (required for EKS) + - DNS resolution (required for EKS) + +=== Step 7: Review and Create +1. Review the preview panel to ensure: + - All subnets are properly distributed across AZs + - CIDR ranges don't overlap +2. Click "Create VPC" + +== Post-Creation Configuration + +=== Step 1: Tag Subnets for EKS +After VPC creation, add these required tags to your subnets: + +For public subnets: +``` +kubernetes.io/cluster/ = shared +kubernetes.io/role/elb = 1 +``` + +For private subnets: +``` +kubernetes.io/cluster/ = shared +kubernetes.io/role/internal-elb = 1 +``` + +=== Step 2: Security Group Configuration +1. Create a cluster security group that allows: + - All inbound traffic from within the VPC + - Required outbound traffic for node communication + +== Best Practices + +1. 
Networking: + - Use private subnets for worker nodes + - Use public subnets only for load balancers + - Ensure sufficient IP addresses for pods and services + +2. High Availability: + - Use 3 AZs for production environments + - Deploy NAT Gateways in each AZ + - Size subnets appropriately for expected growth + +3. Security: + - Implement VPC endpoints for {aws} services + - Use security groups to control traffic + - Follow the principle of least privilege + +== Next Steps + +1. Create your EKS cluster using this VPC +2. Configure additional VPC endpoints as needed +3. Set up monitoring for NAT gateway bandwidth +4. Plan IP address management for pods and services + +== Troubleshooting Tips + +1. Common Issues: + - Insufficient IP addresses for pods + - Missing required subnet tags + - NAT gateway connectivity issues + +2. Validation Steps: + - Verify subnet tags + - Check route tables + - Confirm security group rules + +== Additional Resources + +- EKS VPC Requirements Documentation +- EKS Best Practices Guide +- VPC Pricing Calculator +- EKS Networking Documentation diff --git a/latest/ug/automode/wip/eksctl-docs.adoc b/latest/ug/automode/wip/eksctl-docs.adoc new file mode 100644 index 00000000..f7f7c478 --- /dev/null +++ b/latest/ug/automode/wip/eksctl-docs.adoc @@ -0,0 +1,119 @@ +//!!NODE_ROOT
+[.topic] +[[eksctl-docs,eksctl-docs.title]] += EKS Auto Mode +:info_doctype: section + +include::../../attributes.txt[] + +== Introduction + +eksctl supports xref:automode[EKS Auto Mode], a feature that extends {aws} management of Kubernetes clusters beyond the cluster itself, +to allow {aws} to also set up and manage the infrastructure that enables the smooth operation of your workloads. +This allows you to delegate key infrastructure decisions and leverage the expertise of {aws} for day-to-day operations. +Cluster infrastructure managed by {aws} includes many Kubernetes capabilities as core components, as opposed to add-ons, +such as compute autoscaling, pod and service networking, application load balancing, cluster DNS, block storage, and GPU support. + +== Creating an EKS cluster with Auto Mode enabled + +`eksctl` has added a new `autoModeConfig` field to enable and configure Auto Mode. The shape of the `autoModeConfig` field is + +```yaml +autoModeConfig: + # defaults to false + enabled: boolean + # optional, defaults to [general-purpose, system]. + # To disable creation of nodePools, set it to the empty array ([]). + nodePools: []string + # optional, eksctl creates a new role if this is not supplied + # and nodePools are present. + nodeRoleARN: string +``` + +If `autoModeConfig.enabled` is true, eksctl creates an EKS cluster by passing `computeConfig.enabled: true`, +`kubernetesNetworkConfig.elasticLoadBalancing.enabled: true`, and `storageConfig.blockStorage.enabled` to the EKS API, +enabling management of data plane components like compute, storage and networking. + +To create an EKS cluster with Auto Mode enabled, set `autoModeConfig.enabled: true`, as in + +```yaml +# auto-mode-cluster.yaml +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: auto-mode-cluster + region: us-west-2 + +autoModeConfig: + enabled: true +``` + +```shell +$ eksctl create cluster -f auto-mode-cluster.yaml +``` + +eksctl creates the `general-purpose` and `system` node pools by default. To disable creation of the default node pools, +e.g., to configure your own node pools that use a different set of subnets, set `nodePools: []`, as in + +```yaml +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: auto-mode-cluster + region: us-west-2 + +autoModeConfig: + enabled: true + nodePools: [] # disables creation of default node pools. +``` + +== Updating an EKS cluster to use Auto Mode +To update an existing EKS cluster to use Auto Mode, run + +```yaml +# cluster.yaml +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: cluster + region: us-west-2 + +autoModeConfig: + enabled: true +``` + +```shell +$ eksctl update auto-mode-config -f cluster.yaml +``` + +[NOTE] +==== +If the cluster was created by eksctl, and it uses public subnets as cluster subnets, you will either have to +(https://eksctl.io/usage/cluster-subnets-security-groups/[update the cluster to use private subnets], or disable default node pools +by setting `autoModeConfig.nodePools: []`, before running `eksctl update auto-mode-config`. +eksctl disallows this to avoid having Auto Mode launch worker nodes in public subnets. 
+==== + + +== Disabling Auto Mode +To disable Auto Mode, set `autoModeConfig.enabled: false` and run + +```yaml +# cluster.yaml +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: auto-mode-cluster + region: us-west-2 + +autoModeConfig: + enabled: false +``` + +```shell +$ eksctl update auto-mode-config -f cluster.yaml +``` + +== Further information + +- xref:automode[EKS Auto Mode] diff --git a/latest/ug/automode/wip/tag-subnets.adoc b/latest/ug/automode/wip/tag-subnets.adoc new file mode 100644 index 00000000..07365195 --- /dev/null +++ b/latest/ug/automode/wip/tag-subnets.adoc @@ -0,0 +1,32 @@ +//!!NODE_ROOT
+[.topic] +[[tag-subnets,tag-subnets.title]] += Tag VPC Subnets for Load Balancer Deployment +:info_doctype: section + +include::../../attributes.txt[] + +This topic explains how to tag your VPC subnets to enable load balancer deployment using the {aws} Management Console. + +== Required Tags + +Your subnets require specific tags based on their intended use: + +[options="header",cols="1,2,1"] +|=== +|Subnet Type |Tag Key |Tag Value +|Private Subnet |`kubernetes.io/role/internal-elb` |`1` +|Public Subnet |`kubernetes.io/role/elb` |`1` +|=== + +== Adding Tags in the Console + +1. Sign in to the {aws} Management Console +2. Navigate to **VPC** +>+ **Subnets** +3. Select the subnet you want to tag +4. Select the **Tags** tab in the lower panel +5. Choose **Add/Edit Tags** +6. Click **Add Tag** and enter: + - For private subnets: Key = `kubernetes.io/role/internal-elb`, Value = `1` + - For public subnets: Key = `kubernetes.io/role/elb`, Value = `1` +7. Click **Save** diff --git a/latest/ug/book.adoc b/latest/ug/book.adoc new file mode 100644 index 00000000..8d87ae74 --- /dev/null +++ b/latest/ug/book.adoc @@ -0,0 +1,90 @@ +//!!NODE_ROOT +include::attributes.txt[] +[[top]] += Amazon EKS +:doctype: book +:toc: left +:icons: font +:experimental: +:idprefix: +:idseparator: - +:info_doctype: book +:info_title: Amazon EKS +:info_subtitle: User Guide +:info_abstract: This is official Amazon Web Services ({aws}) documentation for Amazon Elastic Kubernetes Service (Amazon EKS). Amazon EKS is a managed \ + service that makes it easy for you to run Kubernetes on {aws} without needing to install and operate \ + your own Kubernetes clusters. Kubernetes is an open-source system for automating the deployment, scaling, \ + and management of containerized applications. +:info_corpauthor: Amazon Web Services +:info_publisher: Amazon Web Services +:info_copyright: 2024 \ +Amazon Web Services, Inc. and/or its affiliates. All rights reserved. +:info_legalnotice: Amazon's trademarks and trade dress may not be used in \ +connection with any product or service that is not Amazon's, \ +in any manner that is likely to cause confusion among customers, \ +or in any manner that disparages or discredits Amazon. All other \ +trademarks not owned by Amazon are the property of their respective \ +owners, who may or may not be affiliated with, connected to, or \ +sponsored by Amazon. +:keywords: EKS, Amazon EKS, Kubernetes, K8s, Cluster, Pod + +[abstract] +-- +This is official Amazon Web Services ({aws}) documentation for Amazon Elastic Kubernetes Service (Amazon EKS). Amazon EKS is a managed service that makes it easy for you to run [.noloc]`Kubernetes` on {aws} without needing to install and operate your own [.noloc]`Kubernetes` clusters. [.noloc]`Kubernetes` is an open-source system for automating the deployment, scaling, and management of containerized applications. +-- +:sectnums: + +[.banner.info] +*Help improve this page* + +[.banner.info] +Want to contribute to this user guide? Choose the *Edit this page on GitHub* link that is located in the right pane of every page. Your contributions will help make our user guide better for everyone. 
+ +include::what-is/what-is-eks.adoc[leveloffset=+1] + +include::getting-started/setting-up.adoc[leveloffset=+1] + +include::quickstart.adoc[leveloffset=+1] + +include::getting-started/learn-eks.adoc[leveloffset=+1] + +include::getting-started/getting-started.adoc[leveloffset=+1] + +include::automode/automode.adoc[leveloffset=+1] + +include::clusters/clusters.adoc[leveloffset=+1] + +include::manage-access/cluster-auth.adoc[leveloffset=+1] + +include::nodes/eks-compute.adoc[leveloffset=+1] + +include::storage/storage.adoc[leveloffset=+1] + +include::networking/eks-networking.adoc[leveloffset=+1] + +include::workloads/eks-workloads.adoc[leveloffset=+1] + +include::clusters/management/eks-managing.adoc[leveloffset=+1] + +include::security/security.adoc[leveloffset=+1] + +include::observability/eks-observe.adoc[leveloffset=+1] + +include::integrations/eks-integrations.adoc[leveloffset=+1] + +include::troubleshooting/troubleshooting.adoc[leveloffset=+1] + +include::connector/eks-connector.adoc[leveloffset=+1] + +include::outposts/eks-outposts.adoc[leveloffset=+1] + +include::ml/machine-learning-on-eks.adoc[leveloffset=+1] + +include::related-projects.adoc[leveloffset=+1] + +include::roadmap.adoc[leveloffset=+1] + +include::doc-history.adoc[leveloffset=+1] + +include::contribute/contribute.adoc[leveloffset=+1] + diff --git a/latest/ug/clusters/authentication/images b/latest/ug/clusters/authentication/images new file mode 120000 index 00000000..5fa69870 --- /dev/null +++ b/latest/ug/clusters/authentication/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/latest/ug/clusters/autoscaling.adoc b/latest/ug/clusters/autoscaling.adoc new file mode 100644 index 00000000..38abb8a6 --- /dev/null +++ b/latest/ug/clusters/autoscaling.adoc @@ -0,0 +1,44 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[autoscaling,autoscaling.title]] += Scale cluster compute with [.noloc]`Karpenter` and [.noloc]`Cluster Autoscaler` +:info_doctype: section +:info_title: Scale cluster compute with Karpenter and Cluster Autoscaler +:info_titleabbrev: Autoscaling +:info_abstract: Discover how Amazon EKS integrates Kubernetes autoscaling with {aws}, empowering rapid and efficient scaling of compute resources to meet application demands using Karpenter and Cluster Autoscaler. + +[abstract] +-- +Discover how Amazon EKS integrates Kubernetes autoscaling with {aws}, empowering rapid and efficient scaling of compute resources to meet application demands using Karpenter and Cluster Autoscaler. +-- + +Autoscaling is a function that automatically scales your resources out and in to meet changing demands. This is a major [.noloc]`Kubernetes` function that would otherwise require extensive human resources to perform manually. + +== EKS Auto Mode + +Amazon EKS Auto Mode automatically scales cluster compute resources. If a pod can't fit onto existing nodes, EKS Auto Mode creates a new one. EKS Auto Mode also consolidates workloads and deletes nodes. EKS Auto Mode builds upon Karpenter. + +For more information, see: + +* <> +* <> +* <> + +== Additional Solutions + +Amazon EKS supports two additional autoscaling products: + + + +*[.noloc]`Karpenter`*:: +[.noloc]`Karpenter` is a flexible, high-performance [.noloc]`Kubernetes` cluster autoscaler that helps improve application availability and cluster efficiency. [.noloc]`Karpenter` launches right-sized compute resources (for example, Amazon EC2 instances) in response to changing application load in under a minute. Through integrating [.noloc]`Kubernetes` with {aws}, [.noloc]`Karpenter` can provision just-in-time compute resources that precisely meet the requirements of your workload. [.noloc]`Karpenter` automatically provisions new compute resources based on the specific requirements of cluster workloads. These include compute, storage, acceleration, and scheduling requirements. Amazon EKS supports clusters using [.noloc]`Karpenter`, although [.noloc]`Karpenter` works with any conformant [.noloc]`Kubernetes` cluster. For more information, see the https://karpenter.sh/docs/[Karpenter] documentation. ++ +[IMPORTANT] +==== +Karpenter is open-source software which {aws} customers are responsible for installing, configuring, and managing in their Kubernetes clusters. {aws} provides technical support when Karpenter is run unmodified using a compatible version in Amazon EKS clusters. It is essential that customers maintain the availability and security of the Karpenter controller as well as appropriate testing procedures when upgrading it or the Kubernetes cluster in which it's running, just like any other customer-managed software. There is no {aws} Service Level Agreement (SLA) for Karpenter and customers are responsible for ensuring that the EC2 instances launched by Karpenter meet their business requirements. +==== + +*Cluster Autoscaler*:: +The [.noloc]`Kubernetes` Cluster Autoscaler automatically adjusts the number of nodes in your cluster when pods fail or are rescheduled onto other nodes. The Cluster Autoscaler uses Auto Scaling groups. For more information, see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md[Cluster Autoscaler on {aws}]. 
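+
+The following is a minimal sketch of a [.noloc]`Karpenter` `NodePool` manifest that illustrates the kind of provisioning requirements described above. It is an example only, not part of the Amazon EKS configuration: the resource names are placeholders, it assumes that a matching `EC2NodeClass` named `default` already exists, and the exact schema depends on the [.noloc]`Karpenter` version that you install. See the https://karpenter.sh/docs/[Karpenter] documentation for the schema that applies to your version.
+
+[source,yaml,subs="verbatim,attributes"]
+----
+apiVersion: karpenter.sh/v1
+kind: NodePool
+metadata:
+  name: default                     # placeholder name
+spec:
+  template:
+    spec:
+      requirements:
+        # Restrict provisioning to On-Demand capacity; add "spot" to also allow Spot Instances.
+        - key: karpenter.sh/capacity-type
+          operator: In
+          values: ["on-demand"]
+      nodeClassRef:
+        group: karpenter.k8s.aws    # assumes an EC2NodeClass named "default" exists
+        kind: EC2NodeClass
+        name: default
+  limits:
+    cpu: "1000"                     # stop provisioning new capacity beyond 1000 vCPUs
+----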
diff --git a/latest/ug/clusters/cluster-endpoint.adoc b/latest/ug/clusters/cluster-endpoint.adoc new file mode 100644 index 00000000..dc1096ac --- /dev/null +++ b/latest/ug/clusters/cluster-endpoint.adoc @@ -0,0 +1,253 @@ +//!!NODE_ROOT
+[.topic]
+[[cluster-endpoint,cluster-endpoint.title]]
+= Control network access to cluster API server endpoint
+:info_doctype: section
+:info_title: Control network access to cluster API server endpoint
+:info_titleabbrev: Configure endpoint access
+:info_abstract: Learn how to enable private access and limit public access to your Amazon EKS cluster's Kubernetes API server endpoint for enhanced security.
+
+include::../attributes.txt[]
+
+[abstract]
+--
+Learn how to enable private access and limit public access to your Amazon EKS cluster's Kubernetes API server endpoint for enhanced security.
+--
+
+This topic helps you enable private access for your Amazon EKS cluster's [.noloc]`Kubernetes` API server endpoint and limit, or completely disable, public access from the internet.
+
+When you create a new cluster, Amazon EKS creates an endpoint for the managed [.noloc]`Kubernetes` API server that you use to communicate with your cluster (using [.noloc]`Kubernetes` management tools such as `kubectl`). By default, this API server endpoint is public to the internet, and access to the API server is secured using a combination of {aws} Identity and Access Management (IAM) and native [.noloc]`Kubernetes` https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Role Based Access Control] (RBAC). This endpoint is known as the _cluster public endpoint_. There is also a _cluster private endpoint_. For more information about the cluster private endpoint, see the following section <>.
+
+[[cluster-endpoint-ipv6,cluster-endpoint-ipv6.title]]
+== `IPv6` cluster endpoint format
+
+EKS creates a unique dual-stack endpoint in the following format for new `IPv6` clusters created after October 2024. An _IPv6 cluster_ is a cluster for which you select `IPv6` in the IP family (`ipFamily`) setting of the cluster.
+
+====
+[role="tablist"]
+{aws}::
+EKS cluster public/private endpoint:
+`eks-cluster.[.replaceable]``region``.api.aws`
+
+{aws} GovCloud (US)::
+EKS cluster public/private endpoint:
+`eks-cluster.[.replaceable]``region``.api.aws`
+
+{amazon-web-services} in China::
+EKS cluster public/private endpoint:
+`eks-cluster.[.replaceable]``region``.api.amazonwebservices.com.cn`
+
+====
+
+[NOTE]
+====
+The dual-stack cluster endpoint was introduced in October 2024. For more information about `IPv6` clusters, see <>. Clusters created before October 2024 use the following endpoint format instead.
+====
+
+[[cluster-endpoint-ipv4,cluster-endpoint-ipv4.title]]
+== `IPv4` cluster endpoint format
+
+EKS creates a unique endpoint in the following format for each cluster that has `IPv4` selected in the IP family (`ipFamily`) setting of the cluster:
+
+====
+[role="tablist"]
+{aws}::
+EKS cluster public/private endpoint
+`eks-cluster.[.replaceable]``region``.eks.amazonaws.com`
+
+{aws} GovCloud (US)::
+EKS cluster public/private endpoint
+`eks-cluster.[.replaceable]``region``.eks.amazonaws.com`
+
+{amazon-web-services} in China::
+EKS cluster public/private endpoint
+`eks-cluster.[.replaceable]``region``.api.amazonwebservices.com.cn`
+
+====
+
+[NOTE]
+====
+Before October 2024, `IPv6` clusters also used this endpoint format. For those clusters, both the public endpoint and the private endpoint resolve to only `IPv4` addresses.
+==== + +[[cluster-endpoint-private,cluster-endpoint-private.title]] +== Cluster private endpoint + +You can enable private access to the [.noloc]`Kubernetes` API server so that all communication between your nodes and the API server stays within your VPC. You can limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server. + +[NOTE] +==== + +Because this endpoint is for the [.noloc]`Kubernetes` API server and not a traditional {aws} PrivateLink endpoint for communicating with an {aws} API, it doesn't appear as an endpoint in the Amazon VPC console. + +==== + +When you enable endpoint private access for your cluster, Amazon EKS creates a Route 53 private hosted zone on your behalf and associates it with your cluster's VPC. This private hosted zone is managed by Amazon EKS, and it doesn't appear in your account's Route 53 resources. In order for the private hosted zone to properly route traffic to your API server, your VPC must have `enableDnsHostnames` and `enableDnsSupport` set to `true`, and the DHCP options set for your VPC must include `AmazonProvidedDNS` in its domain name servers list. For more information, see link:vpc/latest/userguide/vpc-dns.html#vpc-dns-updating[Updating DNS support for your VPC,type="documentation"] in the _Amazon VPC User Guide_. + +You can define your API server endpoint access requirements when you create a new cluster, and you can update the API server endpoint access for a cluster at any time. + +[[modify-endpoint-access,modify-endpoint-access.title]] +== Modifying cluster endpoint access + +Use the procedures in this section to modify the endpoint access for an existing cluster. The following table shows the supported API server endpoint access combinations and their associated behavior. + +[cols="1,1,1", options="header"] +|=== +|Endpoint public access +|Endpoint private access +|Behavior + + +|Enabled +|Disabled +a| + +* This is the default behavior for new Amazon EKS clusters. +* [.noloc]`Kubernetes` API requests that originate from within your cluster's VPC (such as node to control plane communication) leave the VPC but not Amazon's network. +* Your cluster API server is accessible from the internet. You can, optionally, limit the CIDR blocks that can access the public endpoint. If you limit access to specific CIDR blocks, then it is recommended that you also enable the private endpoint, or ensure that the CIDR blocks that you specify include the addresses that nodes and Fargate [.noloc]`Pods` (if you use them) access the public endpoint from. + + +|Enabled +|Enabled +a| + +* [.noloc]`Kubernetes` API requests within your cluster's VPC (such as node to control plane communication) use the private VPC endpoint. +* Your cluster API server is accessible from the internet. You can, optionally, limit the CIDR blocks that can access the public endpoint. +* If you are using hybrid nodes with your Amazon EKS cluster, it is not recommended to have both Public and Private cluster endpoint access enabled. Because your hybrid nodes are running outside of your VPC, they will resolve the cluster endpoint to the public IP addresses. It is recommended to use either Public or Private cluster endpoint access for clusters with hybrid nodes. + + +|Disabled +|Enabled +a| + +* All traffic to your cluster API server must come from within your cluster's VPC or a link:whitepapers/latest/aws-vpc-connectivity-options/introduction.html[connected network,type="documentation"]. 
+* There is no public access to your API server from the internet. Any `kubectl` commands must come from within the VPC or a connected network. For connectivity options, see <>. +* The cluster's API server endpoint is resolved by public DNS servers to a private IP address from the VPC. In the past, the endpoint could only be resolved from within the VPC. ++ +If your endpoint does not resolve to a private IP address within the VPC for an existing cluster, you can: ++ +** Enable public access and then disable it again. You only need to do so once for a cluster and the endpoint will resolve to a private IP address from that point forward. +** <> your cluster. + +|=== + + +You can modify your cluster API server endpoint access using the {aws-management-console} or {aws} CLI. + + +== Configure endpoint access - {aws} console + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose the name of the cluster to display your cluster information. +. Choose the *Networking* tab and choose *Update*. +. For *Private access*, choose whether to enable or disable private access for your cluster's [.noloc]`Kubernetes` API server endpoint. If you enable private access, [.noloc]`Kubernetes` API requests that originate from within your cluster's VPC use the private VPC endpoint. You must enable private access to disable public access. +. For *Public access*, choose whether to enable or disable public access for your cluster's [.noloc]`Kubernetes` API server endpoint. If you disable public access, your cluster's [.noloc]`Kubernetes` API server can only receive requests from within the cluster VPC. +. (Optional) If you've enabled *Public access*, you can specify which addresses from the internet can communicate to the public endpoint. Select *Advanced Settings*. Enter a CIDR block, such as [.replaceable]`203.0.113.5/32`. The block cannot include https://en.wikipedia.org/wiki/Reserved_IP_addresses[reserved addresses]. You can enter additional blocks by selecting *Add Source*. There is a maximum number of CIDR blocks that you can specify. For more information, see <>. If you specify no blocks, then the public API server endpoint receives requests from all (`0.0.0.0/0`) IP addresses. If you restrict access to your public endpoint using CIDR blocks, it is recommended that you also enable private endpoint access so that nodes and Fargate [.noloc]`Pods` (if you use them) can communicate with the cluster. Without the private endpoint enabled, your public access endpoint CIDR sources must include the egress sources from your VPC. For example, if you have a node in a private subnet that communicates to the internet through a NAT Gateway, you will need to add the outbound IP address of the NAT gateway as part of an allowed CIDR block on your public endpoint. +. Choose *Update* to finish. + + +== Configure endpoint access - {aws} CLI + +Complete the following steps using the {aws} CLI version `1.27.160` or later. You can check your current version with `aws --version`. To install or upgrade the {aws} CLI, see link:cli/latest/userguide/cli-chap-install.html[Installing the {aws} CLI,type="documentation"]. + +. Update your cluster API server endpoint access with the following {aws} CLI command. Substitute your cluster name and desired endpoint access values. If you set `endpointPublicAccess=true`, then you can (optionally) enter single CIDR block, or a comma-separated list of CIDR blocks for `publicAccessCidrs`. 
The blocks cannot include https://en.wikipedia.org/wiki/Reserved_IP_addresses[reserved addresses]. If you specify CIDR blocks, then the public API server endpoint will only receive requests from the listed blocks. There is a maximum number of CIDR blocks that you can specify. For more information, see <>. If you restrict access to your public endpoint using CIDR blocks, it is recommended that you also enable private endpoint access so that nodes and Fargate [.noloc]`Pods` (if you use them) can communicate with the cluster. Without the private endpoint enabled, your public access endpoint CIDR sources must include the egress sources from your VPC. For example, if you have a node in a private subnet that communicates with the internet through a NAT gateway, you will need to add the outbound IP address of the NAT gateway as part of an allowed CIDR block on your public endpoint. If you specify no CIDR blocks, then the public API server endpoint receives requests from all (`0.0.0.0/0`) IP addresses.
++
+NOTE: The following command enables private access and public access from a single IP address for the API server endpoint. Replace [.replaceable]`203.0.113.5/32` with a single CIDR block, or a comma-separated list of CIDR blocks that you want to restrict network access to.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks update-cluster-config \
+    --region region-code \
+    --name my-cluster \
+    --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32",endpointPrivateAccess=true
+----
++
+An example output is as follows.
++
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "update": {
+        "id": "e6f0905f-a5d4-4a2a-8c49-EXAMPLE00000",
+        "status": "InProgress",
+        "type": "EndpointAccessUpdate",
+        "params": [
+            {
+                "type": "EndpointPublicAccess",
+                "value": "true"
+            },
+            {
+                "type": "EndpointPrivateAccess",
+                "value": "true"
+            },
+            {
+                "type": "publicAccessCidrs",
+                "value": "[\"203.0.113.5/32\"]"
+            }
+        ],
+        "createdAt": 1576874258.137,
+        "errors": []
+    }
+}
+----
+. Monitor the status of your endpoint access update with the following command, using the cluster name and update ID that was returned by the previous command. Your update is complete when the status is shown as `Successful`.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks describe-update \
+    --region region-code \
+    --name my-cluster \
+    --update-id e6f0905f-a5d4-4a2a-8c49-EXAMPLE00000
+----
++
+An example output is as follows.
++
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "update": {
+        "id": "e6f0905f-a5d4-4a2a-8c49-EXAMPLE00000",
+        "status": "Successful",
+        "type": "EndpointAccessUpdate",
+        "params": [
+            {
+                "type": "EndpointPublicAccess",
+                "value": "true"
+            },
+            {
+                "type": "EndpointPrivateAccess",
+                "value": "true"
+            },
+            {
+                "type": "publicAccessCidrs",
+                "value": "[\"203.0.113.5/32\"]"
+            }
+        ],
+        "createdAt": 1576874258.137,
+        "errors": []
+    }
+}
+----
+
+
+[[private-access,private-access.title]]
+== Accessing a private-only API server
+
+If you have disabled public access for your cluster's [.noloc]`Kubernetes` API server endpoint, you can only access the API server from within your VPC or a link:whitepapers/latest/aws-vpc-connectivity-options/introduction.html[connected network,type="documentation"].
Here are a few possible ways to access the [.noloc]`Kubernetes` API server endpoint: + + + +*Connected network*:: +Connect your network to the VPC with an link:vpc/latest/tgw/what-is-transit-gateway.html[{aws} transit gateway,type="documentation"] or other link:aws-technical-content/latest/aws-vpc-connectivity-options/introduction.html[connectivity,type="documentation"] option and then use a computer in the connected network. You must ensure that your Amazon EKS control plane security group contains rules to allow ingress traffic on port 443 from your connected network. + + +*Amazon EC2 bastion host*:: +You can launch an Amazon EC2 instance into a public subnet in your cluster's VPC and then log in via SSH into that instance to run `kubectl` commands. For more information, see link:quickstart/architecture/linux-bastion/[Linux bastion hosts on {aws},type="marketing"]. You must ensure that your Amazon EKS control plane security group contains rules to allow ingress traffic on port 443 from your bastion host. For more information, see <>. ++ +When you configure `kubectl` for your bastion host, be sure to use {aws} credentials that are already mapped to your cluster's RBAC configuration, or add the link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] that your bastion will use to the RBAC configuration before you remove endpoint public access. For more information, see <> and <>. + + +*{aws} Cloud9 IDE*:: +{aws} Cloud9 is a cloud-based integrated development environment (IDE) that lets you write, run, and debug your code with just a browser. You can create an {aws} Cloud9 IDE in your cluster's VPC and use the IDE to communicate with your cluster. For more information, see link:cloud9/latest/user-guide/create-environment.html[Creating an environment in {aws} Cloud9,type="documentation"]. You must ensure that your Amazon EKS control plane security group contains rules to allow ingress traffic on port 443 from your IDE security group. For more information, see <>. ++ +When you configure `kubectl` for your {aws} Cloud9 IDE, be sure to use {aws} credentials that are already mapped to your cluster's RBAC configuration, or add the IAM principal that your IDE will use to the RBAC configuration before you remove endpoint public access. For more information, see <> and <>. diff --git a/latest/ug/clusters/cluster-insights.adoc b/latest/ug/clusters/cluster-insights.adoc new file mode 100644 index 00000000..fa079624 --- /dev/null +++ b/latest/ug/clusters/cluster-insights.adoc @@ -0,0 +1,201 @@ +//!!NODE_ROOT
+[.topic] +[[cluster-insights,cluster-insights.title]] += Prepare for [.noloc]`Kubernetes` version upgrades with cluster insights +:info_titleabbrev: Cluster insights +:keywords: cluster, upgrade, insights + +include::../attributes.txt[] + +[abstract] +-- +Discover how Amazon EKS cluster insights help monitor and resolve potential [.noloc]`Kubernetes` version upgrade issues for enhanced reliability and faster adoption of new capabilities. +-- + +Amazon EKS cluster insights provide recommendations to help you follow Amazon EKS and [.noloc]`Kubernetes` best practices. Every Amazon EKS cluster undergoes automatic, recurring checks against an Amazon EKS curated list of insights. These insight checks are fully managed by Amazon EKS and offer recommendations on how to address any findings. + +* Before updating your cluster [.noloc]`Kubernetes` version, check the *Cluster insights* tab of the observability dashboard in the link:eks/home#/clusters[Amazon EKS console.,type="console"] +* If your cluster has identified issues, review them and make appropriate fixes. The issues include links to Amazon EKS and [.noloc]`Kubernetes`. +* After fixing issues, wait for the cluster insights to refresh. If all issues have been resolved, <> + +Amazon EKS returns insights related to [.noloc]`Kubernetes` version upgrade readiness. Upgrade insights identify possible issues that could impact [.noloc]`Kubernetes` cluster upgrades. This minimizes the effort that administrators spend preparing for upgrades and increases the reliability of applications on newer [.noloc]`Kubernetes` versions. Clusters are automatically scanned by Amazon EKS against a list of possible [.noloc]`Kubernetes` version upgrade impacting issues. Amazon EKS frequently updates the list of insight checks based on reviews of changes made in each [.noloc]`Kubernetes` version release. + +Amazon EKS upgrade insights speed up the testing and verification process for new versions. They also allow cluster administrators and application developers to leverage the newest [.noloc]`Kubernetes` capabilities by highlighting concerns and offering remediation advice. To see the list of insight checks performed and any relevant issues that Amazon EKS has identified, you can call the Amazon EKS `ListInsights` API operation or look in the Amazon EKS console. + +Cluster insights update periodically. You cannot manually refresh cluster insights. If you fix a cluster issue, it will take some time for cluster insights to update. To determine if a fix was successful, compare the time the change deployed to the "last refresh time" of the cluster insight. + +[[cluster-insights-console,cluster-insights-console.title]] +== View cluster insights (Console) +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. From the cluster list, choose the name of the Amazon EKS cluster for which you want to see the insights. +. Choose *View dashboard*. +. Choose the *Cluster Insights* tab. +. In the *Upgrade Insights* table, you will see the following columns: ++ +** *Name* – The check that was performed by Amazon EKS against the cluster. +** *Insight status* – An insight with a status of "Error" typically means the impacted [.noloc]`Kubernetes` version is N+1 of the current cluster version, while a status of "Warning" means the insight applies to a future [.noloc]`Kubernetes` version N+2 or more. An insight with status of "Passing" means Amazon EKS has not found any issues associated with this insight check in your cluster. 
An insight status of "Unknown" means Amazon EKS is unable to determine if your cluster is impacted by this insight check. +** *Version* – The [.noloc]`Kubernetes` version that the insight checked for possible issues. +** *Last refresh time* – The time the status of the insight was last refreshed for this cluster. +** *Last transition time* – The time the status of this insight last changed. +** *Description* – Information from the insight check, which includes the alert and recommended actions for remediation. + + +[[cluster-insights-cli,cluster-insights-cli.title]] +== View cluster insights ({aws} CLI) +. Determine which cluster you would like to check for insights. The following command lists the insights for a specified cluster. Make the following modifications to the command as needed and then run the modified command: ++ +** Replace [.replaceable]`region-code` with the code for your {aws} Region. +** Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes,quotes"] +---- +aws eks list-insights --region [.replaceable]`region-code` --cluster-name [.replaceable]`my-cluster` +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +{ +"insights": + [ + { + "id": "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", + "name": "Deprecated APIs removed in Kubernetes vX.XX", + "category": "UPGRADE_READINESS", + "kubernetesVersion": "X.XX", + "lastRefreshTime": 1734557315.000, + "lastTransitionTime": 1734557309.000, + "description": "Checks for usage of deprecated APIs that are scheduled for removal in Kubernetes vX.XX. Upgrading your cluster before migrating to the updated APIs supported by vX.XX could cause application impact.", + "insightStatus": + { + "status": "PASSING", + "reason": "No deprecated API usage detected within the last 30 days.", + }, + }, + { + "id": "a1b2c3d4-5678-90ab-cdef-EXAMPLE22222", + "name": "Kubelet version skew", + "category": "UPGRADE_READINESS", + "kubernetesVersion": "X.XX", + "lastRefreshTime": 1734557309.000, + "lastTransitionTime": 1734557309.000, + "description": "Checks for kubelet versions of worker nodes in the cluster to see if upgrade would cause non compliance with supported Kubernetes kubelet version skew policy.", + "insightStatus": + { + "status": "UNKNOWN", + "reason": "Unable to determine status of node kubelet versions.", + }, + }, + { + "id": "a1b2c3d4-5678-90ab-cdef-EXAMPLE33333", + "name": "Deprecated APIs removed in Kubernetes vX.XX", + "category": "UPGRADE_READINESS", + "kubernetesVersion": "X.XX", + "lastRefreshTime": 1734557315.000, + "lastTransitionTime": 1734557309.000, + "description": "Checks for usage of deprecated APIs that are scheduled for removal in Kubernetes vX.XX. 
Upgrading your cluster before migrating to the updated APIs supported by vX.XX could cause application impact.", + "insightStatus": + { + "status": "PASSING", + "reason": "No deprecated API usage detected within the last 30 days.", + }, + }, + { + "id": "a1b2c3d4-5678-90ab-cdef-EXAMPLEaaaaa", + "name": "Cluster health issues", + "category": "UPGRADE_READINESS", + "kubernetesVersion": "X.XX", + "lastRefreshTime": 1734557314.000, + "lastTransitionTime": 1734557309.000, + "description": "Checks for any cluster health issues that prevent successful upgrade to the next Kubernetes version on EKS.", + "insightStatus": + { + "status": "PASSING", + "reason": "No cluster health issues detected.", + }, + }, + { + "id": "a1b2c3d4-5678-90ab-cdef-EXAMPLEbbbbb", + "name": "EKS add-on version compatibility", + "category": "UPGRADE_READINESS", + "kubernetesVersion": "X.XX", + "lastRefreshTime": 1734557314.000, + "lastTransitionTime": 1734557309.000, + "description": "Checks version of installed EKS add-ons to ensure they are compatible with the next version of Kubernetes. ", + "insightStatus": { "status": "PASSING", "reason": "All installed EKS add-on versions are compatible with next Kubernetes version."}, + }, + { + "id": "a1b2c3d4-5678-90ab-cdef-EXAMPLEccccc", + "name": "kube-proxy version skew", + "category": "UPGRADE_READINESS", + "kubernetesVersion": "X.XX", + "lastRefreshTime": 1734557314.000, + "lastTransitionTime": 1734557309.000, + "description": "Checks version of kube-proxy in cluster to see if upgrade would cause non compliance with supported Kubernetes kube-proxy version skew policy.", + "insightStatus": + { + "status": "PASSING", + "reason": "kube-proxy versions match the cluster control plane version.", + }, + }, + { + "id": "a1b2c3d4-5678-90ab-cdef-EXAMPLEddddd", + "name": "Deprecated APIs removed in Kubernetes vX.XX", + "category": "UPGRADE_READINESS", + "kubernetesVersion": "X.XX", + "lastRefreshTime": 1734557315.000, + "lastTransitionTime": 1734557309.000, + "description": "Checks for usage of deprecated APIs that are scheduled for removal in Kubernetes vX.XX. Upgrading your cluster before migrating to the updated APIs supported by vX.XX could cause application impact.", + "insightStatus": + { + "status": "PASSING", + "reason": "No deprecated API usage detected within the last 30 days.", + }, + }, + ], +"nextToken": null, +} +---- +. For descriptive information about the insight, run the following command. Make the following modifications to the command as needed and then run the modified command: ++ +** Replace [.replaceable]`region-code` with the code for your {aws} Region. +** Replace [.replaceable]`a1b2c3d4-5678-90ab-cdef-EXAMPLE22222` with an insight ID retrieved from listing the cluster insights. +** Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes,quotes"] +---- +aws eks describe-insight --region region-code --id [.replaceable]`a1b2c3d4-5678-90ab-cdef-EXAMPLE22222` --cluster-name my-cluster +---- ++ +An example output is as follows. 
++ +[source,bash,subs="verbatim,attributes"] +---- +{ + "insight": + { + "id": "a1b2c3d4-5678-90ab-cdef-EXAMPLE22222", + "name": "Kubelet version skew", + "category": "UPGRADE_READINESS", + "kubernetesVersion": "1.27", + "lastRefreshTime": 1734557309.000, + "lastTransitionTime": 1734557309.000, + "description": "Checks for kubelet versions of worker nodes in the cluster to see if upgrade would cause non compliance with supported Kubernetes kubelet version skew policy.", + "insightStatus": + { + "status": "UNKNOWN", + "reason": "Unable to determine status of node kubelet versions.", + }, + "recommendation": "Upgrade your worker nodes to match the Kubernetes version of your cluster control plane.", + "additionalInfo": + { + "Kubelet version skew policy": "https://kubernetes.io/releases/version-skew-policy/#kubelet", + "Updating a managed node group": "https://docs.aws.amazon.com/eks/latest/userguide/update-managed-node-group.html", + }, + "resources": [], + "categorySpecificSummary": + { "deprecationDetails": [], "addonCompatibilityDetails": [] }, + }, +} +---- diff --git a/latest/ug/clusters/clusters.adoc b/latest/ug/clusters/clusters.adoc new file mode 100644 index 00000000..0235a566 --- /dev/null +++ b/latest/ug/clusters/clusters.adoc @@ -0,0 +1,93 @@ +//!!NODE_ROOT +include::../attributes.txt[] +[[clusters,clusters.title]] += Organize workloads with Amazon EKS clusters +:doctype: book +:sectnums: +:toc: left +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . +:info_doctype: chapter +:info_title: Organize workloads with Amazon EKS clusters +:info_titleabbrev: Clusters + +An Amazon EKS cluster consists of two primary components: + +* The Amazon EKS control plane +* Amazon EKS nodes that are registered with the control plane + +The Amazon EKS control plane consists of control plane nodes that run the [.noloc]`Kubernetes` software, such as `etcd` and the [.noloc]`Kubernetes` API server. The control plane runs in an account managed by {aws}, and the [.noloc]`Kubernetes` API is exposed via the Amazon EKS endpoint associated with your cluster. Each Amazon EKS cluster control plane is single-tenant and unique, and runs on its own set of Amazon EC2 instances. + +All of the data stored by the `etcd` nodes and associated Amazon EBS volumes is encrypted using {aws} KMS. The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support `kubectl exec` `logs` `proxy` data flows). + +[IMPORTANT] +==== + +In the Amazon EKS environment, `etcd` storage is limited to 8 GiB as per https://etcd.io/docs/v3.5/dev-guide/limit/#storage-size-limit[upstream] guidance. You can monitor a metric for the current database size by running the following command. 
If your cluster has a [.noloc]`Kubernetes` version below `1.28`, replace [.replaceable]`apiserver_storage_size_bytes` with the following: + + + +* [.noloc]`Kubernetes` version `1.27` and `1.26` – `apiserver_storage_db_total_size_in_bytes` +* [.noloc]`Kubernetes` version `1.25` and below – `etcd_db_total_size_in_bytes` + +[source,bash,subs="verbatim,attributes"] +---- +kubectl get --raw=/metrics | grep "apiserver_storage_size_bytes" +---- + +==== + +Amazon EKS nodes run in your {aws} account and connect to your cluster's control plane via the API server endpoint and a certificate file that is created for your cluster. + +[NOTE] +==== + + +* You can find out how the different components of Amazon EKS work in <>. +* For connected clusters, see <>. + +==== + +[.topiclist] +[[Topic List]] + +include::create-cluster-auto.adoc[leveloffset=+1] + +include::create-cluster.adoc[leveloffset=+1] + + +include::cluster-insights.adoc[leveloffset=+1] + + +include::update-cluster.adoc[leveloffset=+1] + + +include::delete-cluster.adoc[leveloffset=+1] + + +include::cluster-endpoint.adoc[leveloffset=+1] + + +include::windows-support.adoc[leveloffset=+1] + + +include::disable-windows-support.adoc[leveloffset=+1] + + +include::private-clusters.adoc[leveloffset=+1] + + +include::kubernetes-versions.adoc[leveloffset=+1] + + +include::platform-versions.adoc[leveloffset=+1] + + +include::autoscaling.adoc[leveloffset=+1] + +include::zone-shift.adoc[leveloffset=+1] + +include::zone-shift-enable.adoc[leveloffset=+1] diff --git a/latest/ug/clusters/create-cluster-auto.adoc b/latest/ug/clusters/create-cluster-auto.adoc new file mode 100644 index 00000000..32a496cf --- /dev/null +++ b/latest/ug/clusters/create-cluster-auto.adoc @@ -0,0 +1,336 @@ +//!!NODE_ROOT
+ +[.topic] +[[create-cluster-auto,create-cluster-auto.title]] += Create an Amazon EKS Auto Mode cluster +:info_doctype: section +:info_title: Create an Amazon EKS Auto Mode cluster +:info_titleabbrev: Create auto cluster +:info_abstract: Learn how to create an Amazon EKS Auto Mode cluster to run Kubernetes applications, including prerequisites, networking options, and add-on configurations. +:idprefix: id_ + +include::../attributes.txt[] + + +[abstract] +-- +Learn how to create an Amazon EKS Auto Mode cluster to run Kubernetes applications, including prerequisites, networking options, and add-on configurations. +-- + +This topic provides detailed instructions for creating an Amazon EKS Auto Mode cluster using advanced configuration options. It covers prerequisites, networking options, and add-on configurations. The process includes setting up IAM roles, configuring cluster settings, specifying networking parameters, and selecting add-ons. Users can create clusters using either the {aws} Management Console or the {aws} CLI, with step-by-step guidance provided for both methods. + +For users seeking a less complex setup process, refer to the following for simplified cluster creation steps: + +* <> +* <> +* <> + +This advanced configuration guide is intended for users who require more granular control over their EKS Auto Mode cluster setup and are familiar with Amazon EKS concepts and requirements. Before proceeding with the advanced configuration, ensure you have met all prerequisites and have a thorough understanding of the networking and IAM requirements for EKS Auto Mode clusters. + +EKS Auto Mode requires additional IAM permissions. For more information, see: + +* <> +* <> + +[NOTE] +==== +If you want to create a cluster without EKS Auto Mode, see <>. + +This topic covers advanced configuration. If you are looking to get started with EKS Auto Mode, see <>. +==== + + +== Prerequisites + +* An existing VPC and subnets that meet <>. Before you deploy a cluster for production use, we recommend that you have a thorough understanding of the VPC and subnet requirements. If you don't have a VPC and subnets, you can create them using an <>. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version`. To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. +* An link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] with permissions to create and modify EKS and IAM resources. + +== Create cluster - {aws} console + + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose *Add cluster* and then choose *Create*. +. Under _Configuration options_, select *Custom configuration*. 
+** This topic covers custom configuration. For information about Quick configuration, see <>.
+. Confirm *Use EKS Auto Mode* is enabled.
+** This topic covers creating clusters with EKS Auto Mode. For more information about creating clusters without EKS Auto Mode, see <>.
+. On the *Configure cluster* page, enter the following fields:
++
+** *Name* – A name for your cluster. The name can contain only alphanumeric characters (case-sensitive), hyphens, and underscores. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in.
+** *Cluster IAM role* – Choose the Amazon EKS cluster IAM role that you created to allow the [.noloc]`Kubernetes` control plane to manage {aws} resources on your behalf. If you haven't previously created a cluster IAM role for EKS Auto Mode, select the *Create recommended role* button to create the role with the required permissions in the IAM console.
+** *[.noloc]`Kubernetes` version* – The version of [.noloc]`Kubernetes` to use for your cluster. We recommend selecting the latest version, unless you need an earlier version.
+** *Upgrade policy* -- The [.noloc]`Kubernetes` version policy that you would like to set for your cluster. If you want your cluster to only run on a standard support version, you can choose *Standard*. If you want your cluster to enter extended support at the end of standard support for a version, you can choose *Extended*. If you select a [.noloc]`Kubernetes` version that is currently in extended support, you cannot select standard support as an option.
+. In the *Auto Mode Compute* section of the *Configure cluster* page, enter the following fields:
+** *Node pools* -- Determine whether you want to use the built-in node pools. For more information, see <>.
+** *Node IAM role* -- If you enable any of the built-in node pools, you need to select a Node IAM role. EKS Auto Mode will assign this role to new nodes. You cannot change this value after the cluster is created. If you haven't previously created a Node IAM role for EKS Auto Mode, select the *Create recommended role* button to create the role with the required permissions. For more information about this role, see <>.
+. In the *Cluster access* section of the *Configure cluster* page, enter the following fields:
+** *Bootstrap cluster administrator access* -- The cluster creator is automatically a [.noloc]`Kubernetes` administrator. If you want to disable this, select *Disallow cluster administrator access*.
+** *Cluster authentication mode* -- EKS Auto Mode requires EKS access entries, the EKS API authentication mode. You can optionally enable the `ConfigMap` authentication mode by selecting *EKS API and ConfigMap*.
+. Enter the remaining fields on the *Configure cluster* page:
+** *Secrets encryption* – (Optional) Choose to enable secrets encryption of [.noloc]`Kubernetes` secrets using a KMS key. You can also enable this after you create your cluster. Before you enable this capability, make sure that you're familiar with the information in xref:enable-kms[Encrypt Kubernetes secrets with {aws} KMS on existing clusters,linkend=enable-kms].
+** *ARC Zonal shift* -- EKS Auto Mode does not support ARC zonal shift.
+** *Tags* – (Optional) Add any tags to your cluster. For more information, see <>.
++
+When you're done with this page, choose *Next*.
+.
On the *Specify networking* page, select values for the following fields:
++
+** *VPC* – Choose an existing VPC that meets xref:network-requirements-vpc[Amazon EKS VPC requirements,linkend=network-requirements-vpc] to create your cluster in. Before choosing a VPC, we recommend that you're familiar with all of the requirements and considerations in xref:network-reqs[View Amazon EKS networking requirements for VPC and subnets,linkend=network-reqs]. You can't change which VPC you want to use after cluster creation. If no VPCs are listed, then you need to create one first. For more information, see <>.
+** *Subnets* – By default, all available subnets in the VPC specified in the previous field are preselected. You must select at least two.
++
+The subnets that you choose must meet the <>. Before selecting subnets, we recommend that you're familiar with all of the <>.
+** *Security groups* – (Optional) Specify one or more security groups that you want Amazon EKS to associate to the network interfaces that it creates.
++
+Whether you choose any security groups or not, Amazon EKS creates a security group that enables communication between your cluster and your VPC. Amazon EKS associates this security group, and any that you choose, to the network interfaces that it creates. For more information about the cluster security group that Amazon EKS creates, see <>. You can modify the rules in the cluster security group that Amazon EKS creates.
+** *Choose cluster IP address family* – You can choose either *IPv4* or *IPv6*.
++
+[.noloc]`Kubernetes` assigns `IPv4` addresses to [.noloc]`Pods` and services, by default. Before deciding to use the `IPv6` family, make sure that you're familiar with all of the considerations and requirements in the <>, <>, <>, and <> topics. If you choose the `IPv6` family, you can't specify an address range for [.noloc]`Kubernetes` to assign `IPv6` service addresses from like you can for the `IPv4` family. [.noloc]`Kubernetes` assigns service addresses from the unique local address range (`fc00::/7`).
+** (Optional) Choose *Configure [.noloc]`Kubernetes` Service IP address range* and specify a *Service `IPv4` range*.
++
+Specifying your own range can help prevent conflicts between [.noloc]`Kubernetes` services and other networks peered or connected to your VPC. Enter a range in CIDR notation. For example: `10.2.0.0/16`.
++
+The CIDR block must meet the following requirements:
++
+*** Be within one of the following ranges: `10.0.0.0/8`, `172.16.0.0/12`, or `192.168.0.0/16`.
+*** Have a minimum size of `/24` and a maximum size of `/12`.
+*** Not overlap with the range of the VPC for your Amazon EKS resources.
++
+You can only specify this option when using the `IPv4` address family and only at cluster creation. If you don't specify this, then [.noloc]`Kubernetes` assigns service IP addresses from either the `10.100.0.0/16` or `172.20.0.0/16` CIDR blocks.
+** For *Cluster endpoint access*, select an option. After your cluster is created, you can change this option. Before selecting a non-default option, make sure to familiarize yourself with the options and their implications. For more information, see <>.
++
+When you're done with this page, choose *Next*.
+. (Optional) On the *Configure observability* page, choose which *Metrics* and *Control plane logging* options to turn on. By default, each log type is turned off.
++
+** For more information about the [.noloc]`Prometheus` metrics option, see <>.
+** For more information about the *Control plane logging* options, see <>. +** When you're done with this page, choose *Next*. +. On the *Select add-ons* page, choose the add-ons that you want to add to your cluster. You can choose as many *Amazon EKS add-ons* and *{aws} Marketplace add-ons* as you require. If the *{aws} Marketplace add-ons* that you want to install isn't listed, you can click the page numbering to view additional page results or search for available *{aws} Marketplace add-ons* by entering text in the search box. You can also filter by *category*, *vendor*, or *pricing model* and then choose the add-ons from the search results. When creating a cluster, you can view, select, and install any add-on that supports EKS Pod Identities as detailed in <>. +** EKS Auto Mode automates the functionality of certain add-ons. If you plan to deploy EKS Managed Node Groups to your EKS Auto Mode Cluster, select *Additional Amazon EKS Add-ons* and review the options. You may need to install add-ons such as CoreDNS and kube-proxy. EKS will only install the add-ons in this section on self-managed nodes and node groups. +** When you're done with this page, choose *Next*. +. On the *Configure selected add-ons settings* page, select the version that you want to install. You can always update to a later version after cluster creation. ++ +For add-ons that support EKS Pod Identities, you can use the console to automatically generate the role with the name, {aws} managed policy, and trust policy prepopulated specifically for the add-on. You can re-use existing roles or create new roles for supported add-ons. For the steps to use the console to create roles for add-ons that support EKS Pod Identities, see <<_create_add_on_console>>. If an add-on does not support EKS Pod Identity, a message displays with instructions to use the wizard to create the IAM roles for service accounts (IRSA) after the cluster is created. ++ +You can update the configuration of each add-on after cluster creation. For more information about configuring add-ons, see <>. When you're done with this page, choose *Next*. +. On the *Review and create* page, review the information that you entered or selected on the previous pages. If you need to make changes, choose *Edit*. When you're satisfied, choose *Create*. The *Status* field shows *CREATING* while the cluster is provisioned. ++ +NOTE: You might receive an error that one of the Availability Zones in your request doesn't have sufficient capacity to create an Amazon EKS cluster. If this happens, the error output contains the Availability Zones that can support a new cluster. Retry creating your cluster with at least two subnets that are located in the supported Availability Zones for your account. For more information, see <>. ++ +Cluster provisioning takes several minutes. + + +== Create cluster - {aws} CLI + +The following CLI instructions cover creating IAM resources and creating the cluster. + +=== Create an EKS Auto Mode Cluster IAM Role + +#### Step 1: Create the Trust Policy + +Create a trust policy that allows the Amazon EKS service to assume the role. 
Save the policy as `trust-policy.json`: + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "eks.amazonaws.com" + }, + "Action": [ + "sts:AssumeRole", + "sts:TagSession" + ] + } + ] +} +``` + +#### Step 2: Create the IAM Role + +Use the trust policy to create the Cluster IAM Role: + +``` +aws iam create-role \ + --role-name AmazonEKSAutoClusterRole \ + --assume-role-policy-document file://trust-policy.json +``` + +#### Step 3: Note the Role ARN + +Retrieve and save the ARN of the new role for use in subsequent steps: + +``` +aws iam get-role --role-name AmazonEKSAutoClusterRole --query "Role.Arn" --output text +``` + +#### Step 4: Attach Required Policies + +Attach the following {aws} managed policies to the Cluster IAM Role to grant the necessary permissions: + +**AmazonEKSClusterPolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoClusterRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSClusterPolicy +``` + +**AmazonEKSComputePolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoClusterRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSComputePolicy +``` + +**AmazonEKSBlockStoragePolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoClusterRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy +``` + +**AmazonEKSLoadBalancingPolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoClusterRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy +``` + +**AmazonEKSNetworkingPolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoClusterRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy +``` + +### Create an EKS Auto Mode Node IAM Role + +#### Step 1: Create the Trust Policy + +Create a trust policy that allows the Amazon EKS service to assume the role. Save the policy as `node-trust-policy.json`: + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} +``` + +#### Step 2: Create the Node IAM Role + +Use the **node-trust-policy.json** file from the previous step to define which entities can assume the role. Run the following command to create the Node IAM Role: + +``` +aws iam create-role \ + --role-name AmazonEKSAutoNodeRole \ + --assume-role-policy-document file://node-trust-policy.json +``` + +#### Step 3: Note the Role ARN + +After creating the role, retrieve and save the ARN of the Node IAM Role. You will need this ARN in subsequent steps. Use the following command to get the ARN: + +``` +aws iam get-role --role-name AmazonEKSAutoNodeRole --query "Role.Arn" --output text +``` + +#### Step 4: Attach Required Policies + +Attach the following {aws} managed policies to the Node IAM Role to provide the necessary permissions: + +**AmazonEKSWorkerNodeMinimalPolicy**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoNodeRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy +``` + +**AmazonEC2ContainerRegistryPullOnly**: + +``` +aws iam attach-role-policy \ + --role-name AmazonEKSAutoNodeRole \ + --policy-arn arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly +``` + +=== Create cluster + +. Create your cluster with the command that follows. Before running the command, make the following replacements: ++ +** Replace [.replaceable]`region-code` with the {aws} Region that you want to create your cluster in. 
+** Replace [.replaceable]`my-cluster` with a name for your cluster. The name can contain only alphanumeric characters (case-sensitive), hyphens, and underscores. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. +** Replace [.replaceable]`1.30` with any xref:kubernetes-versions[Amazon EKS supported version,linkend=kubernetes-versions]. +** Replace [.replaceable]`111122223333` with your account ID +** If you have created differently named IAM Roles for the Cluster and Node roles, replace the ARNs. +** Replace the values for `subnetIds` with your own. You can also add additional IDs. You must specify at least two subnet IDs. ++ +The subnets that you choose must meet the xref:network-requirements-subnets[Amazon EKS subnet requirements,linkend=network-requirements-subnets]. Before selecting subnets, we recommend that you're familiar with all of the xref:network-reqs[Amazon EKS VPC and subnet requirements and considerations,linkend=network-reqs]. +** If you don't want to specify a security group ID, remove `,securityGroupIds=sg-` from the command. If you want to specify one or more security group IDs, replace the values for `securityGroupIds` with your own. You can also add additional IDs. ++ +Whether you choose any security groups or not, Amazon EKS creates a security group that enables communication between your cluster and your VPC. Amazon EKS associates this security group, and any that you choose, to the network interfaces that it creates. For more information about the cluster security group that Amazon EKS creates, see <>. You can modify the rules in the cluster security group that Amazon EKS creates. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks create-cluster \ + --region region-code \ + --name my-cluster \ + --kubernetes-version 1.30 \ + --role-arn arn:aws:iam::111122223333:role/AmazonEKSAutoClusterRole \ + --resources-vpc-config '{"subnetIds": ["subnet-ExampleID1","subnet-ExampleID2"], "securityGroupIds": ["sg-ExampleID1"], "endpointPublicAccess": true, "endpointPrivateAccess": true}' \ + --compute-config '{"enabled": true, "nodeRoleArn": "arn:aws:iam::111122223333:role/AmazonEKSAutoNodeRole", "nodePools": ["general-purpose", "system"]}' \ + --kubernetes-network-config '{"elasticLoadBalancing": {"enabled": true}}' \ + --storage-config '{"blockStorage": {"enabled": true}}' \ + --access-config '{"authenticationMode": "API"}' +---- ++ +NOTE: You might receive an error that one of the Availability Zones in your request doesn't have sufficient capacity to create an Amazon EKS cluster. If this happens, the error output contains the Availability Zones that can support a new cluster. Retry creating your cluster with at least two subnets that are located in the supported Availability Zones for your account. For more information, see <>. ++ +The following are optional settings that, if required, must be added to the previous command. You can only enable these options when you create the cluster, not after. +** If you want to specify which `IPv4` Classless Inter-domain Routing (CIDR) block [.noloc]`Kubernetes` assigns service IP addresses from, you must specify it by adding the `--kubernetes-network-config serviceIpv4Cidr=` to the following command. ++ +Specifying your own range can help prevent conflicts between [.noloc]`Kubernetes` services and other networks peered or connected to your VPC. Enter a range in CIDR notation. 
For example: `10.2.0.0/16`. ++ +The CIDR block must meet the following requirements: ++ +*** Be within one of the following ranges: `10.0.0.0/8`, `172.16.0.0/12`, or `192.168.0.0/16`. +*** Have a minimum size of `/24` and a maximum size of `/12`. +*** Not overlap with the range of the VPC for your Amazon EKS resources. ++ +You can only specify this option when using the `IPv4` address family and only at cluster creation. If you don't specify this, then [.noloc]`Kubernetes` assigns service IP addresses from either the `10.100.0.0/16` or `172.20.0.0/16` CIDR blocks. +** If you're creating a cluster and want the cluster to assign `IPv6` addresses to [.noloc]`Pods` and services instead of `IPv4` addresses, add `--kubernetes-network-config ipFamily=ipv6` to the following command. ++ +[.noloc]`Kubernetes` assigns `IPv4` addresses to [.noloc]`Pods` and services, by default. Before deciding to use the `IPv6` family, make sure that you're familiar with all of the considerations and requirements in the <>, <>, <>, and <> topics. If you choose the `IPv6` family, you can't specify an address range for [.noloc]`Kubernetes` to assign `IPv6` service addresses from like you can for the `IPv4` family. [.noloc]`Kubernetes` assigns service addresses from the unique local address range (`fc00::/7`). ++ +. It takes several minutes to provision the cluster. You can query the status of your cluster with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --region region-code --name my-cluster --query "cluster.status" +---- + +== Next steps + +* <> +* <> +* <>. +* <>. +* <>. diff --git a/latest/ug/clusters/create-cluster.adoc b/latest/ug/clusters/create-cluster.adoc new file mode 100644 index 00000000..db3350e7 --- /dev/null +++ b/latest/ug/clusters/create-cluster.adoc @@ -0,0 +1,318 @@ +//!!NODE_ROOT
+ +[.topic] +[[create-cluster,create-cluster.title]] += Create an Amazon EKS cluster +:info_doctype: section +:info_title: Create an Amazon EKS cluster +:info_titleabbrev: Create a cluster +:info_abstract: Learn how to create an Amazon EKS cluster to run Kubernetes applications, including prerequisites, networking options, and add-on configurations. + +include::../attributes.txt[] + + +[abstract] +-- +Learn how to create an Amazon EKS cluster to run Kubernetes applications, including prerequisites, networking options, and add-on configurations. +-- + +[NOTE] +==== +This topic covers creating EKS clusters without EKS Auto Mode. + +For detailed instructions on creating an EKS Auto Mode cluster, see <>. + +To get started with EKS Auto Mode, see <>. +==== + + +This topic provides an overview of the available options and describes what to consider when you create an Amazon EKS cluster. If you need to create a cluster with your on-premises infrastructure as the compute for nodes, see <>. If this is your first time creating an Amazon EKS cluster, we recommend that you follow one of our guides in <>. These guides help you to create a simple, default cluster without expanding into all of the available options. + +== Prerequisites + +* An existing VPC and subnets that meet <>. Before you deploy a cluster for production use, we recommend that you have a thorough understanding of the VPC and subnet requirements. If you don't have a VPC and subnets, you can create them using an <>. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. The {aws} CLI version that is installed in {aws} CloudShell might also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the _{aws} CloudShell User Guide_. +* An link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] with permissions to `create` and `describe` an Amazon EKS cluster. For more information, see <> and <>. + +== Step 1: Create cluster IAM role + +. If you already have a cluster IAM role, or you're going to create your cluster with `eksctl`, then you can skip this step. By default, `eksctl` creates a role for you. ++ +. Run the following command to create an IAM trust policy JSON file. ++ +[source,json,subs="verbatim,attributes"] +---- +cat >eks-cluster-role-trust-policy.json <>. 
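+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Service": "eks.amazonaws.com"
+      },
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
+EOF
+----
+. Create the role. You can replace [.replaceable]`myAmazonEKSClusterRole` with any name that you choose.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws iam create-role \
+  --role-name myAmazonEKSClusterRole \
+  --assume-role-policy-document file://eks-cluster-role-trust-policy.json
+----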
++ +Attach the Amazon EKS managed policy named link:aws-managed-policy/latest/reference/AmazonEKSClusterPolicy.html#AmazonEKSClusterPolicy-json[AmazonEKSClusterPolicy,type="documentation"] to the role. To attach an IAM policy to an link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"], the principal that is attaching the policy must be assigned one of the following IAM actions (permissions): `iam:AttachUserPolicy` or `iam:AttachRolePolicy`. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam attach-role-policy --policy-arn {arn-aws}iam::aws:policy/AmazonEKSClusterPolicy --role-name myAmazonEKSClusterRole +---- + +== Step 2: Create cluster + +You can create a cluster by using: + +* xref:step2-eksctl[`eksctl`] +* xref:step2-console[the {aws-management-console}] +* xref:step2-cli[the {aws} CLI] + +[[step2-eksctl,step2-eksctl.title]] +=== Create cluster - eksctl + +. You need version `{eksctl-min-version}` or later of the `eksctl` command line tool installed on your device or {aws} CloudShell. To install or update `eksctl`, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. +. Create an Amazon EKS `IPv4` cluster with the Amazon EKS default [.noloc]`Kubernetes` version in your default {aws} Region. Before running command, make the following replacements: +. Replace [.replaceable]`region-code` with the {aws} Region that you want to create your cluster in. +. Replace [.replaceable]`my-cluster` with a name for your cluster. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. +. Replace [.replaceable]`1.29` with any xref:kubernetes-versions[Amazon EKS supported version,linkend=kubernetes-versions]. +. Change the values for `vpc-private-subnets` to meet your requirements. You can also add additional IDs. You must specify at least two subnet IDs. If you'd rather specify public subnets, you can change `--vpc-private-subnets` to `--vpc-public-subnets`. Public subnets have an associated route table with a route to an internet gateway, but private subnets don't have an associated route table. We recommend using private subnets whenever possible. ++ +The subnets that you choose must meet the <>. Before selecting subnets, we recommend that you're familiar with all of the <>. ++ +[source,bash,subs="verbatim,attributes"] +. Run the following command: ++ +---- +eksctl create cluster --name my-cluster --region region-code --version 1.29 --vpc-private-subnets subnet-ExampleID1,subnet-ExampleID2 --without-nodegroup +---- ++ +Cluster provisioning takes several minutes. While the cluster is being created, several lines of output appear. The last line of output is similar to the following example line. ++ +[source,bash,subs="verbatim,attributes"] +---- +[✓] EKS cluster "my-cluster" in "region-code" region is ready +---- +. Continue with <> + +==== Optional Settings + +To see the most options that you can specify when creating a cluster with `eksctl`, use the `eksctl create cluster --help` command. To see all the available options, you can use a `config` file. For more information, see https://eksctl.io/usage/creating-and-managing-clusters/#using-config-files[Using config files] and the https://eksctl.io/usage/schema/[config file schema] in the `eksctl` documentation. 
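For example, a config file along the following lines creates a cluster similar to the previous command. This is a sketch only: the file name `cluster-config.yaml`, the subnet IDs, and the Availability Zone keys are placeholders, and the `kubernetesNetworkConfig` section shows one of the optional settings that follow.

[source,bash,subs="verbatim,attributes"]
----
cat >cluster-config.yaml <<EOF
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig

metadata:
  name: my-cluster
  region: region-code
  version: "1.29"

vpc:
  subnets:
    private:
      us-west-2a: { id: subnet-ExampleID1 }
      us-west-2b: { id: subnet-ExampleID2 }

# Optional: the CIDR block that Kubernetes assigns service IP addresses from.
kubernetesNetworkConfig:
  serviceIPv4CIDR: 10.2.0.0/16
EOF

eksctl create cluster -f cluster-config.yaml
----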
You can find https://github.com/weaveworks/eksctl/tree/master/examples[config file examples] on [.noloc]`GitHub`. + +The following are optional settings that, if required, must be added to the previous command. You can only enable these options when you create the cluster, not after. If you need to specify these options, you must create the cluster with an https://eksctl.io/usage/creating-and-managing-clusters/#using-config-files[eksctl config file] and specify the settings, rather than using the previous command. + +* If you want to specify one or more security groups that Amazon EKS assigns to the network interfaces that it creates, specify the https://eksctl.io/usage/schema/#vpc-securityGroup[securityGroup] option. ++ +Whether you choose any security groups or not, Amazon EKS creates a security group that enables communication between your cluster and your VPC. Amazon EKS associates this security group, and any that you choose, to the network interfaces that it creates. For more information about the cluster security group that Amazon EKS creates, see <>. You can modify the rules in the cluster security group that Amazon EKS creates. +* If you want to specify which `IPv4` Classless Inter-domain Routing (CIDR) block [.noloc]`Kubernetes` assigns service IP addresses from, specify the https://eksctl.io/usage/schema/#kubernetesNetworkConfig-serviceIPv4CIDR[serviceIPv4CIDR] option. ++ +Specifying your own range can help prevent conflicts between [.noloc]`Kubernetes` services and other networks peered or connected to your VPC. Enter a range in CIDR notation. For example: `10.2.0.0/16`. ++ +The CIDR block must meet the following requirements: ++ +** Be within one of the following ranges: `10.0.0.0/8`, `172.16.0.0/12`, or `192.168.0.0/16`. +** Have a minimum size of `/24` and a maximum size of `/12`. +** Not overlap with the range of the VPC for your Amazon EKS resources. ++ +You can only specify this option when using the `IPv4` address family and only at cluster creation. If you don't specify this, then [.noloc]`Kubernetes` assigns service IP addresses from either the `10.100.0.0/16` or `172.20.0.0/16` CIDR blocks. +* If you're creating cluster and want the cluster to assign `IPv6` addresses to [.noloc]`Pods` and services instead of `IPv4` addresses, specify the https://eksctl.io/usage/schema/#kubernetesNetworkConfig-ipFamily[ipFamily] option. ++ +[.noloc]`Kubernetes` assigns `IPv4` addresses to [.noloc]`Pods` and services, by default. Before deciding to use the `IPv6` family, make sure that you're familiar with all of the considerations and requirements in the xref:network-requirements-vpc[VPC requirements and considerations,linkend=network-requirements-vpc], xref:network-requirements-subnets[Subnet requirements and considerations,linkend=network-requirements-subnets], xref:sec-group-reqs[View Amazon EKS security group requirements for clusters,linkend=sec-group-reqs], and <> topics. If you choose the `IPv6` family, you can't specify an address range for [.noloc]`Kubernetes` to assign `IPv6` service addresses from like you can for the `IPv4` family. [.noloc]`Kubernetes` assigns service addresses from the unique local address range (`fc00::/7`). + +[[step2-console,step2-console.title]] +=== Create cluster - {aws} console + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose *Add cluster* and then choose *Create*. +. Under *Configuration options* select *Custom configuration* +** For information about quickly creating a cluster wih EKS Auto Mode, see <>. +. 
Under *EKS Auto Mode*, toggle *Use EKS Auto Mode* off. +** For information about creating an EKS Auto Mode cluster with custom configuration, see <>. +. On the *Configure cluster* page, enter the following fields: ++ +** *Name* – A name for your cluster. The name can contain only alphanumeric characters (case-sensitive), hyphens, and underscores. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. +** *Cluster IAM role* – Choose the Amazon EKS cluster IAM role that you created to allow the [.noloc]`Kubernetes` control plane to manage {aws} resources on your behalf. +** *[.noloc]`Kubernetes` version* – The version of [.noloc]`Kubernetes` to use for your cluster. We recommend selecting the latest version, unless you need an earlier version. +** *Support type* -- The [.noloc]`Kubernetes` version policy you would like to set for your cluster. If you want your cluster to only run on a standard support version, you can choose *Standard support*. If you want your cluster to enter extended support at the end of standard support for a version, you can choose *Extended support*. If you select a [.noloc]`Kubernetes` version that is currently in extended support, you can not select standard support as an option. +** *Secrets encryption* – (Optional) Choose to enable secrets encryption of [.noloc]`Kubernetes` secrets using a KMS key. You can also enable this after you create your cluster. Before you enable this capability, make sure that you're familiar with the information in xref:enable-kms[Encrypt Kubernetes secrets with {aws} KMS on existing clusters,linkend=enable-kms]. +** *Tags* – (Optional) Add any tags to your cluster. For more information, see <>. +** *ARC Zonal shift* - (Optional) You can use Route53 Application Recovery controller to mitigate impaired availability zones. For more information, see <>. +. In the *Cluster access* section of the configure cluster page, enter the following fields: +** *Bootstrap cluster administrator access* -- The cluster creator is automatically a Kubernetes administrator. If you want to disable this, select *Disallow cluster administrator access*. +** *Cluster authentication mode* -- Determine how you want to grant IAM users and roles access to Kubernetes APIs. For more information, see <>. ++ +When you're done with this page, choose *Next*. +. On the *Specify networking* page, select values for the following fields: ++ +** *VPC* – Choose an existing VPC that meets xref:network-requirements-vpc[Amazon EKS VPC requirements,linkend=network-requirements-vpc] to create your cluster in. Before choosing a VPC, we recommend that you're familiar with all of the requirements and considerations in xref:network-reqs[View Amazon EKS networking requirements for VPC and subnets,linkend=network-reqs]. You can't change which VPC you want to use after cluster creation. If no VPCs are listed, then you need to create one first. For more information, see <>. +** *Subnets* – By default, all available subnets in the VPC specified in the previous field are preselected. You must select at least two. ++ +The subnets that you choose must meet the <>. Before selecting subnets, we recommend that you're familiar with all of the <>. ++ +*Security groups* – (Optional) Specify one or more security groups that you want Amazon EKS to associate to the network interfaces that it creates. 
++ +Whether you choose any security groups or not, Amazon EKS creates a security group that enables communication between your cluster and your VPC. Amazon EKS associates this security group, and any that you choose, to the network interfaces that it creates. For more information about the cluster security group that Amazon EKS creates, see <>. You can modify the rules in the cluster security group that Amazon EKS creates. +** *Choose cluster IP address family* – You can choose either *IPv4* and *IPv6*. ++ +[.noloc]`Kubernetes` assigns `IPv4` addresses to [.noloc]`Pods` and services, by default. Before deciding to use the `IPv6` family, make sure that you're familiar with all of the considerations and requirements in the <>, <>, <>, and <> topics. If you choose the `IPv6` family, you can't specify an address range for [.noloc]`Kubernetes` to assign `IPv6` service addresses from like you can for the `IPv4` family. [.noloc]`Kubernetes` assigns service addresses from the unique local address range (`fc00::/7`). ++ +** (Optional) Choose *Configure [.noloc]`Kubernetes` Service IP address range* and specify a *Service `IPv4` range*. ++ +Specifying your own range can help prevent conflicts between [.noloc]`Kubernetes` services and other networks peered or connected to your VPC. Enter a range in CIDR notation. For example: `10.2.0.0/16`. ++ +The CIDR block must meet the following requirements: ++ +*** Be within one of the following ranges: `10.0.0.0/8`, `172.16.0.0/12`, or `192.168.0.0/16`. +*** Have a minimum size of `/24` and a maximum size of `/12`. +*** Not overlap with the range of the VPC for your Amazon EKS resources. + ++ +You can only specify this option when using the `IPv4` address family and only at cluster creation. If you don't specify this, then [.noloc]`Kubernetes` assigns service IP addresses from either the `10.100.0.0/16` or `172.20.0.0/16` CIDR blocks. +** For *Cluster endpoint access*, select an option. After your cluster is created, you can change this option. Before selecting a non-default option, make sure to familiarize yourself with the options and their implications. For more information, see <>. ++ +When you're done with this page, choose *Next*. +. (Optional) On the *Configure observability* page, choose which *Metrics* and *Control plane logging* options to turn on. By default, each log type is turned off. ++ +** For more information about the [.noloc]`Prometheus` metrics option, see <>. +** For more information about the *Control plane logging* options, see <>. + ++ +When you're done with this page, choose *Next*. +. On the *Select add-ons* page, choose the add-ons that you want to add to your cluster. Certain add-ons are pre-selected. You can choose as many *Amazon EKS add-ons* and *{aws} Marketplace add-ons* as you require. If the *{aws} Marketplace add-ons* that you want to install isn't listed, you can click the page numbering to view additional page results or search for available *{aws} Marketplace add-ons* by entering text in the search box. You can also filter by *category*, *vendor*, or *pricing model* and then choose the add-ons from the search results. When creating a cluster, you can view, select, and install any add-on that supports EKS Pod Identities as detailed in <>. ++ +When you're done with this page, choose *Next*. ++ +Some add-ons, such as Amazon VPC CNI, CoreDNS, and kube-proxy, are installed by default. If you disable any of the default add-ons, this may affect your ability to run Kubernetes applications. +. 
On the *Configure selected add-ons settings* page, select the version that you want to install. You can always update to a later version after cluster creation. ++ +For add-ons that support EKS Pod Identities, you can use the console to automatically generate the role with the name, {aws} managed policy, and trust policy prepopulated specifically for the add-on. You can re-use existing roles or create new roles for supported add-ons. For the steps to use the console to create roles for add-ons that support EKS Pod Identities, see <<_create_add_on_console>>. If an add-on does not support EKS Pod Identity, a message displays with instructions to use the wizard to create the IAM roles for service accounts (IRSA) after the cluster is created. ++ +You can update the configuration of each add-on after cluster creation. For more information about configuring add-ons, see <>. When you're done with this page, choose *Next*. +. On the *Review and create* page, review the information that you entered or selected on the previous pages. If you need to make changes, choose *Edit*. When you're satisfied, choose *Create*. The *Status* field shows *CREATING* while the cluster is provisioned. ++ +NOTE: You might receive an error that one of the Availability Zones in your request doesn't have sufficient capacity to create an Amazon EKS cluster. If this happens, the error output contains the Availability Zones that can support a new cluster. Retry creating your cluster with at least two subnets that are located in the supported Availability Zones for your account. For more information, see <>. ++ +Cluster provisioning takes several minutes. +. Continue with <> + +[[step2-cli,step2-cli.title]] +=== Create cluster - {aws} CLI +. Create your cluster with the command that follows. Before running the command, make the following replacements: ++ +** Replace [.replaceable]`region-code` with the {aws} Region that you want to create your cluster in. +** Replace [.replaceable]`my-cluster` with a name for your cluster. The name can contain only alphanumeric characters (case-sensitive), hyphens, and underscores. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. +** Replace [.replaceable]`1.30` with any xref:kubernetes-versions[Amazon EKS supported version,linkend=kubernetes-versions]. +** Replace [.replaceable]`111122223333` with your account ID and [.replaceable]`myAmazonEKSClusterRole` with the name of your cluster IAM role. +** Replace the values for `subnetIds` with your own. You can also add additional IDs. You must specify at least two subnet IDs. ++ +The subnets that you choose must meet the xref:network-requirements-subnets[Amazon EKS subnet requirements,linkend=network-requirements-subnets]. Before selecting subnets, we recommend that you're familiar with all of the xref:network-reqs[Amazon EKS VPC and subnet requirements and considerations,linkend=network-reqs]. +** If you don't want to specify a security group ID, remove `,securityGroupIds=sg-` from the command. If you want to specify one or more security group IDs, replace the values for `securityGroupIds` with your own. You can also add additional IDs. ++ +Whether you choose any security groups or not, Amazon EKS creates a security group that enables communication between your cluster and your VPC. Amazon EKS associates this security group, and any that you choose, to the network interfaces that it creates. 
For more information about the cluster security group that Amazon EKS creates, see <>. You can modify the rules in the cluster security group that Amazon EKS creates. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks create-cluster --region region-code --name my-cluster --kubernetes-version 1.30 \ + --role-arn {arn-aws}iam::111122223333:role/myAmazonEKSClusterRole \ + --resources-vpc-config subnetIds=subnet-ExampleID1,subnet-ExampleID2,securityGroupIds=sg-ExampleID1 +---- ++ +NOTE: You might receive an error that one of the Availability Zones in your request doesn't have sufficient capacity to create an Amazon EKS cluster. If this happens, the error output contains the Availability Zones that can support a new cluster. Retry creating your cluster with at least two subnets that are located in the supported Availability Zones for your account. For more information, see <>. ++ +The following are optional settings that, if required, must be added to the previous command. You can only enable these options when you create the cluster, not after. + +** By default, EKS installs multiple networking add-ons during cluster creation. This includes the Amazon VPC CNI, CoreDNS, and kube-proxy. ++ +If you'd like to disable the installation of these default networking add-ons, use the parameter below. This may be used for alternate CNIs, such as Cilium. Review the link:eks/latest/APIReference/API_CreateCluster.html[EKS API reference,type="documentation"] for more information. ++ +`aws eks create-cluster --bootstrapSelfManagedAddons false` +** If you want to specify which `IPv4` Classless Inter-domain Routing (CIDR) block [.noloc]`Kubernetes` assigns service IP addresses from, you must specify it by adding the `--kubernetes-network-config serviceIpv4Cidr=` to the following command. ++ +Specifying your own range can help prevent conflicts between [.noloc]`Kubernetes` services and other networks peered or connected to your VPC. Enter a range in CIDR notation. For example: `10.2.0.0/16`. ++ +The CIDR block must meet the following requirements: ++ +*** Be within one of the following ranges: `10.0.0.0/8`, `172.16.0.0/12`, or `192.168.0.0/16`. +*** Have a minimum size of `/24` and a maximum size of `/12`. +*** Not overlap with the range of the VPC for your Amazon EKS resources. + ++ +You can only specify this option when using the `IPv4` address family and only at cluster creation. If you don't specify this, then [.noloc]`Kubernetes` assigns service IP addresses from either the `10.100.0.0/16` or `172.20.0.0/16` CIDR blocks. +** If you're creating a cluster and want the cluster to assign `IPv6` addresses to [.noloc]`Pods` and services instead of `IPv4` addresses, add `--kubernetes-network-config ipFamily=ipv6` to the following command. ++ +[.noloc]`Kubernetes` assigns `IPv4` addresses to [.noloc]`Pods` and services, by default. Before deciding to use the `IPv6` family, make sure that you're familiar with all of the considerations and requirements in the <>, <>, <>, and <> topics. If you choose the `IPv6` family, you can't specify an address range for [.noloc]`Kubernetes` to assign `IPv6` service addresses from like you can for the `IPv4` family. [.noloc]`Kubernetes` assigns service addresses from the unique local address range (`fc00::/7`). ++ +. It takes several minutes to provision the cluster. You can query the status of your cluster with the following command. 
++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --region region-code --name my-cluster --query "cluster.status" +---- ++ +Don't proceed to the next step until the output returned is `ACTIVE`. +. Continue with <> + +[[step3,step3.title]] +== Step 3: Update kubeconfig +. If you created your cluster using `eksctl`, then you can skip this step. This is because `eksctl` already completed this step for you. Enable `kubectl` to communicate with your cluster by adding a new context to the `kubectl` `config` file. For more information about how to create and update the file, see <>. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-kubeconfig --region region-code --name my-cluster +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +Added new context {arn-aws}eks:region-code:111122223333:cluster/my-cluster to /home/username/.kube/config +---- +. Confirm communication with your cluster by running the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get svc +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +kubernetes ClusterIP 10.100.0.1 443/TCP 28h +---- + +== Step 4: Cluster setup + +. (Recommended) To use some Amazon EKS add-ons, or to enable individual [.noloc]`Kubernetes` workloads to have specific {aws} Identity and Access Management (IAM) permissions, xref:enable-iam-roles-for-service-accounts[create an IAM OpenID Connect (OIDC) provider,linkend=enable-iam-roles-for-service-accounts] for your cluster. You only need to create an IAM [.noloc]`OIDC` provider for your cluster once. To learn more about Amazon EKS add-ons, see <>. To learn more about assigning specific IAM permissions to your workloads, see <>. +. (Recommended) Configure your cluster for the [.noloc]`Amazon VPC CNI plugin for Kubernetes` plugin before deploying Amazon EC2 nodes to your cluster. By default, the plugin was installed with your cluster. When you add Amazon EC2 nodes to your cluster, the plugin is automatically deployed to each Amazon EC2 node that you add. The plugin requires you to attach one of the following IAM policies to an IAM role. If your cluster uses the `IPv4` family, use the link:aws-managed-policy/latest/reference/AmazonEKS_CNI_Policy.html[AmazonEKS_CNI_Policy,type="documentation"] managed IAM policy. If your cluster uses the `IPv6` family, use an xref:cni-iam-role-create-ipv6-policy[IAM policy that you create,linkend=cni-iam-role-create-ipv6-policy]. ++ +The IAM role that you attach the policy to can be the node IAM role, or a dedicated role used only for the plugin. We recommend attaching the policy to this role. For more information about creating the role, see <> or <>. +. If you deployed your cluster using the {aws-management-console}, you can skip this step. The {aws-management-console} deploys the [.noloc]`Amazon VPC CNI plugin for Kubernetes`, [.noloc]`CoreDNS`, and `kube-proxy` Amazon EKS add-ons, by default. ++ +If you deploy your cluster using either `eksctl` or the {aws} CLI, then the [.noloc]`Amazon VPC CNI plugin for Kubernetes`, [.noloc]`CoreDNS`, and `kube-proxy` self-managed add-ons are deployed. You can migrate the [.noloc]`Amazon VPC CNI plugin for Kubernetes`, [.noloc]`CoreDNS`, and `kube-proxy` self-managed add-ons that are deployed with your cluster to Amazon EKS add-ons. For more information, see <>. +. 
(Optional) If you haven't already done so, you can enable [.noloc]`Prometheus` metrics for your cluster. For more information, see link:prometheus/latest/userguide/AMP-collector-how-to.html#AMP-collector-create[Create a scraper,type="documentation"] in the _Amazon Managed Service for Prometheus User Guide_. +. If you plan to deploy workloads to your cluster that use Amazon EBS volumes , and you created a `1.23` or later cluster, then you must install the xref:ebs-csi[Amazon EBS CSI,linkend=ebs-csi] to your cluster before deploying the workloads. + +== Next steps + +* The link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] that created the cluster is the only principal that has access to the cluster. <> so they can access your cluster. +* If the IAM principal that created the cluster only has the minimum IAM permissions referenced in the prerequisites, then you might want to add additional Amazon EKS permissions for that principal. For more information about granting Amazon EKS permissions to IAM principals, see <>. +* If you want the IAM principal that created the cluster, or any other principals to view [.noloc]`Kubernetes` resources in the Amazon EKS console, grant the <> to the entities. +* If you want nodes and IAM principals to access your cluster from within your VPC, enable the private endpoint for your cluster. The public endpoint is enabled by default. You can disable the public endpoint once you've enabled the private endpoint, if desired. For more information, see <>. +* <>. +* <>. +* <>. diff --git a/latest/ug/clusters/delete-cluster.adoc b/latest/ug/clusters/delete-cluster.adoc new file mode 100644 index 00000000..d9da2815 --- /dev/null +++ b/latest/ug/clusters/delete-cluster.adoc @@ -0,0 +1,193 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[delete-cluster,delete-cluster.title]] += Delete a cluster +:info_doctype: section +:info_title: Delete a cluster +:info_titleabbrev: Delete a cluster +:info_abstract: Learn how to delete Amazon EKS clusters, including managed and self-managed node groups, Fargate profiles, related services, and {aws} CloudFormation stacks using eksctl, {aws-management-console}, or {aws} CLI for cost optimization and resource cleanup. + +[abstract] +-- +Learn how to delete Amazon EKS clusters, including managed and self-managed node groups, Fargate profiles, related services, and {aws} CloudFormation stacks using eksctl, {aws-management-console}, or {aws} CLI for cost optimization and resource cleanup. +-- + +When you're done using an Amazon EKS cluster, you should delete the resources associated with it so that you don't incur any unnecessary costs. + + + +You can delete a cluster with `eksctl`, the {aws-management-console}, or the {aws} CLI. + +== Considerations + +* If you have active services in your cluster that are associated with a load balancer, you must delete those services before deleting the cluster so that the load balancers are deleted properly. Otherwise, you can have orphaned resources in your VPC that prevent you from being able to delete the VPC. +* If you receive an error because the cluster creator has been removed, see link:premiumsupport/knowledge-center/eks-api-server-unauthorized-error[this article,type="marketing"] to resolve. +* Amazon Managed Service for Prometheus resources are outside of the cluster lifecycle and need to be maintained independent of the cluster. When you delete your cluster, make sure to also delete any applicable scrapers to stop applicable costs. For more information, see link:prometheus/latest/userguide/AMP-collector-how-to.html#AMP-collector-list-delete[Find and delete scrapers,type="documentation"] in the _Amazon Managed Service for Prometheus User Guide_. +* To remove a connected cluster, see <> + +=== Considerations for EKS Auto Mode + +* Any EKS Auto Mode Nodes will be deleted, including the EC2 managed instances +* All load balancers will be deleted + +For more information, see <>. + +== Delete cluster (eksctl) + +This procedure requires `eksctl` version `{eksctl-min-version}` or later. You can check your version with the following command: + +[source,bash,subs="verbatim,attributes"] +---- +eksctl version +---- + +For instructions on how to install or upgrade `eksctl`, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. + +. List all services running in your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get svc --all-namespaces +---- +.. Delete any services that have an associated `EXTERNAL-IP` value. These services are fronted by an Elastic Load Balancing load balancer, and you must delete them in [.noloc]`Kubernetes` to allow the load balancer and associated resources to be properly released. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl delete svc service-name +---- +. Delete the cluster and its associated nodes with the following command, replacing [.replaceable]`prod` with your cluster name. 
++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl delete cluster --name prod +---- ++ +Output: ++ +[source,bash,subs="verbatim,attributes"] +---- +[ℹ] using region region-code +[ℹ] deleting EKS cluster "prod" +[ℹ] will delete stack "eksctl-prod-nodegroup-standard-nodes" +[ℹ] waiting for stack "eksctl-prod-nodegroup-standard-nodes" to get deleted +[ℹ] will delete stack "eksctl-prod-cluster" +[✔] the following EKS cluster resource(s) for "prod" will be deleted: cluster. If in doubt, check CloudFormation console +---- + + +== Delete cluster ({aws} console) +. List all services running in your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get svc --all-namespaces +---- +. Delete any services that have an associated `EXTERNAL-IP` value. These services are fronted by an Elastic Load Balancing load balancer, and you must delete them in [.noloc]`Kubernetes` to allow the load balancer and associated resources to be properly released. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl delete svc service-name +---- +. Delete all node groups and Fargate profiles. ++ +.. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +.. In the left navigation pane, choose Amazon EKS *Clusters*, and then in the tabbed list of clusters, choose the name of the cluster that you want to delete. +.. Choose the *Compute* tab and choose a node group to delete. Choose *Delete*, enter the name of the node group, and then choose *Delete*. Delete all node groups in the cluster. ++ +NOTE: The node groups listed are xref:managed-node-groups[managed node groups,linkend=managed-node-groups] only. +.. Choose a *Fargate Profile* to delete, select *Delete*, enter the name of the profile, and then choose *Delete*. Delete all Fargate profiles in the cluster. +. Delete all self-managed node {aws} CloudFormation stacks. ++ +.. Open the link:cloudformation/[{aws} CloudFormation console,type="console"]. +.. Choose the node stack to delete, and then choose *Delete*. +.. In the *Delete stack* confirmation dialog box, choose *Delete stack*. Delete all self-managed node stacks in the cluster. +. Delete the cluster. ++ +.. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +.. choose the cluster to delete and choose *Delete*. +.. On the delete cluster confirmation screen, choose *Delete*. +. (Optional) Delete the VPC {aws} CloudFormation stack. ++ +.. Open the link:cloudformation/[{aws} CloudFormation console,type="console"]. +.. Select the VPC stack to delete, and then choose *Delete*. +.. In the *Delete stack* confirmation dialog box, choose *Delete stack*. + + +== Delete cluster ({aws} CLI) + +. List all services running in your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get svc --all-namespaces +---- +. Delete any services that have an associated `EXTERNAL-IP` value. These services are fronted by an Elastic Load Balancing load balancer, and you must delete them in [.noloc]`Kubernetes` to allow the load balancer and associated resources to be properly released. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl delete svc service-name +---- +. Delete all node groups and Fargate profiles. ++ +.. List the node groups in your cluster with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks list-nodegroups --cluster-name my-cluster +---- ++ +NOTE: The node groups listed are xref:managed-node-groups[managed node groups,linkend=managed-node-groups] only. +.. 
Delete each node group with the following command. Delete all node groups in the cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks delete-nodegroup --nodegroup-name my-nodegroup --cluster-name my-cluster +---- +.. List the Fargate profiles in your cluster with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks list-fargate-profiles --cluster-name my-cluster +---- +.. Delete each Fargate profile with the following command. Delete all Fargate profiles in the cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks delete-fargate-profile --fargate-profile-name my-fargate-profile --cluster-name my-cluster +---- +. Delete all self-managed node {aws} CloudFormation stacks. ++ +.. List your available {aws} CloudFormation stacks with the following command. Find the node template name in the resulting output. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation list-stacks --query "StackSummaries[].StackName" +---- +.. Delete each node stack with the following command, replacing [.replaceable]`node-stack` with your node stack name. Delete all self-managed node stacks in the cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation delete-stack --stack-name node-stack +---- +. Delete the cluster with the following command, replacing [.replaceable]`my-cluster` with your cluster name. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks delete-cluster --name my-cluster +---- +. (Optional) Delete the VPC {aws} CloudFormation stack. ++ +.. List your available {aws} CloudFormation stacks with the following command. Find the VPC template name in the resulting output. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation list-stacks --query "StackSummaries[].StackName" +---- +.. Delete the VPC stack with the following command, replacing [.replaceable]`my-vpc-stack` with your VPC stack name. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation delete-stack --stack-name my-vpc-stack +---- diff --git a/latest/ug/clusters/disable-extended-support.adoc b/latest/ug/clusters/disable-extended-support.adoc new file mode 100644 index 00000000..eb45b093 --- /dev/null +++ b/latest/ug/clusters/disable-extended-support.adoc @@ -0,0 +1,38 @@ +include::../attributes.txt[] +[.topic] +[[disable-extended-support,disable-extended-support.title]] += Prevent increased cluster costs by disabling EKS extended support +:info_titleabbrev: Disable extended support + +This topic describes how to set the _upgrade policy_ of an EKS cluster to disable extended support. The upgrade policy of an EKS cluster determines what happens when a cluster reaches the end of the standard _support period_. If a cluster upgrade policy has extended support disabled, it will be automatically upgraded to the next [.noloc]`Kubernetes` version. + +For more information about upgrade policies, see <>. + +[IMPORTANT] +==== + +You cannot disable extended support once your cluster has entered it. You can only disable extended support for clusters on standard support. + +{aws} recommends upgrading your cluster to a version in the standard support period. + +==== + +[[disable-support-policy-console,disable-support-policy-console.title]] +== Disable EKS extended support ({aws} Console) +. Navigate to your EKS cluster in the {aws} Console. Select the *Overview* tab on the *Cluster Info* page. +. In the *Kubernetes version setting* section, select *Manage*. +. Select *Standard support* and then *Save changes*. 
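
You can confirm which support type is set for a cluster by describing it with the {aws} CLI. This is a minimal sketch that assumes the cluster name `my-cluster` and a recent {aws} CLI version that returns the `upgradePolicy` field:

[source,bash,subs="verbatim,attributes"]
----
aws eks describe-cluster --name my-cluster \
  --query "cluster.upgradePolicy.supportType" \
  --output text
----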
+ + +[[disable-support-policy-cli,disable-support-policy-cli.title]] +== Disable EKS extended support ({aws} CLI) +. Verify that the {aws} CLI is installed and that you are logged in. link:cli/latest/userguide/getting-started-install.html[Learn how to update and install the {aws} CLI.,type="documentation"] +. Determine the name of your EKS cluster. +. Run the following command, replacing [.replaceable]`my-cluster` with the name of your cluster: ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-cluster-config \ +--name my-cluster \ +--upgrade-policy supportType=STANDARD +---- diff --git a/latest/ug/clusters/disable-windows-support.adoc b/latest/ug/clusters/disable-windows-support.adoc new file mode 100644 index 00000000..e884af94 --- /dev/null +++ b/latest/ug/clusters/disable-windows-support.adoc @@ -0,0 +1,28 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[disable-windows-support,disable-windows-support.title]] += Disable [.noloc]`Windows` support +:info_doctype: section +:info_title: Disable Windows support + +. If your cluster contains Amazon Linux nodes and you use xref:security-groups-for-pods[security groups for Pods,linkend=security-groups-for-pods] with them, then skip this step. ++ +Remove the `AmazonEKSVPCResourceController` managed IAM policy from your <>. Replace [.replaceable]`eksClusterRole` with the name of your cluster role and [.replaceable]`111122223333` with your account ID. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam detach-role-policy \ + --role-name eksClusterRole \ + --policy-arn {arn-aws}iam::aws:policy/AmazonEKSVPCResourceController +---- +. Disable [.noloc]`Windows` IPAM in the `amazon-vpc-cni` ConfigMap. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl patch configmap/amazon-vpc-cni \ + -n kube-system \ + --type merge \ + -p '{"data":{"enable-windows-ipam":"false"}}' +---- diff --git a/latest/ug/clusters/enable-extended-support.adoc b/latest/ug/clusters/enable-extended-support.adoc new file mode 100644 index 00000000..2e6dd4b7 --- /dev/null +++ b/latest/ug/clusters/enable-extended-support.adoc @@ -0,0 +1,42 @@ +include::../attributes.txt[] +[.topic] +[[enable-extended-support,enable-extended-support.title]] += Add flexibility to plan Kubernetes version upgrades by enabling EKS extended support +:info_titleabbrev: Enable extended support + +This topic describes how to set the _upgrade policy_ of an EKS cluster to enable extended support. The upgrade policy of an EKS cluster determines what happens when a cluster reaches the end of the standard _support period_. If a cluster upgrade policy has extended support enabled, it will enter the extended support period at the end of the standard support period. The cluster will not be automatically upgraded at the end of the standard support period. + +Clusters that are in the _extended support period_ incur higher costs. A cluster that merely has the upgrade policy set to enable extended support, but is still in the _standard support period_, incurs standard costs. + +EKS clusters have the upgrade policy set to enable extended support by default. + +For more information about upgrade policies, see <>. + +[IMPORTANT] +==== + +If you want your cluster to stay on its current [.noloc]`Kubernetes` version to take advantage of the extended support period, you must enable the extended support upgrade policy before the end of the standard support period. + +If you do not enable extended support, your cluster will be automatically upgraded. + +==== + +[[enable-support-policy-console,enable-support-policy-console.title]] +== Enable EKS extended support ({aws} Console) +. Navigate to your EKS cluster in the {aws} Console. Select the *Overview* tab on the *Cluster Info* page. +. In the *Kubernetes version settings* section, select *Manage*. +. Select *Extended support* and then *Save changes*. + + +[[enable-support-policy-cli,enable-support-policy-cli.title]] +== Enable EKS extended support ({aws} CLI) +. Verify that the {aws} CLI is installed and that you are logged in. link:cli/latest/userguide/getting-started-install.html[Learn how to update and install the {aws} CLI.,type="documentation"] +. Determine the name of your EKS cluster. +.
Run the following command, replacing [.replaceable]`my-cluster` with the name of your cluster: ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-cluster-config \ +--name my-cluster \ +--upgrade-policy supportType=EXTENDED +---- diff --git a/latest/ug/clusters/images b/latest/ug/clusters/images new file mode 120000 index 00000000..5e675731 --- /dev/null +++ b/latest/ug/clusters/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/latest/ug/clusters/kubernetes-versions-extended.adoc b/latest/ug/clusters/kubernetes-versions-extended.adoc new file mode 100644 index 00000000..7974fd6a --- /dev/null +++ b/latest/ug/clusters/kubernetes-versions-extended.adoc @@ -0,0 +1,161 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[kubernetes-versions-extended,kubernetes-versions-extended.title]] += Review release notes for [.noloc]`Kubernetes` versions on extended support +:info_titleabbrev: Extended support versions + +[abstract] +-- +This topic gives important changes to be aware of for each [.noloc]`Kubernetes` version in extended support. +-- + +This topic gives important changes to be aware of for each [.noloc]`Kubernetes` version in extended support. When upgrading, carefully review the changes that have occurred between the old and new versions for your cluster. + +[[kubernetes-1.28,kubernetes-1.28.title]] +== [.noloc]`Kubernetes` 1.28 + +[.noloc]`Kubernetes` `1.28` is now available in Amazon EKS. For more information about [.noloc]`Kubernetes` `1.28`, see the https://kubernetes.io/blog/2023/08/15/kubernetes-v1-28-release/[official release announcement]. + + + +* [.noloc]`Kubernetes` `v1.28` expanded the supported skew between core node and control plane components by one minor version, from `n-2` to `n-3`, so that node components (``kubelet`` and `kube-proxy`) for the oldest supported minor version can work with control plane components (``kube-apiserver``, `kube-scheduler`, `kube-controller-manager`, `cloud-controller-manager`) for the newest supported minor version. +* Metrics `force_delete_pods_total` and `force_delete_pod_errors_total` in the `Pod GC Controller` are enhanced to account for all forceful pods deletion. A reason is added to the metric to indicate whether the pod is forcefully deleted because it's terminated, orphaned, terminating with the out-of-service taint, or terminating and unscheduled. +* The `PersistentVolume (PV)` controller has been modified to automatically assign a default `StorageClass` to any unbound `PersistentVolumeClaim` with the `storageClassName` not set. Additionally, the `PersistentVolumeClaim` admission validation mechanism within the API server has been adjusted to allow changing values from an unset state to an actual `StorageClass` name. + +For the complete [.noloc]`Kubernetes` `1.28` changelog, see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#changelog-since-v1270. + +[[kubernetes-1.27,kubernetes-1.27.title]] +== [.noloc]`Kubernetes` 1.27 + +[.noloc]`Kubernetes` `1.27` is now available in Amazon EKS. For more information about [.noloc]`Kubernetes` `1.27`, see the https://kubernetes.io/blog/2023/04/11/kubernetes-v1-27-release/[official release announcement]. + +[IMPORTANT] +==== + + +* The support for the alpha `seccomp` annotations `seccomp.security.alpha.kubernetes.io/pod` and `container.seccomp.security.alpha.kubernetes.io` annotations was removed. The alpha `seccomp` annotations was deprecated in `1.19`, and with their removal in `1.27`, `seccomp` fields will no longer auto-populate for `Pods` with `seccomp` annotations. Instead, use the `securityContext.seccompProfile` field for `Pods` or containers to configure `seccomp` profiles. To check whether you are using the deprecated alpha `seccomp` annotations in your cluster, run the following command: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get pods --all-namespaces -o json | grep -E 'seccomp.security.alpha.kubernetes.io/pod|container.seccomp.security.alpha.kubernetes.io' +---- +* The `--container-runtime` command line argument for the `kubelet` was removed. The default container runtime for Amazon EKS has been `containerd` since `1.24`, which eliminates the need to specify the container runtime. 
From `1.27` onwards, Amazon EKS will ignore the `--container-runtime` argument passed to any bootstrap scripts. It is important that you don't pass this argument to `--kubelet-extra-args` in order to prevent errors during the node bootstrap process. You must remove the `--container-runtime` argument from all of your node creation workflows and build scripts. + +==== + +* The `kubelet` in [.noloc]`Kubernetes` `1.27` increased the default `kubeAPIQPS` to `50` and `kubeAPIBurst` to `100`. These enhancements allow the `kubelet` to handle a higher volume of API queries, improving response times and performance. When the demands for `Pods` increase, due to scaling requirements, the revised defaults ensure that the `kubelet` can efficiently manage the increased workload. As a result, `Pod` launches are quicker and cluster operations are more effective. +* You can use more fine grained `Pod` topology to spread policies such as `minDomain`. This parameter gives you the ability to specify the minimum number of domains your `Pods` should be spread across. `nodeAffinityPolicy` and `nodeTaintPolicy` allow for an extra level of granularity in governing `Pod` distribution. This is in accordance to node affinities, taints, and the `matchLabelKeys` field in the `topologySpreadConstraints` of your `Pod's` specification. This permits the selection of `Pods` for spreading calculations following a rolling upgrade. +* [.noloc]`Kubernetes` `1.27` promoted to beta a new policy mechanism for `StatefulSets` that controls the lifetime of their `PersistentVolumeClaims`(`PVCs`). The new `PVC` retention policy lets you specify if the `PVCs` generated from the `StatefulSet` spec template will be automatically deleted or retained when the `StatefulSet` is deleted or replicas in the `StatefulSet` are scaled down. +* The https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/[goaway-chance] option in the [.noloc]`Kubernetes` API server helps prevent `HTTP/2` client connections from being stuck on a single API server instance, by randomly closing a connection. When the connection is closed, the client will try to reconnect, and will likely land on a different API server as a result of load balancing. Amazon EKS version `1.27` has enabled `goaway-chance` flag. If your workload running on Amazon EKS cluster uses a client that is not compatible with https://www.rfc-editor.org/rfc/rfc7540#section-6.8[HTTP GOAWAY], we recommend that you update your client to handle `GOAWAY` by reconnecting on connection termination. + +For the complete [.noloc]`Kubernetes` `1.27` changelog, see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1260. + +[[kubernetes-1.26,kubernetes-1.26.title]] +== [.noloc]`Kubernetes` 1.26 + +[.noloc]`Kubernetes` `1.26` is now available in Amazon EKS. For more information about [.noloc]`Kubernetes` `1.26`, see the https://kubernetes.io/blog/2022/12/09/kubernetes-v1-26-release/[official release announcement]. + +[IMPORTANT] +==== + +[.noloc]`Kubernetes` `1.26` no longer supports [.noloc]`CRI` `v1alpha2`. This results in the `kubelet` no longer registering the node if the container runtime doesn't support [.noloc]`CRI` `v1`. This also means that [.noloc]`Kubernetes` `1.26` doesn't support containerd minor version `1.5` and earlier. If you're using containerd, you need to upgrade to containerd version `1.6.0` or later before you upgrade any nodes to [.noloc]`Kubernetes` `1.26`. 
You also need to upgrade any other container runtimes that only support the `v1alpha2`. For more information, defer to the container runtime vendor. By default, [.noloc]`Amazon Linux` and [.noloc]`Bottlerocket` AMIs include containerd version `1.6.6`. + +==== + +* Before you upgrade to [.noloc]`Kubernetes` `1.26`, upgrade your [.noloc]`Amazon VPC CNI plugin for Kubernetes` to version `1.12` or later. If you don't upgrade to [.noloc]`Amazon VPC CNI plugin for Kubernetes` version `1.12` or later, the [.noloc]`Amazon VPC CNI plugin for Kubernetes` will crash. For more information, see <>. +* The https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/[goaway-chance] option in the [.noloc]`Kubernetes` API server helps prevent `HTTP/2` client connections from being stuck on a single API server instance, by randomly closing a connection. When the connection is closed, the client will try to reconnect, and will likely land on a different API server as a result of load balancing. Amazon EKS version `1.26` has enabled `goaway-chance` flag. If your workload running on Amazon EKS cluster uses a client that is not compatible with https://www.rfc-editor.org/rfc/rfc7540#section-6.8[HTTP GOAWAY], we recommend that you update your client to handle `GOAWAY` by reconnecting on connection termination. + +For the complete [.noloc]`Kubernetes` `1.26` changelog, see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#changelog-since-v1250. + +[[kubernetes-1.25,kubernetes-1.25.title]] +== [.noloc]`Kubernetes` 1.25 + +[.noloc]`Kubernetes` `1.25` is now available in Amazon EKS. For more information about [.noloc]`Kubernetes` `1.25`, see the https://kubernetes.io/blog/2022/08/23/kubernetes-v1-25-release/[official release announcement]. + +[IMPORTANT] +==== + + +* Amazon EC2 `P2` instances aren't supported on Amazon EKS because they require `NVIDIA` driver version 470 or earlier. +* `PodSecurityPolicy` ([.noloc]`PSP`) is removed in [.noloc]`Kubernetes` `1.25`. [.noloc]`PSPs` are replaced with https://kubernetes.io/docs/concepts/security/pod-security-admission/[Pod Security Admission (PSA)] and Pod Security Standards [.noloc]`(PSS)`. [.noloc]`PSA` is a built-in admission controller that implements the security controls outlined in the https://kubernetes.io/docs/concepts/security/pod-security-standards/[PSS]. [.noloc]`PSA` and [.noloc]`PSS` are graduated to stable in [.noloc]`Kubernetes` `1.25` and are enabled in Amazon EKS by default. If you have [.noloc]`PSPs` in your cluster, make sure to migrate from [.noloc]`PSP` to the built-in [.noloc]`Kubernetes` [.noloc]`PSS` or to a policy-as-code solution before upgrading your cluster to version `1.25`. If you don't migrate from PSP, you might encounter interruptions to your workloads. For more information, see the xref:pod-security-policy-removal-faq[Migrate from legacy pod security policies (PSP),linkend=pod-security-policy-removal-faq]. +* [.noloc]`Kubernetes` version `1.25` contains changes that alter the behavior of an existing feature known as API Priority and Fairness (APF). APF serves to shield the API server from potential overload during periods of heightened request volumes. It does this by placing restrictions on the number of concurrent requests that can be processed at any given time. This is achieved through the application of distinct priority levels and limits to requests originating from various workloads or users. 
This approach ensures that critical applications or high-priority requests receive preferential treatment, while simultaneously preventing lower priority requests from overwhelming the API server. For more information, see https://kubernetes.io/docs/concepts/cluster-administration/flow-control/[API Priority and Fairness] in the [.noloc]`Kubernetes` documentation or https://aws.github.io/aws-eks-best-practices/scalability/docs/control-plane/#api-priority-and-fairness[API Priority and Fairness] in the EKS Best Practices Guide. ++ +These updates were introduced in https://github.com/kubernetes/kubernetes/pull/103521[PR #10352] and https://github.com/kubernetes/kubernetes/pull/118601[PR #118601]. Previously, APF treated all types of requests uniformly, with each request consuming a single unit of the concurrent request limit. The APF behavior change assigns higher units of concurrency to `LIST` requests due to the exceptionally heavy burden put on the API server by these requests. The API server estimates the number of objects that will be returned by a `LIST` request. It assigns a unit of concurrency that is proportional to the number of objects returned. ++ +Upon upgrading to Amazon EKS version `1.25` or higher, this updated behavior might cause workloads with heavy `LIST` requests (that previously functioned without issue) to encounter rate limiting. This would be indicated by an HTTP 429 response code. To avoid potential workload disruption due to `LIST` requests being rate limited, we strongly encourage you to restructure your workloads to reduce the rate of these requests. Alternatively, you can address this issue by adjusting the APF settings to allocate more capacity for essential requests while reducing the capacity allocated to non-essential ones. For more information about these mitigation techniques, see https://aws.github.io/aws-eks-best-practices/scalability/docs/control-plane/#preventing-dropped-requests[Preventing Dropped Requests] in the EKS Best Practices Guide. +* Amazon EKS `1.25` includes enhancements to cluster authentication that contain updated [.noloc]`YAML` libraries. If a [.noloc]`YAML` value in the `aws-auth` `ConfigMap` found in the `kube-system` namespace starts with a macro, where the first character is a curly brace, you should add quotation marks (`" "`) before and after the curly braces (`{ }`). This is required to ensure that `aws-iam-authenticator` version `v0.6.3` accurately parses the `aws-auth` `ConfigMap` in Amazon EKS `1.25`. +* The beta API version (`discovery.k8s.io/v1beta1`) of `EndpointSlice` was deprecated in [.noloc]`Kubernetes` `1.21` and is no longer served as of [.noloc]`Kubernetes` `1.25`. This API has been updated to `discovery.k8s.io/v1`. For more information, see https://kubernetes.io/docs/reference/using-api/deprecation-guide/#endpointslice-v125[EndpointSlice] in the [.noloc]`Kubernetes` documentation. The [.noloc]`{aws} Load Balancer Controller` `v2.4.6` and earlier used the `v1beta1` endpoint to communicate with `EndpointSlices`. If you're using the `EndpointSlices` configuration for the [.noloc]`{aws} Load Balancer Controller`, you must upgrade to [.noloc]`{aws} Load Balancer Controller` `v2.4.7` _before_ upgrading your Amazon EKS cluster to `1.25`. If you upgrade to `1.25` while using the `EndpointSlices` configuration for the [.noloc]`{aws} Load Balancer Controller`, the controller will crash and result in interruptions to your workloads. To upgrade the controller, see <>. 
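+
One way to check which [.noloc]`{aws} Load Balancer Controller` version is installed, assuming the controller was deployed with the default deployment name and namespace used in the Amazon EKS installation steps, is:
+
[source,bash,subs="verbatim,attributes"]
----
kubectl get deployment -n kube-system aws-load-balancer-controller \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
----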
+* The beta API version (`autoscaling/v2beta1`) of HorizontalPodAutoscaler is no longer served as of Kubernetes `1.25`. This API was deprecated in version `1.23`. Migrate manifests and API clients to use the `autoscaling/v2` HorizontalPodAutoscaler API version. For more information, see https://kubernetes.io/docs/reference/using-api/deprecation-guide/#horizontalpodautoscaler-v125[the Kubernetes documentation]. + +==== + +* `SeccompDefault` is promoted to beta in [.noloc]`Kubernetes` `1.25`. By setting the `--seccomp-default` flag when you configure `kubelet`, the container runtime uses its `RuntimeDefaultseccomp` profile, rather than the unconfined (`seccomp disabled`) mode. The default profiles provide a strong set of security defaults, while preserving the functionality of the workload. Although this flag is available, Amazon EKS doesn't enable this flag by default, so Amazon EKS behavior is effectively unchanged. If you want to, you can start enabling this on your nodes. For more details, see the tutorial https://kubernetes.io/docs/tutorials/security/seccomp/#enable-the-use-of-runtimedefault-as-the-default-seccomp-profile-for-all-workloads/[Restrict a Container's Syscalls with seccomp] in the [.noloc]`Kubernetes` documentation. +* Support for the Container Runtime Interface (CRI) for [.noloc]`Docker` (also known as [.noloc]`dockershim`) was removed from [.noloc]`Kubernetes` `1.24` and later. The only container runtime in Amazon EKS official [.noloc]`AMIs` for [.noloc]`Kubernetes` `1.24` and later clusters is [.noloc]`containerd`. Before upgrading to Amazon EKS `1.24` or later, remove any reference to bootstrap script flags that aren't supported anymore. For more information, see <>. +* The support for wildcard queries was deprecated in [.noloc]`CoreDNS` `1.8.7` and removed in [.noloc]`CoreDNS` `1.9`. This was done as a security measure. Wildcard queries no longer work and return [.noloc]`NXDOMAIN` instead of an IP address. +* The https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/[goaway-chance] option in the [.noloc]`Kubernetes` API server helps prevent `HTTP/2` client connections from being stuck on a single API server instance, by randomly closing a connection. When the connection is closed, the client will try to reconnect, and will likely land on a different API server as a result of load balancing. Amazon EKS version `1.25` has enabled `goaway-chance` flag. If your workload running on Amazon EKS cluster uses a client that is not compatible with https://www.rfc-editor.org/rfc/rfc7540#section-6.8[HTTP GOAWAY], we recommend that you update your client to handle `GOAWAY` by reconnecting on connection termination. + +For the complete [.noloc]`Kubernetes` `1.25` changelog, see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#changelog-since-v1240. + +[[kubernetes-1.24,kubernetes-1.24.title]] +== [.noloc]`Kubernetes` 1.24 + +[.noloc]`Kubernetes` `1.24` is now available in Amazon EKS. For more information about [.noloc]`Kubernetes` `1.24`, see the https://kubernetes.io/blog/2022/05/03/kubernetes-1-24-release-announcement/[official release announcement]. + +[IMPORTANT] +==== + + +* Starting with [.noloc]`Kubernetes` `1.24`, new beta APIs aren't enabled in clusters by default. By default, existing beta APIs and new versions of existing beta APIs continue to be enabled. Amazon EKS follows the same behavior as upstream [.noloc]`Kubernetes` `1.24`. 
+* Support for Container Runtime Interface (CRI) for [.noloc]`Docker` (also known as `dockershim`) is removed from [.noloc]`Kubernetes` `1.24`. Amazon EKS official AMIs have [.noloc]`containerd` as the only runtime. Before moving to Amazon EKS `1.24` or higher, you must remove any reference to bootstrap script flags that aren't supported anymore. You must also make sure that IP forwarding is enabled for your worker nodes. For more information, see <>.
+* If you already have [.noloc]`Fluentd` configured for [.noloc]`Container Insights`, then you must migrate [.noloc]`Fluentd` to [.noloc]`Fluent Bit` before updating your cluster. The [.noloc]`Fluentd` parsers are configured to only parse log messages in JSON format. Unlike `dockerd`, the `containerd` container runtime has log messages that aren't in JSON format. If you don't migrate to [.noloc]`Fluent Bit`, some of the configured [.noloc]`Fluentd` parsers will generate a massive amount of errors inside the [.noloc]`Fluentd` container. For more information on migrating, see link:AmazonCloudWatch/latest/monitoring/Container-Insights-setup-logs-FluentBit.html[Set up Fluent Bit as a DaemonSet to send logs to CloudWatch Logs,type="documentation"].
+* In [.noloc]`Kubernetes` `1.23` and earlier, `kubelet` serving certificates with unverifiable IP and DNS Subject Alternative Names (SANs) are automatically issued, with the unverifiable SANs omitted from the provisioned certificate. In version `1.24` and later clusters, `kubelet` serving certificates aren't issued if any SAN can't be verified. This prevents the `kubectl exec` and `kubectl logs` commands from working. For more information, see <>.
+* When upgrading an Amazon EKS `1.23` cluster that uses [.noloc]`Fluent Bit`, you must make sure that it's running `k8s/1.3.12` or later. You can do this by reapplying the latest applicable [.noloc]`Fluent Bit` YAML file from [.noloc]`GitHub`. For more information, see link:AmazonCloudWatch/latest/monitoring/Container-Insights-setup-logs-FluentBit.html#Container-Insights-FluentBit-setup[Setting up Fluent Bit,type="documentation"] in the _Amazon CloudWatch User Guide_.
+
+====
+
+* You can use Topology Aware Hints to indicate your preference for keeping traffic in zone when cluster worker nodes are deployed across multiple Availability Zones. Routing traffic within a zone can help reduce costs and improve network performance. By default, Topology Aware Hints are enabled in Amazon EKS `1.24`. For more information, see https://kubernetes.io/docs/concepts/services-networking/topology-aware-hints/[Topology Aware Hints] in the [.noloc]`Kubernetes` documentation.
+* The `PodSecurityPolicy` ([.noloc]`PSP`) is scheduled for removal in [.noloc]`Kubernetes` `1.25`. [.noloc]`PSPs` are being replaced with https://kubernetes.io/docs/concepts/security/pod-security-admission/[Pod Security Admission (PSA)]. PSA is a built-in admission controller that uses the security controls that are outlined in the https://kubernetes.io/docs/concepts/security/pod-security-standards/[Pod Security Standards (PSS)]. PSA and PSS are both beta features and are enabled in Amazon EKS by default. To address the removal of [.noloc]`PSP` in version `1.25`, we recommend that you implement PSS in Amazon EKS. For more information, see link:containers/implementing-pod-security-standards-in-amazon-eks[Implementing Pod Security Standards in Amazon EKS,type="blog"] on the {aws} blog.
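++
+For example, with PSA you opt a namespace into a Pod Security Standard by labeling the namespace. The following sketch applies the `baseline` standard in enforce mode to a hypothetical namespace named `my-namespace`:
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl label --overwrite namespace my-namespace pod-security.kubernetes.io/enforce=baseline
+----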
+* The `client.authentication.k8s.io/v1alpha1` ExecCredential is removed in [.noloc]`Kubernetes` `1.24`. The ExecCredential API was generally available in [.noloc]`Kubernetes` `1.22`. If you use a client-go credential plugin that relies on the `v1alpha1` API, contact the distributor of your plugin for guidance on migrating to the `v1` API.
+* For [.noloc]`Kubernetes` `1.24`, we contributed a feature to the upstream Cluster Autoscaler project that simplifies scaling Amazon EKS managed node groups to and from zero nodes. Previously, for the Cluster Autoscaler to understand the resources, labels, and taints of a managed node group that was scaled to zero nodes, you needed to tag the underlying Amazon EC2 Auto Scaling group with the details of the nodes that it was responsible for. Now, when there are no running nodes in the managed node group, the Cluster Autoscaler calls the Amazon EKS `DescribeNodegroup` API operation. This API operation provides the information that the Cluster Autoscaler requires of the managed node group's resources, labels, and taints. This feature requires that you add the `eks:DescribeNodegroup` permission to the Cluster Autoscaler service account IAM policy. When the value of a Cluster Autoscaler tag on the Auto Scaling group powering an Amazon EKS managed node group conflicts with the node group itself, the Cluster Autoscaler prefers the value of the Auto Scaling group tag. This is so that you can override values as needed. For more information, see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md[Cluster Autoscaler].
+* If you intend to use [.noloc]`Inferentia` or [.noloc]`Trainium` instance types with Amazon EKS `1.24`, you must upgrade to the {aws} [.noloc]`Neuron` device plugin version 1.9.3.0 or later. For more information, see https://awsdocs-neuron.readthedocs-hosted.com/en/latest/release-notes/containers/neuron-k8.html#id46[Neuron K8 release [1.9.3.0]] in the {aws} [.noloc]`Neuron` Documentation.
+* `Containerd` has `IPv6` enabled for [.noloc]`Pods` by default. It applies node kernel settings to [.noloc]`Pod` network namespaces. Because of this, containers in a [.noloc]`Pod` bind to both `IPv4` (`127.0.0.1`) and `IPv6` (`::1`) loopback addresses. `IPv6` is the default protocol for communication. Before updating your cluster to version `1.24`, we recommend that you test your multi-container [.noloc]`Pods`. Modify apps so that they can bind to all IP addresses on loopback interfaces. The majority of libraries enable `IPv6` binding, which is backward compatible with `IPv4`. When it's not possible to modify your application code, you have two options:
++
+** Run an `init` container that disables `IPv6` (`sysctl -w net.ipv6.conf.all.disable_ipv6=1`), as shown in the example after this list.
+** Configure a https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook[mutating admission webhook] to inject an `init` container alongside your application [.noloc]`Pods`.
+
++
+If you need to block `IPv6` for all [.noloc]`Pods` across all nodes, you might have to disable `IPv6` on your instances.
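++
+The following is a minimal sketch of the first option: an `init` container that turns off `IPv6` inside the [.noloc]`Pod` network namespace before the application containers start. The image and names are placeholders, and the example assumes that your cluster's security controls allow a privileged `init` container, which is required to change this `sysctl` setting:
++
+[source,yaml,subs="verbatim,attributes"]
+----
+apiVersion: v1
+kind: Pod
+metadata:
+  name: ipv4-only-app
+spec:
+  initContainers:
+    # Disables IPv6 in this Pod's network namespace only.
+    - name: disable-ipv6
+      image: busybox:1.36
+      command: ["sysctl", "-w", "net.ipv6.conf.all.disable_ipv6=1"]
+      securityContext:
+        privileged: true
+  containers:
+    - name: app
+      image: nginx # replace with your application image
+      ports:
+        - containerPort: 80
+----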
+* The https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/[goaway-chance] option in the [.noloc]`Kubernetes` API server helps prevent `HTTP/2` client connections from being stuck on a single API server instance, by randomly closing a connection. When the connection is closed, the client will try to reconnect, and will likely land on a different API server as a result of load balancing. Amazon EKS version `1.24` has enabled the `goaway-chance` flag. If a workload running on your Amazon EKS cluster uses a client that is not compatible with https://www.rfc-editor.org/rfc/rfc7540#section-6.8[HTTP GOAWAY], we recommend that you update your client to handle `GOAWAY` by reconnecting on connection termination.
+
+For the complete [.noloc]`Kubernetes` `1.24` changelog, see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md#changelog-since-v1230.
+
+[[kubernetes-1.23,kubernetes-1.23.title]]
+== [.noloc]`Kubernetes` 1.23
+
+[.noloc]`Kubernetes` `1.23` is now available in Amazon EKS. For more information about [.noloc]`Kubernetes` `1.23`, see the https://kubernetes.io/blog/2021/12/07/kubernetes-1-23-release-announcement/[official release announcement].
+
+[IMPORTANT]
+====
+
+
+* The [.noloc]`Kubernetes` in-tree to container storage interface (CSI) volume migration feature is enabled. This feature enables the replacement of existing [.noloc]`Kubernetes` in-tree storage plugins for Amazon EBS with a corresponding Amazon EBS CSI driver. For more information, see https://kubernetes.io/blog/2019/12/09/kubernetes-1-17-feature-csi-migration-beta/[Kubernetes 1.17 Feature: Kubernetes In-Tree to CSI Volume Migration Moves to Beta] on the [.noloc]`Kubernetes` blog.
++
+The feature translates in-tree APIs to equivalent CSI APIs and delegates operations to a replacement CSI driver. With this feature, if you use existing `StorageClass`, `PersistentVolume`, and `PersistentVolumeClaim` objects that belong to these workloads, there likely won't be any noticeable change. The feature enables [.noloc]`Kubernetes` to delegate all storage management operations from the in-tree plugin to the CSI driver. If you use Amazon EBS volumes in an existing cluster, install the Amazon EBS CSI driver in your cluster before you update your cluster to version `1.23`. If you don't install the driver before updating an existing cluster, interruptions to your workloads might occur. If you plan to deploy workloads that use Amazon EBS volumes in a new `1.23` cluster, install the Amazon EBS CSI driver in your cluster before deploying the workloads to your cluster. For instructions on how to install the Amazon EBS CSI driver on your cluster, see <>. For frequently asked questions about the migration feature, see <>.
+* Extended Support for Amazon EKS optimized [.noloc]`Windows` AMIs that are published by {aws} isn't available for [.noloc]`Kubernetes` version `1.23` but is available for [.noloc]`Kubernetes` version `1.24` and higher.
+
+====
+
+* [.noloc]`Kubernetes` stopped supporting `dockershim` in version `1.20` and removed `dockershim` in version `1.24`. For more information, see https://kubernetes.io/blog/2022/01/07/kubernetes-is-moving-on-from-dockershim/[Kubernetes is Moving on From Dockershim: Commitments and Next Steps] in the [.noloc]`Kubernetes` blog. Amazon EKS will end support for `dockershim` starting in Amazon EKS version `1.24`. Starting with Amazon EKS version `1.24`, Amazon EKS official AMIs will have `containerd` as the only runtime.
++
+Even though Amazon EKS version `1.23` continues to support `dockershim`, we recommend that you start testing your applications now to identify and remove any [.noloc]`Docker` dependencies. This way, you are prepared to update your cluster to version `1.24`. For more information about `dockershim` removal, see <>.
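++
+To confirm which container runtime your nodes are currently using, you can run the following read-only command and review the `CONTAINER-RUNTIME` column, which shows a value such as `docker://` or `containerd://` for each node:
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get nodes -o wide
+----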
+* [.noloc]`Kubernetes` graduated `IPv4`/``IPv6`` dual-stack networking for [.noloc]`Pods`, services, and nodes to general availability. However, Amazon EKS and the [.noloc]`Amazon VPC CNI plugin for Kubernetes` don't support dual-stack networking. Your clusters can assign `IPv4` or `IPv6` addresses to [.noloc]`Pods` and services, but can't assign both address types.
+* [.noloc]`Kubernetes` graduated the Pod Security Admission (PSA) feature to beta. The feature is enabled by default. For more information, see https://kubernetes.io/docs/concepts/security/pod-security-admission/[Pod Security Admission] in the [.noloc]`Kubernetes` documentation. PSA replaces the https://aws.github.io/aws-eks-best-practices/security/docs/pods/#pod-security-solutions[Pod Security Policy] ([.noloc]`PSP`) admission controller. The PSP admission controller isn't supported and is scheduled for removal in [.noloc]`Kubernetes` version `1.25`.
++
+The [.noloc]`PSA` admission controller enforces [.noloc]`Pod` security standards on [.noloc]`Pods` in a namespace based on specific namespace labels that set the enforcement level. For more information, see https://aws.github.io/aws-eks-best-practices/security/docs/pods/#pod-security-standards-pss-and-pod-security-admission-psa[Pod Security Standards (PSS) and Pod Security Admission (PSA)] in the Amazon EKS best practices guide.
+* The `kube-proxy` image deployed with clusters is now the https://gallery.ecr.aws/eks-distro-build-tooling/eks-distro-minimal-base-iptables[minimal base image] maintained by Amazon EKS Distro (EKS-D). The image contains minimal packages and doesn't have shells or package managers.
+* [.noloc]`Kubernetes` graduated ephemeral containers to beta. Ephemeral containers are temporary containers that run in the same namespace as an existing [.noloc]`Pod`. You can use them to observe the state of [.noloc]`Pods` and containers for troubleshooting and debugging purposes. This is especially useful for interactive troubleshooting when `kubectl exec` is insufficient because either a container has crashed or a container image doesn't include debugging utilities. An example of a container image that doesn't include debugging utilities is a https://github.com/GoogleContainerTools/distroless#distroless-container-images[distroless image]. For more information, see https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#ephemeral-container[Debugging with an ephemeral debug container] in the [.noloc]`Kubernetes` documentation.
+* [.noloc]`Kubernetes` graduated the `HorizontalPodAutoscaler` `autoscaling/v2` stable API to general availability. The `HorizontalPodAutoscaler` `autoscaling/v2beta2` API is deprecated. It will be unavailable in `1.26`.
+* The https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/[goaway-chance] option in the [.noloc]`Kubernetes` API server helps prevent `HTTP/2` client connections from being stuck on a single API server instance, by randomly closing a connection. When the connection is closed, the client will try to reconnect, and will likely land on a different API server as a result of load balancing. Amazon EKS version `1.23` has enabled the `goaway-chance` flag.
If a workload running on your Amazon EKS cluster uses a client that is not compatible with https://www.rfc-editor.org/rfc/rfc7540#section-6.8[HTTP GOAWAY], we recommend that you update your client to handle `GOAWAY` by reconnecting on connection termination.
+
+For the complete [.noloc]`Kubernetes` `1.23` changelog, see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.23.md#changelog-since-v1220.
diff --git a/latest/ug/clusters/kubernetes-versions-standard.adoc b/latest/ug/clusters/kubernetes-versions-standard.adoc
new file mode 100644
index 00000000..ad1034fc
--- /dev/null
+++ b/latest/ug/clusters/kubernetes-versions-standard.adoc
@@ -0,0 +1,88 @@
+//!!NODE_ROOT
+ +[.topic] +[[kubernetes-versions-standard,kubernetes-versions-standard.title]] += Review release notes for [.noloc]`Kubernetes` versions on standard support +:info_titleabbrev: Standard support versions + +include::../attributes.txt[] + +[abstract] +-- +This topic gives important changes to be aware of for each [.noloc]`Kubernetes` version in standard support. +-- + +This topic gives important changes to be aware of for each [.noloc]`Kubernetes` version in standard support. When upgrading, carefully review the changes that have occurred between the old and new versions for your cluster. + +[NOTE] +==== + +For `1.24` and later clusters, officially published Amazon EKS AMIs include `containerd` as the only runtime. [.noloc]`Kubernetes` versions earlier than `1.24` use [.noloc]`Docker` as the default runtime. These versions have a bootstrap flag option that you can use to test out your workloads on any supported cluster with `containerd`. For more information, see <>. + +==== + + +[[kubernetes-1.31,kubernetes-1.31.title]] +== [.noloc]`Kubernetes` 1.31 + +[.noloc]`Kubernetes` `1.31` is now available in Amazon EKS. For more information about [.noloc]`Kubernetes` `1.31`, see the https://kubernetes.io/blog/2024/08/13/kubernetes-v1-31-release/[official release announcement]. + +[IMPORTANT] +==== + + +* The kubelet flag `--keep-terminated-pod-volumes` deprecated since 2017 has been removed as part of the `v1.31` release. This change impacts how terminated pod volumes are handled by the kubelet. If you are using this flag in your node configurations, you must update your bootstrap scripts and launch templates to remove it before upgrading. + +==== + +* The beta `VolumeAttributesClass` feature gate and API resource is enabled in Amazon EKS `v1.31`. This feature allows cluster operators to modify mutable properties of Persistent Volumes (PVs) managed by compatible CSI Drivers, including the Amazon EBS CSI Driver. To leverage this feature, ensure that your CSI Driver supports the `VolumeAttributesClass` feature (for the Amazon EBS CSI Driver, upgrade to version `v1.35.0` or later to automatically enable the feature). You will be able to create `VolumeAttributesClass` objects to define the desired volume attributes, such as volume type and throughput, and associate them with your Persistent Volume Claims (PVCs). See the https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/[official Kubernetes documentation] as well as the documentation of your CSI driver for more information. +** For more information about the Amazon EBS CSI Driver, see <>. +* Kubernetes support for https://apparmor.net/[AppArmor] has graduated to stable and is now generally available for public use. This feature allows you to protect your containers with AppArmor by setting the `appArmorProfile.type` field in the container's `securityContext`. Prior to Kubernetes `v1.30`, AppArmor was controlled by annotations. Starting with `v1.30`, it is controlled using fields. To leverage this feature, we recommend migrating away from annotations and using the `appArmorProfile.type` field to ensure that your workloads are compatible. +* The PersistentVolume last phase transition time feature has graduated to stable and is now generally available for public use in Kubernetes `v1.31`. This feature introduces a new field, `.status.lastTransitionTime`, in the PersistentVolumeStatus, which provides a timestamp of when a PersistentVolume last transitioned to a different phase. 
This enhancement allows for better tracking and management of PersistentVolumes, particularly in scenarios where understanding the lifecycle of volumes is important.
+
+For the complete [.noloc]`Kubernetes` `1.31` changelog, see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md
+
+[[kubernetes-1.30,kubernetes-1.30.title]]
+== [.noloc]`Kubernetes` 1.30
+
+[.noloc]`Kubernetes` `1.30` is now available in Amazon EKS. For more information about [.noloc]`Kubernetes` `1.30`, see the https://kubernetes.io/blog/2024/04/17/kubernetes-v1-30-release/[official release announcement].
+
+[IMPORTANT]
+====
+
+
+* Starting with Amazon EKS version `1.30` or newer, any newly created managed node groups will automatically default to using Amazon Linux 2023 (AL2023) as the node operating system. Previously, new node groups would default to Amazon Linux 2 (AL2). You can continue to use AL2 by choosing it as the AMI type when creating a new node group.
++
+** For more information about Amazon Linux, see link:linux/al2023/ug/compare-with-al2.html[Comparing AL2 and AL2023,type="documentation"] in the Amazon Linux User Guide.
+** For more information about specifying the operating system for a managed node group, see <>.
+
+====
+
+* With Amazon EKS `1.30`, the `topology.k8s.aws/zone-id` label is added to worker nodes. You can use Availability Zone IDs (AZ IDs) to determine the location of resources in one account relative to the resources in another account. For more information, see link:ram/latest/userguide/working-with-az-ids.html[Availability Zone IDs for your {aws} resources,type="documentation"] in the _{aws} RAM User Guide_.
+* Starting with `1.30`, Amazon EKS no longer includes the `default` annotation on the `gp2` `StorageClass` resource applied to newly created clusters. This has no impact if you are referencing this storage class by name. You must take action if you were relying on having a default `StorageClass` in the cluster. You should reference the `StorageClass` by the name `gp2`. Alternatively, you can deploy the Amazon EBS recommended default storage class by setting the `defaultStorageClass.enabled` parameter to `true` when installing `v1.31.0` or later of the `aws-ebs-csi-driver` add-on.
+* The minimum required IAM policy for the Amazon EKS cluster IAM role has changed. The action `ec2:DescribeAvailabilityZones` is required. For more information, see <>.
+
+For the complete [.noloc]`Kubernetes` `1.30` changelog, see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md.
+
+[[kubernetes-1.29,kubernetes-1.29.title]]
+== [.noloc]`Kubernetes` 1.29
+
+[.noloc]`Kubernetes` `1.29` is now available in Amazon EKS. For more information about [.noloc]`Kubernetes` `1.29`, see the https://kubernetes.io/blog/2023/12/13/kubernetes-v1-29-release/[official release announcement].
+
+[IMPORTANT]
+====
+
+
+* The deprecated `flowcontrol.apiserver.k8s.io/v1beta2` API version of `FlowSchema` and `PriorityLevelConfiguration` is no longer served in [.noloc]`Kubernetes` `v1.29`. If you have manifests or client software that uses the deprecated beta API group, you should change these before you upgrade to `v1.29`.
+
+====
+
+* The `.status.kubeProxyVersion` field for node objects is now deprecated, and the [.noloc]`Kubernetes` project is proposing to remove that field in a future release.
The deprecated field is not accurate and has historically been managed by `kubelet`, which does not actually know the `kube-proxy` version, or even whether `kube-proxy` is running. If you've been using this field in client software, stop; the information isn't reliable and the field is now deprecated.
+* In [.noloc]`Kubernetes` `1.29`, to reduce potential attack surface, the `LegacyServiceAccountTokenCleanUp` feature labels legacy auto-generated secret-based tokens as invalid if they have not been used for a long time (1 year by default), and automatically removes them if use is not attempted for a long time after being marked as invalid (1 additional year by default). To identify such tokens, you can run:
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get cm kube-apiserver-legacy-service-account-token-tracking -n kube-system
+----
+
+For the complete [.noloc]`Kubernetes` `1.29` changelog, see https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#changelog-since-v1280.
diff --git a/latest/ug/clusters/kubernetes-versions.adoc b/latest/ug/clusters/kubernetes-versions.adoc
new file mode 100644
index 00000000..fdf3e63e
--- /dev/null
+++ b/latest/ug/clusters/kubernetes-versions.adoc
@@ -0,0 +1,276 @@
+//!!NODE_ROOT
+ +[.topic] +[[kubernetes-versions,kubernetes-versions.title]] += Understand the [.noloc]`Kubernetes` version lifecycle on EKS +:info_doctype: section +:info_title: Understand the Kubernetes version lifecycle on EKS +:info_titleabbrev: Kubernetes versions +:keywords: Amazon EKS, available, Kubernetes, version, release notes +:info_abstract: Learn how Amazon EKS supports Kubernetes versions with standard and extended \ + support periods, allowing you to proactively update clusters with the latest \ + versions, features, and security patches.. + + +include::../attributes.txt[] + +[abstract] +-- +Learn how Amazon EKS supports Kubernetes versions with standard and extended support periods, allowing you to proactively update clusters with the latest versions, features, and security patches.. +-- + +[.noloc]`Kubernetes` rapidly evolves with new features, design updates, and bug fixes. The community releases new [.noloc]`Kubernetes` minor versions (such as `1.30`) on average once every four months. Amazon EKS follows the upstream release and deprecation cycle for minor versions. As new [.noloc]`Kubernetes` versions become available in Amazon EKS, we recommend that you proactively update your clusters to use the latest available version. + +A minor version is under standard support in Amazon EKS for the first 14 months after it's released. Once a version is past the end of standard support date, it enters extended support for the next 12 months. Extended support allows you to stay at a specific [.noloc]`Kubernetes` version for longer at an additional cost per cluster hour. If you haven't updated your cluster before the extended support period ends, your cluster is auto-upgraded to the oldest currently supported extended version. + +Extended support is enabled by default. <> + +We recommend that you create your cluster with the latest available [.noloc]`Kubernetes` version supported by Amazon EKS. If your application requires a specific version of [.noloc]`Kubernetes`, you can select older versions. You can create new Amazon EKS clusters on any version offered in standard or extended support. + + + +video::_dJdAZ_J_jw[youtube,align = center,height = 405,fileref = https://www.youtube.com/embed/_dJdAZ_J_jw,width = 720] + + +[[available-versions,available-versions.title]] +== Available versions on standard support + +The following [.noloc]`Kubernetes` versions are currently available in Amazon EKS standard support: + + +* `1.31` +* `1.30` +* `1.29` + +For important changes to be aware of for each version in standard support, see <>. + +[[available-versions-extended,available-versions-extended.title]] +== Available versions on extended support + +The following [.noloc]`Kubernetes` versions are currently available in Amazon EKS extended support: + + +* `1.28` +* `1.27` +* `1.26` +* `1.25` +* `1.24` + +For important changes to be aware of for each version in extended support, see <>. + +[[kubernetes-release-calendar,kubernetes-release-calendar.title]] +== Amazon EKS [.noloc]`Kubernetes` release calendar + +The following table shows important release and support dates to consider for each [.noloc]`Kubernetes` version. Billing for extended support starts at the beginning of the day that the version reaches end of standard support. + +[NOTE] +==== + +Dates with only a month and a year are approximate and are updated with an exact date when it's known. 
+ +==== + +[cols="1,1,1,1,1", options="header"] +|=== +|Kubernetes version +|Upstream release +|Amazon EKS release +|End of standard support +|End of extended support + + +|`1.31` +|August 13, 2024 +|September 26, 2024 +|November 26, 2025 +|November 26, 2026 + +|`1.30` +|April 17, 2024 +|May 23, 2024 +|July 23, 2025 +|July 23, 2026 + +|`1.29` +|December 13, 2023 +|January 23, 2024 +|March 23, 2025 +|March 23, 2026 + +|`1.28` +|August 15, 2023 +|September 26, 2023 +|November 26, 2024 +|November 26, 2025 + +|`1.27` +|April 11, 2023 +|May 24, 2023 +|July 24, 2024 +|July 24, 2025 + +|`1.26` +|December 9, 2022 +|April 11, 2023 +|June 11, 2024 +|June 11, 2025 + +|`1.25` +|August 23, 2022 +|February 22, 2023 +|May 1, 2024 +|May 1, 2025 + +|`1.24` +|May 3, 2022 +|November 15, 2022 +|January 31, 2024 +|January 31, 2025 + +|=== + +[[version-cli,version-cli.title]] +== Get version information with {AWS} CLI + +You can use the {aws} CLI to get information about Kubernetes versions available on EKS, such as the end date of Standard Support. + +=== To retrieve information about available Kubernetes versions on EKS using the {aws} CLI + +. Open your terminal. +. Ensure you have the {aws} CLI installed and configured. For more information, see link:cli/latest/userguide/getting-started-install.html["Installing or updating to the latest version of the CLI",type="documentation"]. +. Run the following command: ++ +``` +aws eks describe-cluster-versions +``` +. The command will return a JSON output with details about the available cluster versions. Here's an example of the output: ++ +```json +{ + "clusterVersions": [ + { + "clusterVersion": "1.31", + "clusterType": "eks", + "defaultPlatformVersion": "eks.21", + "defaultVersion": true, + "releaseDate": "2024-09-25T17:00:00-07:00", + "endOfStandardSupportDate": "2025-11-25T16:00:00-08:00", + "endOfExtendedSupportDate": "2026-11-25T16:00:00-08:00", + "status": "STANDARD_SUPPORT", + "kubernetesPatchVersion": "1.31.3" + } + ] +} +``` + +*The output provides the following information for each cluster version:* + +* `clusterVersion`: The Kubernetes version of the EKS cluster +* `clusterType`: The type of cluster (e.g., "eks") +* `defaultPlatformVersion`: The default EKS platform version +* `defaultVersion`: Whether this is the default version +* `releaseDate`: The date when this version was released +* `endOfStandardSupportDate`: The date when standard support ends +* `endOfExtendedSupportDate`: The date when extended support ends +* `status`: The current support status of the version, such as `STANDARD_SUPPORT` or `EXTENDED_SUPPORT` +* `kubernetesPatchVersion`: The specific Kubernetes patch version + +[[version-faqs,version-faqs.title]] +== Amazon EKS version FAQs + +*How many [.noloc]`Kubernetes` versions are available in standard support?*:: +In line with the [.noloc]`Kubernetes` community support for [.noloc]`Kubernetes` versions, Amazon EKS is committed to offering support for three [.noloc]`Kubernetes` versions at any given time. We will announce the end of standard support date of a given [.noloc]`Kubernetes` minor version at least 60 days in advance. Because of the Amazon EKS qualification and release process for new [.noloc]`Kubernetes` versions, the end of standard support date of a [.noloc]`Kubernetes` version on Amazon EKS will be after the date that the [.noloc]`Kubernetes` project stops supporting the version upstream. 
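++
+For example, assuming you're using a recent {aws} CLI version that includes the `describe-cluster-versions` operation, you can list only the versions that are currently in standard support by filtering the output on the client side:
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks describe-cluster-versions --query "clusterVersions[?status=='STANDARD_SUPPORT'].clusterVersion"
+----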
+
+*How long does a [.noloc]`Kubernetes` version receive standard support from Amazon EKS?*::
+A [.noloc]`Kubernetes` version receives standard support for 14 months after first being available on Amazon EKS. This is true even if upstream [.noloc]`Kubernetes` no longer supports a version that's available on Amazon EKS. We backport security patches that are applicable to the [.noloc]`Kubernetes` versions that are supported on Amazon EKS.
+
+
+*Am I notified when standard support is ending for a [.noloc]`Kubernetes` version on Amazon EKS?*::
+Yes. If any clusters in your account are running the version nearing the end of support, Amazon EKS sends out a notice through the {aws} Health Dashboard approximately 12 months after the [.noloc]`Kubernetes` version was released on Amazon EKS. The notice includes the end of support date. This is at least 60 days from the date of the notice.
+
+
+*Which [.noloc]`Kubernetes` features are supported by Amazon EKS?*::
+Amazon EKS supports all generally available (GA) features of the [.noloc]`Kubernetes` API. Starting with [.noloc]`Kubernetes` version `1.24`, new beta APIs aren't enabled in clusters by default. However, previously existing beta APIs and new versions of existing beta APIs continue to be enabled by default. Alpha features aren't supported.
+
+
+*Are Amazon EKS managed node groups automatically updated along with the cluster control plane version?*::
+No. A managed node group creates Amazon EC2 instances in your account. These instances aren't automatically upgraded when you or Amazon EKS update your control plane. For more information, see <>. We recommend maintaining the same [.noloc]`Kubernetes` version on your control plane and nodes.
+
+
+*Are self-managed node groups automatically updated along with the cluster control plane version?*::
+No. A self-managed node group includes Amazon EC2 instances in your account. These instances aren't automatically upgraded when you or Amazon EKS update the control plane version on your behalf. A self-managed node group doesn't have any indication in the console that it needs updating. You can view the `kubelet` version installed on a node by selecting the node in the *Nodes* list on the *Overview* tab of your cluster to determine which nodes need updating. You must manually update the nodes. For more information, see <>.
++
+The [.noloc]`Kubernetes` project tests compatibility between the control plane and nodes for up to three minor versions. For example, `1.27` nodes continue to operate when orchestrated by a `1.30` control plane. However, running a cluster with nodes that are persistently three minor versions behind the control plane isn't recommended. For more information, see https://kubernetes.io/docs/setup/version-skew-policy/[Kubernetes version and version skew support policy] in the [.noloc]`Kubernetes` documentation. We recommend maintaining the same [.noloc]`Kubernetes` version on your control plane and nodes.
+
+
+*Are [.noloc]`Pods` running on Fargate automatically upgraded with an automatic cluster control plane version upgrade?*::
+No. We strongly recommend running Fargate [.noloc]`Pods` as part of a replication controller, such as a [.noloc]`Kubernetes` deployment. Then do a rolling restart of all Fargate [.noloc]`Pods`. The new version of the Fargate [.noloc]`Pod` is deployed with a `kubelet` version that's the same version as your updated cluster control plane version.
For more information, see https://kubernetes.io/docs/concepts/workloads/controllers/deployment[Deployments] in the [.noloc]`Kubernetes` documentation. ++ +IMPORTANT: If you update the control plane, you must still update the Fargate nodes yourself. To update Fargate nodes, delete the Fargate [.noloc]`Pod` represented by the node and redeploy the [.noloc]`Pod`. The new [.noloc]`Pod` is deployed with a `kubelet` version that's the same version as your cluster. + + +*What Kubernetes versions are supported for hybrid nodes?*:: +Amazon EKS Hybrid Nodes supports the same Kubernetes versions as Amazon EKS clusters with other node compute types, including standard and extended Kubernetes version support. Hybrid nodes are not automatically upgraded when you upgrade your control plane version and you are responsible for upgrading your hybrid nodes. For more information, see <>. + + +[[extended-support-faqs,extended-support-faqs.title]] +== Amazon EKS extended support FAQs + +*The standard support and extended support terminology is new to me. What do those terms mean?*:: +Standard support for a [.noloc]`Kubernetes` version in Amazon EKS begins when a [.noloc]`Kubernetes` version is released on Amazon EKS, and will end 14 months after the release date. Extended support for a [.noloc]`Kubernetes` version will begin immediately after the end of standard support, and will end after the next 12 months. For example, standard support for version `1.23` in Amazon EKS ended on October 11, 2023. Extended support for version `1.23` began on October 12, 2023 and ended on October 11, 2024. + + +*What do I need to do to get extended support for Amazon EKS clusters?*:: +You will need to enable extended support (see <>) for your cluster by changing the cluster upgrade policy to EXTENDED. By default, for all new and existing clusters, the upgrade policy is set to EXTENDED, unless specified otherwise. See <> to view the upgrade policy for your cluster. Standard support will begin when a [.noloc]`Kubernetes` version is released on Amazon EKS, and will end 14 months after the release date. Extended support for a [.noloc]`Kubernetes` version will begin immediately after the end of standard support, and will end after the next 12 months. + + +*For which [.noloc]`Kubernetes` versions can I get extended support?*:: +Extended support is available for [.noloc]`Kubernetes` versions `1.23` and higher. You can run clusters on any version for up to 12 months after the end of standard support for that version. This means that each version will be supported for 26 months in Amazon EKS (14 months of standard support plus 12 months of extended support). + + +*What if I don't want to use extended support?*:: +If you don't want to be automatically enrolled in extended support, you can upgrade your cluster to a [.noloc]`Kubernetes` version that's in standard Amazon EKS support. See <> to learn how to disable extended support. Note: If you disable extended support, your cluster will be auto upgraded at the end of standard support. + + +*What will happen at the end of 12 months of extended support?*:: +Clusters running on a [.noloc]`Kubernetes` version that has completed its 26-month lifecycle (14 months of standard support plus 12 months of extended support) will be auto-upgraded to the next version. The auto-upgrade includes only the Kubernetes control plane. If you have EKS Auto Mode nodes, they may automatically update. Self managed nodes and EKS Managed Node Groups will remain on the previous version. 
++ +On the end of extended support date, you can no longer create new Amazon EKS clusters with the unsupported version. Existing control planes are automatically updated by Amazon EKS to the earliest supported version through a gradual deployment process after the end of support date. After the automatic control plane update, make sure to manually update cluster add-ons and Amazon EC2 nodes. For more information, see <>. + + +*When exactly is my control plane automatically updated after the end of extended support date?*:: +Amazon EKS can't provide specific time frames. Automatic updates can happen at any time after the end of extended support date. You won't receive any notification before the update. We recommend that you proactively update your control plane without relying on the Amazon EKS automatic update process. For more information, see <>. + + +*Can I leave my control plane on a [.noloc]`Kubernetes` version indefinitely?*:: +No. Cloud security at {aws} is the highest priority. Past a certain point (usually one year), the [.noloc]`Kubernetes` community stops releasing common vulnerabilities and exposures ([.noloc]`CVE`) patches and discourages CVE submission for unsupported versions. This means that vulnerabilities specific to an older version of [.noloc]`Kubernetes` might not even be reported. This leaves clusters exposed with no notice and no remediation options in the event of a vulnerability. Given this, Amazon EKS doesn't allow control planes to stay on a version that reached end of extended support. + + +*Is there additional cost to get extended support?*:: +Yes, there is additional cost for Amazon EKS clusters running in extended support. For pricing details, see link:containers/amazon-eks-extended-support-for-kubernetes-versions-pricing[Amazon EKS extended support for Kubernetes version pricing,type="blog"] on the {aws} blog or our https://aws.amazon.com/eks/pricing/[pricing page]. + + +*What is included in extended support?*:: +Amazon EKS clusters in Extended Support receive ongoing security patches for the [.noloc]`Kubernetes` control plane. Additionally, Amazon EKS will release patches for the Amazon VPC CNI, `kube-proxy`, and [.noloc]`CoreDNS` add-ons for Extended Support versions. Amazon EKS will also release patches for {aws}-published Amazon EKS optimized AMIs for Amazon Linux, [.noloc]`Bottlerocket`, and Windows, as well as Amazon EKS Fargate nodes for those versions. All clusters in Extended Support will continue to get access to technical support from {aws}. ++ +NOTE: Extended Support for Amazon EKS optimized [.noloc]`Windows` AMIs that are published by {aws} isn't available for [.noloc]`Kubernetes` version `1.23` but is available for [.noloc]`Kubernetes` version `1.24` and higher. + + +*Are there any limitations to patches for non-[.noloc]`Kubernetes` components in extended support?*:: +While Extended Support covers all of the [.noloc]`Kubernetes` specific components from {aws}, it will only provide support for {aws}-published Amazon EKS optimized AMIs for Amazon Linux, [.noloc]`Bottlerocket`, and Windows at all times. This means, you will potentially have newer components (such as OS or kernel) on your Amazon EKS optimized AMI while using Extended Support. For example, once Amazon Linux 2 reaches the link:amazon-linux-2/faqs/[end of its lifecycle in 2025,type="marketing"], the Amazon EKS optimized Amazon Linux AMIs will be built using a newer Amazon Linux OS. 
Amazon EKS will announce and document important support lifecycle discrepancies such as this for each [.noloc]`Kubernetes` version. + + +*Can I create new clusters using a version on extended support?*:: +Yes. + +include::kubernetes-versions-standard.adoc[leveloffset=+1] + +include::kubernetes-versions-extended.adoc[leveloffset=+1] + +include::view-support-status.adoc[leveloffset=+1] + +include::view-upgrade-policy.adoc[leveloffset=+1] + +include::enable-extended-support.adoc[leveloffset=+1] + +include::disable-extended-support.adoc[leveloffset=+1] diff --git a/latest/ug/clusters/management/cost-monitoring-aws.adoc b/latest/ug/clusters/management/cost-monitoring-aws.adoc new file mode 100644 index 00000000..8c773eae --- /dev/null +++ b/latest/ug/clusters/management/cost-monitoring-aws.adoc @@ -0,0 +1,29 @@ +include::../../attributes.txt[] + +[.topic] +[[cost-monitoring-aws,cost-monitoring-aws.title]] += View costs by pod in {aws} billing with split cost allocation +:info_doctype: section + +.Cost monitoring using {aws} split cost allocation data for Amazon EKS +You can use {aws} split cost allocation data for Amazon EKS to get granular cost visibility for your Amazon EKS clusters. This enables you to analyze, optimize, and chargeback cost and usage for your [.noloc]`Kubernetes` applications. You allocate application costs to individual business units and teams based on Amazon EC2 CPU and memory resources consumed by your [.noloc]`Kubernetes` application. Split cost allocation data for Amazon EKS gives visibility into cost per Pod, and enables you to aggregate the cost data per Pod using namespace, cluster, and other [.noloc]`Kubernetes` primitives. The following are examples of [.noloc]`Kubernetes` primitives that you can use to analyze Amazon EKS cost allocation data. + +* Cluster name +* Deployment +* Namespace +* Node +* Workload Name +* Workload Type + +For more information about using split cost allocation data, see link:cur/latest/userguide/split-cost-allocation-data.html[Understanding split cost allocation data,type="documentation"] in the {aws} Billing User Guide. + +[[task-cur-setup,task-cur-setup.title]] +== Set up Cost and Usage Reports + +You can turn on Split Cost Allocation Data for EKS in the Cost Management Console, {aws} Command Line Interface, or the {aws} SDKs. + +Use the following for _Split Cost Allocation Data_: + +. Opt in to Split Cost Allocation Data. For more information, see link:cur/latest/userguide/enabling-split-cost-allocation-data.html[Enabling split cost allocation data,type="documentation"] in the {aws} Cost and Usage Report User Guide. +. Include the data in a new or existing report. +. View the report. You can use the Billing and Cost Management console or view the report files in Amazon Simple Storage Service. diff --git a/latest/ug/clusters/management/cost-monitoring-kubecost-bundles.adoc b/latest/ug/clusters/management/cost-monitoring-kubecost-bundles.adoc new file mode 100644 index 00000000..c3297daa --- /dev/null +++ b/latest/ug/clusters/management/cost-monitoring-kubecost-bundles.adoc @@ -0,0 +1,330 @@ +//!!NODE_ROOT
+
+[.topic]
+[[cost-monitoring-kubecost-bundles,cost-monitoring-kubecost-bundles.title]]
+= Learn more about Kubecost
+:info_doctype: section
+
+include::../../attributes.txt[]
+
+Amazon EKS provides an {aws} optimized bundle of [.noloc]`Kubecost` for cluster cost visibility. Amazon EKS supports [.noloc]`Kubecost`, which you can use to monitor your costs broken down by [.noloc]`Kubernetes` resources including [.noloc]`Pods`, nodes, namespaces, and labels.
+
+This topic covers the available versions of [.noloc]`Kubecost` and the differences between the available tiers. EKS supports [.noloc]`Kubecost` Version 1 and Version 2. Each version is available in different tiers. You can use the _Amazon EKS optimized [.noloc]`Kubecost` custom bundle_ for your EKS clusters at no additional cost. You may be charged for use of associated {aws} services, such as Amazon Managed Service for Prometheus. Also, you can use your existing {aws} support agreements to obtain support.
+
+As a [.noloc]`Kubernetes` platform administrator and finance leader, you can use [.noloc]`Kubecost` to visualize a breakdown of Amazon EKS charges, allocate costs, and charge back organizational units such as application teams. You can provide your internal teams and business units with transparent and accurate cost data based on their actual {aws} bill. You can also get customized recommendations for cost optimization based on their infrastructure environment and usage patterns within their clusters. For more information about [.noloc]`Kubecost`, see the https://guide.kubecost.com[Kubecost] documentation.
+
+*What is the difference between the custom bundle of [.noloc]`Kubecost` and the free version of [.noloc]`Kubecost` (also known as [.noloc]`OpenCost`)?*
+
+{aws} and [.noloc]`Kubecost` collaborated to offer a customized version of [.noloc]`Kubecost`. This version includes a subset of commercial features at no additional charge. See the tables below for features that are included in the custom bundle of [.noloc]`Kubecost`.
+
+[[kubecost-v2,kubecost-v2.title]]
+== Kubecost v2
+
+*What is the difference between [.noloc]`Kubecost` v1 and v2?*
+
+Kubecost 2.0 is a major upgrade from previous versions and includes new features such as a new API backend. Note that the https://docs.kubecost.com/apis/monitoring-apis/api-allocation[Allocation] and https://docs.kubecost.com/apis/monitoring-apis/assets-api[Assets] APIs are fully backwards compatible. https://docs.kubecost.com/install-and-configure/install/kubecostv2[Please review the Kubecost documentation to ensure a smooth transition.] For the full list of enhancements, see the https://github.com/kubecost/cost-analyzer-helm-chart/releases/tag/v2.0.0[Kubecost release notes].
+
+[IMPORTANT]
+====
+
+https://docs.kubecost.com/install-and-configure/install/kubecostv2[Review the Kubecost documentation before upgrading.] Upgrading may impact report availability.
+
+====
+
+*Core features comparison:*
+
+[cols="1,1,1,1", options="header"]
+|===
+
+| Feature
+| Kubecost free tier 2.0
+| Amazon EKS optimized Kubecost bundle 2.0
+| Kubecost Enterprise 2.0
+
+
+| Cluster cost visibility
+| Single clusters up to 250 cores
+| Unified multi-cluster without core limits when integrated with Amazon Managed Service for Prometheus
+| Unified and unlimited number of clusters across unlimited numbers of environments (i.e.
multi-cloud) + +| Deployment +| User hosted +| User hosted +| User hosted, Kubecost hosted (dedicated tenant), SaaS + +| Databases supported +| Local Prometheus +| Amazon Managed Service for Prometheus or Local Prometheus +| Any prometheus flavor and custom databases + +| Database retention support (raw metrics) +| 15 days +| Unlimited historical data +| Unlimited historical data + +| Kubecost API and UI retention (ETL) +| 15 days +| 15 days +| Unlimited + +| Hybrid cloud visibility +| - +| Amazon EKS and Amazon EKS Anywhere clusters +| Multi-cloud and hybrid cloud + +| Alerts and recurring reports +| Only supported on the primary cluster, limited to 250 cores +| Efficiency alerts, budget alerts, spend change alerts, and https://docs.kubecost.com/using-kubecost/navigating-the-kubecost-ui/alerts[more supported] across all clusters +| Efficiency alerts, budget alerts, spend change alerts, and https://docs.kubecost.com/using-kubecost/navigating-the-kubecost-ui/alerts[more supported] across all clusters + +| Saved reports +| - +| Reports using 15 days of metrics +| Reports using unlimited historical data and metrics + +| Cloud billing integration +| Only supported on the primary cluster, limited to 250 cores +| Custom pricing support for {aws} (including multiple clusters and multiple accounts) +| Custom pricing support for any cloud + +| Savings recommendations +| Only supported on the primary cluster, limited to 250 cores +| Primary cluster insights, but there is no 250 core limit +| Multi-cluster insights + +| Governance: Audits +| - +| - +| Audit historical cost events + +| Single sign-on (SSO) support +| - +| Amazon Cognito supported +| Okta, Auth0, PingID, KeyCloak, and anything else custom + +| Role-based access control (RBAC) with SAML 2.0 +| - +| - +| Okta, Auth0, PingID, KeyCloak, and anything else custom + +| Enterprise training and onboarding +| - +| - +| Full-service training and FinOps onboarding + +| Teams +| - +| - +| Yes + +|=== + +*New Features:* + +The following features have metric limits: + + + +* Kubecost Aggregator +* Network Monitoring +* Kubecost Actions +* Collections +* Anomaly detection +* Container Request Right-Sizing +* Kubecost Forecasting +* Autocomplete for filtering and aggregation + +*Metric limits:* + +[cols="1,1,1,1", options="header"] +|=== +|Metric +|Kubecost Free Tier 2.0 +|Amazon EKS Optimized Kubecost Custom Bundle + 2.0 +|Kubecost Enterprise 2.0 + + +|Cluster size +|Limited to 250 cores +|Unlimited +|Unlimited + +|Metric retention +|15 days +|15 days +|Unlimited + +|Multi-cluster support +|Not available +|Available +|Available + +|Core limits +|250 cores per cluster +|No core limits +|No core limits +|=== + +[[kubecost-v1,kubecost-v1.title]] +== Kubecost v1 + +[cols="1,1,1,1", options="header"] +|=== +|Feature +|Kubecost free tier +|Amazon EKS optimized Kubecost custom bundle +|Kubecost Enterprise + + +|*Deployment* +|User hosted +|User hosted +|User hosted or [.noloc]`Kubecost` hosted (SaaS) + +|*Number of clusters supported* +|Unlimited +|Unlimited +|Unlimited + +|*Databases supported* +|Local [.noloc]`Prometheus` +|Local [.noloc]`Prometheus` or Amazon Managed Service for Prometheus +|[.noloc]`Prometheus`, Amazon Managed Service for Prometheus, [.noloc]`Cortex`, or [.noloc]`Thanos` + +|*Database retention support* +|15 days +|Unlimited historical data +|Unlimited historical data + +|*[.noloc]`Kubecost` API retention (ETL)* +|15 days +|15 days +|Unlimited historical data + +|*Cluster cost visibility* +|Single clusters +|Unified multi-cluster 
+|Unified multi-cluster + +|*Hybrid cloud visibility* +|- +|Amazon EKS and Amazon EKS Anywhere clusters +|Multi-cloud and hybrid-cloud support + +|*Alerts and recurring reports* +|- +|Efficiency alerts, budget alerts, spend change alerts, and more supported +|Efficiency alerts, budget alerts, spend change alerts, and more supported + +|*Saved reports* +|- +|Reports using 15 days data +|Reports using unlimited historical data + +|*Cloud billing integration* +|Required for each individual cluster +|Custom pricing support for {aws} (including multiple clusters and multiple accounts) +|Custom pricing support for {aws} (including multiple clusters and multiple accounts) + +|*Savings recommendations* +|Single cluster insights +|Single cluster insights +|Multi-cluster insights + +|*Governance: Audits* +|- +|- +|Audit historical cost events + +|*Single sign-on (SSO) support* +|- +|Amazon Cognito supported +|[.noloc]`Okta`, [.noloc]`Auth0`, [.noloc]`PingID`, KeyCloak + +|*Role-based access control (RBAC) with SAML `2.0`* +|- +|- +|[.noloc]`Okta`, [.noloc]`Auth0`, [.noloc]`PingID`, [.noloc]`Keycloak` + +|*Enterprise training and onboarding* +|- +|- +|Full-service training and [.noloc]`FinOps` onboarding +|=== + +[[cost-monitoring-faq,cost-monitoring-faq.title]] +== Frequently asked questions + +See the following common questions and answers about using [.noloc]`Kubecost` with Amazon EKS. + +*What is the Kubecost API retention (ETL) feature?* + +The Kubecost ETL feature aggregates and organizes metrics to surface cost visibility at various levels of granularity (such as `namespace-level`, `pod-level`, and `deployment-level`). For the custom Kubecost bundle, customers get data and insights from metrics for the last 15 days. + +*What is the alerts and recurring reports feature? What alerts and reports does it include?* + +Kubecost alerts allow teams to receive updates on real-time Kubernetes spend as well as cloud spend. Recurring reports enable teams to receive customized views of historical Kubernetes and cloud spend. Both are configurable using the Kubecost UI or Helm values. They support email, Slack, and Microsoft Teams. + +*What do saved reports include?* + +Kubecost saved reports are predefined views of cost and efficiency metrics. They include cost by cluster, namespace, label, and more. + +*What is cloud billing integration?* + +Integration with {aws} billing APIs allows Kubecost to display out-of-cluster costs (such as Amazon S3). Additionally, it allows Kubecost to reconcile Kubecost`'s in-cluster predictions with actual billing data to account for spot usage, savings plans, and enterprise discounts. + +*What do savings recommendations include?* + +Kubecost provides insights and automation to help users optimize their Kubernetes infrastructure and spend. + +*Is there a charge for this functionality?* + +No. You can use this version of Kubecost at no additional charge. If you want additional Kubecost capabilities that aren`'t included in this bundle, you can buy an enterprise license of Kubecost through the {aws} Marketplace, or from Kubecost directly. + +*Is support available?* + +Yes. You can open a support case with the {aws} Support team at link:contact-us/[Contact {aws},type="marketing"]. + +*Do I need a license to use Kubecost features provided by the Amazon EKS integration?* + +No. + +*Can I integrate Kubecost with {aws} Cost and Usage Report for more accurate reporting?* + +Yes. 
You can configure Kubecost to ingest data from {aws} Cost and Usage Report to get accurate cost visibility, including discounts, Spot pricing, reserved instance pricing, and others. For more information, see https://docs.kubecost.com/install-and-configure/install/cloud-integration/aws-cloud-integrations[{aws} Cloud Billing Integration] in the Kubecost documentation. + +*Does this version support cost management of self-managed Kubernetes clusters on Amazon EC2?* + +No. This version is only compatible with Amazon EKS clusters. + +*Can Kubecost track costs for Amazon EKS on {aws} Fargate?* + +Kubecost provides best effort to show cluster cost visibility for Amazon EKS on Fargate, but with lower accuracy than with Amazon EKS on Amazon EC2. This is primarily due to the difference in how you`'re billed for your usage. With Amazon EKS on Fargate, you`'re billed for consumed resources. With Amazon EKS on Amazon EC2 nodes, you`'re billed for provisioned resources. Kubecost calculates the cost of an Amazon EC2 node based on the node specification, which includes CPU, RAM, and ephemeral storage. With Fargate, costs are calculated based on the requested resources for the Fargate Pods. + +*How can I get updates and new versions of Kubecost?* + +You can upgrade your Kubecost version using standard Helm upgrade procedures. The latest versions are in the https://gallery.ecr.aws/kubecost/cost-analyzer[Amazon ECR Public Gallery]. + +*Is the `*kubectl-cost*` CLI supported? How do I install it?* + +Yes. `Kubectl-cost` is an open source tool by Kubecost (Apache 2.0 License) that provides CLI access to Kubernetes cost allocation metrics. To install `kubectl-cost`, see https://github.com/kubecost/kubectl-cost#installation[Installation] on GitHub. + +*Is the Kubecost user interface supported? How do I access it?* + +Kubecost provides a web dashboard that you can access through `kubectl` port forwarding, an ingress, or a load balancer. You can also use the {aws} Load Balancer Controller to expose [.noloc]`Kubecost` and use Amazon Cognito for authentication, authorization, and user management. For more information, see link:containers/how-to-use-application-load-balancer-and-amazon-cognito-to-authenticate-users-for-your-kubernetes-web-apps[How to use Application Load Balancer and Amazon Cognito to authenticate users for your Kubernetes web apps,type="blog"] on the {aws} blog. + +*Is Amazon EKS Anywhere supported?* + +No. + +[[kubecost-additional,kubecost-additional.title]] +== Additional [.noloc]`Kubecost` Features + +* The following features are available in both [.noloc]`Kubecost` v1 and v2. +* *Export cost metrics* – Amazon EKS optimized cost monitoring is deployed with [.noloc]`Kubecost` and [.noloc]`Prometheus`, which is an open-source monitoring system and time series database. [.noloc]`Kubecost` reads metric from [.noloc]`Prometheus` and then performs cost allocation calculations and writes the metrics back to [.noloc]`Prometheus`. The [.noloc]`Kubecost` front-end reads metrics from [.noloc]`Prometheus` and shows them on the [.noloc]`Kubecost` user interface. The architecture is illustrated in the following diagram. ++ +image::images/kubecost-architecture.png[Kubecost architecture,scaledwidth=100%] ++ +With https://prometheus.io/[Prometheus] pre-installed, you can write queries to ingest [.noloc]`Kubecost` data into your current business intelligence system for further analysis. 
You can also use it as a data source for your current https://grafana.com/[Grafana] dashboard to display Amazon EKS cluster costs that your internal teams are familiar with. To learn more about how to write [.noloc]`Prometheus` queries, see the https://github.com/opencost/opencost/blob/develop/PROMETHEUS.md[Prometheus Configuration] `readme` file on GitHub, or use the example [.noloc]`Grafana` JSON models in the https://github.com/kubecost/cost-analyzer-helm-chart/tree/develop/cost-analyzer[Kubecost GitHub repository] as references.
* *{aws} Cost and Usage Report integration* – To perform cost allocation calculations for your Amazon EKS cluster, [.noloc]`Kubecost` retrieves the public pricing information of {aws} services and {aws} resources from the {aws} Price List API. You can also integrate [.noloc]`Kubecost` with *{aws} Cost and Usage Report* to enhance the accuracy of the pricing information specific to your {aws} account. This information includes enterprise discount programs, reserved instance usage, savings plans, and spot usage. To learn more about how the {aws} Cost and Usage Report integration works, see https://docs.kubecost.com/install-and-configure/install/cloud-integration/aws-cloud-integrations[{aws} Cloud Billing Integration] in the [.noloc]`Kubecost` documentation. diff --git a/latest/ug/clusters/management/cost-monitoring-kubecost.adoc b/latest/ug/clusters/management/cost-monitoring-kubecost.adoc new file mode 100644 index 00000000..5f831184 --- /dev/null +++ b/latest/ug/clusters/management/cost-monitoring-kubecost.adoc @@ -0,0 +1,115 @@ +
[.topic]
[[cost-monitoring-kubecost,cost-monitoring-kubecost.title]]
= Install Kubecost and access dashboard
:info_doctype: section

include::../../attributes.txt[]

Amazon EKS supports [.noloc]`Kubecost`, which you can use to monitor your costs broken down by [.noloc]`Kubernetes` resources including [.noloc]`Pods`, nodes, namespaces, and labels. This topic covers installing [.noloc]`Kubecost` and accessing the [.noloc]`Kubecost` dashboard.

Amazon EKS provides an {aws} optimized bundle of [.noloc]`Kubecost` for cluster cost visibility. You can use your existing {aws} support agreements to obtain support. For more information about the available versions of [.noloc]`Kubecost`, see <>.

As a [.noloc]`Kubernetes` platform administrator and finance leader, you can use [.noloc]`Kubecost` to visualize a breakdown of Amazon EKS charges, allocate costs, and charge back organizational units such as application teams. You can provide your internal teams and business units with transparent and accurate cost data based on their actual {aws} bill. Moreover, they can also get customized recommendations for cost optimization based on their infrastructure environment and usage patterns within their clusters.

[NOTE]
====

Kubecost v2 introduces several major new features. <>

====

For more information about [.noloc]`Kubecost`, see the https://guide.kubecost.com[Kubecost] documentation.


[[kubecost-addon,kubecost-addon.title]]
== Install Kubecost using Amazon EKS Add-ons

[NOTE]
====
Install Kubecost as an Amazon EKS Add-on and benefit from additional features at no additional cost with the Amazon EKS optimized Kubecost bundle. For more information, see <>.
====

Amazon EKS Add-ons reduce the complexity of upgrading Kubecost and managing licenses. EKS Add-ons are integrated with the {aws} Marketplace.
. View link:marketplace/seller-profile?id=983de668-2731-4c99-a7e2-74f27d796173[Kubecost in the {aws} Marketplace console,type="marketing"] and subscribe.
. Determine the name of your cluster and its {aws} Region. Verify that you're signed in to the {aws} CLI with sufficient permissions to manage Amazon EKS.
. Create the Kubecost add-on.
+
[source,bash,subs="verbatim,attributes"]
----
aws eks create-addon --addon-name kubecost_kubecost --cluster-name $YOUR_CLUSTER_NAME --region $AWS_REGION
----

Learn how to <>, such as Kubecost.

[[kubecost-helm,kubecost-helm.title]]
== Install Kubecost using Helm

To install Kubecost using Helm, you need the following:

* An existing Amazon EKS cluster. To deploy one, see <>. The cluster must have Amazon EC2 nodes because you can't run [.noloc]`Kubecost` on Fargate nodes.
* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>.
* Helm version 3.9.0 or later configured on your device or {aws} CloudShell. To install or update Helm, see <>.
* If your cluster is version `1.23` or later, you must have the <> installed on your cluster.

. Determine the version of [.noloc]`Kubecost` to install. You can see the available versions at https://gallery.ecr.aws/kubecost/cost-analyzer[kubecost/cost-analyzer] in the Amazon ECR Public Gallery. For more information about the compatibility of [.noloc]`Kubecost` versions and Amazon EKS, see the https://docs.kubecost.com/install-and-configure/install/environment[Environment Requirements] in the Kubecost documentation.
. Install [.noloc]`Kubecost` with the following command. Replace [.replaceable]`kubecost-version` with the value retrieved from ECR, such as [.replaceable]`1.108.1`.
+
[source,bash,subs="verbatim,attributes"]
----
helm upgrade -i kubecost oci://public.ecr.aws/kubecost/cost-analyzer --version kubecost-version \
    --namespace kubecost --create-namespace \
    -f https://raw.githubusercontent.com/kubecost/cost-analyzer-helm-chart/develop/cost-analyzer/values-eks-cost-monitoring.yaml
----
+
[.noloc]`Kubecost` releases new versions regularly. You can update your version using https://helm.sh/docs/helm/helm_upgrade/[helm upgrade]. By default, the installation includes a local https://prometheus.io/[Prometheus] server and `kube-state-metrics`. You can customize your deployment to use link:mt/integrating-kubecost-with-amazon-managed-service-for-prometheus[Amazon Managed Service for Prometheus,type="blog"] by following the documentation in link:prometheus/latest/userguide/integrating-kubecost.html[Integrating with Amazon EKS cost monitoring,type="documentation"]. For a list of all other settings that you can configure, see the https://github.com/kubecost/cost-analyzer-helm-chart/blob/develop/cost-analyzer/values-eks-cost-monitoring.yaml[sample configuration file] on GitHub.
+
You can remove [.noloc]`Kubecost` from your cluster with the following commands.
+
[source,bash,subs="verbatim,attributes"]
----
helm uninstall kubecost --namespace kubecost
kubectl delete ns kubecost
----



[[kubecost-dashboard,kubecost-dashboard.title]]
== Access the Kubecost dashboard
. Make sure the required [.noloc]`Pods` are running.
+
[source,bash,subs="verbatim,attributes"]
----
kubectl get pods -n kubecost
----
+
An example output is as follows.
++ +[source,bash,subs="verbatim,attributes"] +---- +NAME READY STATUS RESTARTS AGE +kubecost-cost-analyzer-b9788c99f-5vj5b 2/2 Running 0 3h27m +kubecost-kube-state-metrics-99bb8c55b-bn2br 1/1 Running 0 3h27m +kubecost-prometheus-server-7d9967bfc8-9c8p7 2/2 Running 0 3h27m +---- +. On your device, enable port-forwarding to expose the [.noloc]`Kubecost` dashboard. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl port-forward --namespace kubecost deployment/kubecost-cost-analyzer 9090 +---- ++ +Alternatively, you can use the <> to expose [.noloc]`Kubecost` and use Amazon Cognito for authentication, authorization, and user management. For more information, see link:containers/how-to-use-application-load-balancer-and-amazon-cognito-to-authenticate-users-for-your-kubernetes-web-apps[How to use Application Load Balancer and Amazon Cognito to authenticate users for your Kubernetes web apps,type="blog"]. +. On the same device that you completed the previous step on, open a web browser and enter the following address. ++ +[source,bash,subs="verbatim,attributes"] +---- +http://localhost:9090 +---- ++ +You see the [.noloc]`Kubecost` Overview page in your browser. It might take 5–10 minutes for [.noloc]`Kubecost` to gather metrics. You can see your Amazon EKS spend, including cumulative cluster costs, associated [.noloc]`Kubernetes` asset costs, and monthly aggregated spend. ++ +image::images/kubecost.png[Kubecost dashboard,scaledwidth=100%] +. To track costs at a cluster level, tag your Amazon EKS resources for billing. For more information, see <>. + + +* *Cost allocation* – View monthly Amazon EKS costs and cumulative costs for each of your namespaces and other dimensions over the past seven days. This is helpful for understanding which parts of your application are contributing to Amazon EKS spend. +* *Assets* – View the costs of the {aws} infrastructure assets that are associated with your Amazon EKS resources. diff --git a/latest/ug/clusters/management/cost-monitoring.adoc b/latest/ug/clusters/management/cost-monitoring.adoc new file mode 100644 index 00000000..6f92be95 --- /dev/null +++ b/latest/ug/clusters/management/cost-monitoring.adoc @@ -0,0 +1,30 @@ +//!!NODE_ROOT
+ + +[.topic] +[[cost-monitoring,cost-monitoring.title]] += Monitor and optimize Amazon EKS cluster costs +:info_doctype: section +:info_title: Monitor and optimize Amazon EKS cluster costs +:info_titleabbrev: Cost monitoring +:keywords: cost, monitoring, watch +:info_abstract: Learn how to monitor and optimize costs for your Amazon EKS clusters using {aws} Billing split cost allocation data or Kubecost, a Kubernetes-native cost monitoring tool integrated with {aws}. + +include::../../attributes.txt[] + +[abstract] +-- +Learn how to monitor and optimize costs for your Amazon EKS clusters using {aws} Billing split cost allocation data or Kubecost, a Kubernetes-native cost monitoring tool integrated with {aws}. +-- + +Cost monitoring is an essential aspect of managing your [.noloc]`Kubernetes` clusters on Amazon EKS. By gaining visibility into your cluster costs, you can optimize resource utilization, set budgets, and make data-driven decisions about your deployments. Amazon EKS provides two cost monitoring solutions, each with its own unique advantages, to help you track and allocate your costs effectively: + +*{aws} Billing split cost allocation data for Amazon EKS* -- This native feature integrates seamlessly with the {aws} Billing Console, allowing you to analyze and allocate costs using the same familiar interface and workflows you use for other {aws} services. With split cost allocation, you can gain insights into your [.noloc]`Kubernetes` costs directly alongside your other {aws} spend, making it easier to optimize costs holistically across your {aws} environment. You can also leverage existing {aws} Billing features like Cost Categories and Cost Anomaly Detection to further enhance your cost management capabilities. For more information, see link:cur/latest/userguide/split-cost-allocation-data.html[Understanding split cost allocation data,type="documentation"] in the {aws} Billing User Guide. + +*[.noloc]`Kubecost`* -- Amazon EKS supports Kubecost, a Kubernetes cost monitoring tool. Kubecost offers a feature-rich, Kubernetes-native approach to cost monitoring, providing granular cost breakdowns by Kubernetes resources, cost optimization recommendations, and out-of-the-box dashboards and reports. Kubecost also retrieves accurate pricing data by integrating with the {aws} Cost and Usage Report, ensuring you get a precise view of your Amazon EKS costs. Learn how to <>. + +include::cost-monitoring-aws.adoc[leveloffset=+1] + +include::cost-monitoring-kubecost.adoc[leveloffset=+1] + +include::cost-monitoring-kubecost-bundles.adoc[leveloffset=+1] diff --git a/latest/ug/clusters/management/eks-managing.adoc b/latest/ug/clusters/management/eks-managing.adoc new file mode 100644 index 00000000..42e6db90 --- /dev/null +++ b/latest/ug/clusters/management/eks-managing.adoc @@ -0,0 +1,40 @@ +//!!NODE_ROOT +include::../../attributes.txt[] +[[eks-managing,eks-managing.title]] += Organize and monitor cluster resources +:doctype: book +:sectnums: +:toc: left +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . +:info_doctype: chapter +:info_title: Organize and monitor cluster resources +:info_titleabbrev: Cluster management + +This chapter includes the following topics to help you manage your cluster. You can also view information about your <> with the {aws-management-console}. + + + +* The [.noloc]`Kubernetes` Dashboard is a general purpose, web-based UI for [.noloc]`Kubernetes` clusters. 
It allows users to manage applications running in the cluster and troubleshoot them, as well as manage the cluster itself. For more information, see the https://github.com/kubernetes/dashboard[Kubernetes Dashboard] GitHub repository.
* <> – The [.noloc]`Kubernetes` Metrics Server is an aggregator of resource usage data in your cluster. It isn't deployed by default in your cluster, but is used by [.noloc]`Kubernetes` add-ons, such as the [.noloc]`Kubernetes` Dashboard and <>. In this topic, you learn how to install the Metrics Server.
* <> – The Helm package manager for [.noloc]`Kubernetes` helps you install and manage applications on your [.noloc]`Kubernetes` cluster. This topic helps you install and run the Helm binaries so that you can install and manage charts using the Helm CLI on your local computer.
* <> – To help you manage your Amazon EKS resources, you can assign your own metadata to each resource in the form of _tags_. This topic describes tags and shows you how to create them.
* <> – Your {aws} account has default quotas, formerly referred to as limits, for each {aws} service. Learn about the quotas for Amazon EKS and how to increase them.


include::cost-monitoring.adoc[leveloffset=+1]


include::metrics-server.adoc[leveloffset=+1]


include::helm.adoc[leveloffset=+1]


include::eks-using-tags.adoc[leveloffset=+1]


include::service-quotas.adoc[leveloffset=+1] diff --git a/latest/ug/clusters/management/eks-using-tags.adoc b/latest/ug/clusters/management/eks-using-tags.adoc new file mode 100644 index 00000000..447b8799 --- /dev/null +++ b/latest/ug/clusters/management/eks-using-tags.adoc @@ -0,0 +1,215 @@ +//!!NODE_ROOT
+include::../../attributes.txt[] + +[.topic] +[[eks-using-tags,eks-using-tags.title]] += Organize Amazon EKS resources with tags +:info_doctype: section +:info_title: Organize Amazon EKS resources with tags +:info_titleabbrev: Tagging your resources +:keywords: metadata, tag, resources +:info_abstract: Learn how to use tags to categorize and manage your Amazon EKS resources like clusters, managed node groups, and Fargate profiles for billing, cost allocation, and resource identification. + +[abstract] +-- +Learn how to use tags to categorize and manage your Amazon EKS resources like clusters, managed node groups, and Fargate profiles for billing, cost allocation, and resource identification. +-- + +You can use _tags_ to help you manage your Amazon EKS resources. This topic provides an overview of the tags function and shows how you can create tags. + +[.topiclist] +[[Topic List]] + +[NOTE] +==== + +Tags are a type of metadata that's separate from [.noloc]`Kubernetes` labels and annotations. For more information about these other metadata types, see the following sections in the [.noloc]`Kubernetes` documentation: + + + +* https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/[Labels and Selectors] +* https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/[Annotations] + +==== + +[[tag-basics,tag-basics.title]] +== Tag basics + +A tag is a label that you assign to an {aws} resource. Each tag consists of a _key_ and an optional _value_. + +With tags, you can categorize your {aws} resources. For example, you can categorize resources by purpose, owner, or environment. When you have many resources of the same type, you can use the tags that you assigned to a specific resource to quickly identify that resource. For example, you can define a set of tags for your Amazon EKS clusters to help you track each cluster's owner and stack level. We recommend that you devise a consistent set of tag keys for each resource type. You can then search and filter the resources based on the tags that you add. + +After you add a tag, you can edit tag keys and values or remove tags from a resource at any time. If you delete a resource, any tags for the resource are also deleted. + +Tags don't have any semantic meaning to Amazon EKS and are interpreted strictly as a string of characters. You can set the value of a tag to an empty string. However, you can't set the value of a tag to null. If you add a tag that has the same key as an existing tag on that resource, the new value overwrites the earlier value. + +If you use {aws} Identity and Access Management (IAM), you can control which users in your {aws} account have permission to manage tags. + +[[tag-resources,tag-resources.title]] +== Tagging your resources + +The following Amazon EKS resources support tags: + + + +* clusters +* managed node groups +* Fargate profiles + +You can tag these resources using the following: + + + +* If you're using the Amazon EKS console, you can apply tags to new or existing resources at any time. You can do this by using the *Tags* tab on the relevant resource page. For more information, see <>. +* If you're using `eksctl`, you can apply tags to resources when they're created using the `--tags` option. +* If you're using the {aws} CLI, the Amazon EKS API, or an {aws} SDK, you can apply tags to new resources using the `tags` parameter on the relevant API action. You can apply tags to existing resources using the `TagResource` API action. 
For more information, see link:eks/latest/APIReference/API_TagResource.html[TagResource,type="documentation"].

When you use some resource-creating actions, you can also specify tags for the resource at the same time that you create it. If tags can't be applied while the resource is being created, the resource fails to be created. This mechanism ensures that resources that you intend to tag are either created with the tags that you specify or not created at all. If you tag resources when you create them, you don't need to run custom tagging scripts after you create the resource.

Tags don't propagate to other resources that are associated with the resource that you create. For example, Fargate profile tags don't propagate to other resources that are associated with the Fargate profile, such as the [.noloc]`Pods` that are scheduled with it.

[[tag-restrictions,tag-restrictions.title]]
== Tag restrictions

The following restrictions apply to tags:

* A maximum of 50 tags can be associated with a resource.
* Tag keys can't be repeated for one resource. Each tag key must be unique, and can only have one value.
* Keys can be up to 128 characters long in UTF-8.
* Values can be up to 256 characters long in UTF-8.
* If multiple {aws} services and resources use your tagging schema, limit the types of characters you use. Some services might have restrictions on allowed characters. Generally, allowed characters are letters, numbers, spaces, and the following characters: `+` `-` `=` `.` `_` `:` `/` `@`.
* Tag keys and values are case sensitive.
* Don't use `aws:`, `{aws}:`, or any uppercase or lowercase combination of these as a prefix for either keys or values. These are reserved only for {aws} use. You can't edit or delete tag keys or values with this prefix. Tags with this prefix don't count against your tags-per-resource limit.


[[tag-resources-for-billing,tag-resources-for-billing.title]]
== Tagging your resources for billing

When you apply tags to Amazon EKS clusters, you can use them for cost allocation in your *Cost & Usage Reports*. The metering data in your *Cost & Usage Reports* shows usage across all of your Amazon EKS clusters. For more information, see link:awsaccountbilling/latest/aboutv2/billing-reports-costusage.html[{aws} cost and usage report,type="documentation"] in the _{aws} Billing User Guide_.

The {aws} generated cost allocation tag, specifically `aws:eks:cluster-name`, lets you break down Amazon EC2 instance costs by individual Amazon EKS cluster in *Cost Explorer*. However, this tag doesn't capture the control plane expenses. The tag is automatically added to Amazon EC2 instances that participate in an Amazon EKS cluster. This behavior happens regardless of whether the instances are provisioned using Amazon EKS managed node groups, [.noloc]`Karpenter`, or directly with Amazon EC2. This specific tag doesn't count towards the 50-tag limit. To use the tag, the account owner must activate it in the {aws} Billing console or by using the API. When an {aws} Organizations management account owner activates the tag, it's also activated for all organization member accounts.

You can also organize your billing information based on resources that have the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information. That way, you can see the total cost of that application across several services.
For more information about setting up a cost allocation report with tags, see link:awsaccountbilling/latest/aboutv2/configurecostallocreport.html[The Monthly Cost Allocation Report,type="documentation"] in the _{aws} Billing User Guide_. + +[NOTE] +==== + +If you just enabled reporting, data for the current month is available for viewing after 24 hours. + +==== + +*Cost Explorer* is a reporting tool that's available as part of the {aws} Free Tier. You can use *Cost Explorer* to view charts of your Amazon EKS resources from the last 13 months. You can also forecast how much you're likely to spend for the next three months. You can see patterns in how much you spend on {aws} resources over time. For example, you can use it to identify areas that need further inquiry and see trends that you can use to understand your costs. You also can specify time ranges for the data, and view time data by day or by month. + +[[tag-resources-console,tag-resources-console.title]] +== Working with tags using the console + +Using the Amazon EKS console, you can manage the tags that are associated with new or existing clusters and managed node groups. + +When you select a resource-specific page in the Amazon EKS console, the page displays a list of those resources. For example, if you select *Clusters* from the left navigation pane, the console displays a list of Amazon EKS clusters. When you select a resource from one of these lists (for example, a specific cluster) that supports tags, you can view and manage its tags on the *Tags* tab. + +You can also use *Tag Editor* in the {aws-management-console}, which provides a unified way to manage your tags. For more information, see link:ARG/latest/userguide/tag-editor.html[Tagging your {aws} resources with Tag Editor,type="documentation"] in the _{aws} Tag Editor User Guide_. + +[[adding-tags-creation,adding-tags-creation.title]] +=== Adding tags on a resource on creation + +You can add tags to Amazon EKS clusters, managed node groups, and Fargate profiles when you create them. For more information, see <>. + +[[adding-or-deleting-tags,adding-or-deleting-tags.title]] +=== Adding and deleting tags on a resource + +You can add or delete the tags that are associated with your clusters directly from the resource's page. + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. On the navigation bar, select the {aws} Region to use. +. In the left navigation pane, choose *Clusters*. +. Choose a specific cluster. +. Choose the *Tags* tab, and then choose *Manage tags*. +. On the *Manage tags* page, add or delete your tags as necessary. ++ +** To add a tag, choose *Add tag*. Then specify the key and value for each tag. +** To delete a tag, choose *Remove tag*. +. Repeat this process for each tag that you want to add or delete. +. Choose *Update* to finish. + + +[[tag-resources-api-sdk,tag-resources-api-sdk.title]] +== Working with tags using the CLI, API, or `eksctl` + +Use the following {aws} CLI commands or Amazon EKS API operations to add, update, list, and delete the tags for your resources. You can only use `eksctl` to add tags while simultaneously creating the new resources with one command. +[[tag-eks-resources-table]] +[cols="1,1,1,1", options="header"] +|=== +|Task +|{aws} CLI +|{aws} Tools for Windows PowerShell +|API action + + +|Add or overwrite one or more tags. 
+|link:cli/latest/reference/eks/tag-resource.html[tag-resource,type="documentation"] +|link:powershell/latest/reference/items/Add-EKSResourceTag.html[Add-EKSResourceTag,type="documentation"] +|link:eks/latest/APIReference/API_TagResource.html[TagResource,type="documentation"] + +|Delete one or more tags. +|link:cli/latest/reference/eks/untag-resource.html[untag-resource,type="documentation"] +|link:powershell/latest/reference/items/Remove-EKSResourceTag.html[Remove-EKSResourceTag,type="documentation"] +|link:eks/latest/APIReference/API_UntagResource.html[UntagResource,type="documentation"] +|=== + +The following examples show how to tag or untag resources using the {aws} CLI. + +.Example 1: Tag an existing cluster +The following command tags an existing cluster. + +[source,bash,subs="verbatim,attributes"] +---- +aws eks tag-resource --resource-arn resource_ARN --tags team=devs +---- + +.Example 2: Untag an existing cluster +The following command deletes a tag from an existing cluster. + +[source,bash,subs="verbatim,attributes"] +---- +aws eks untag-resource --resource-arn resource_ARN --tag-keys tag_key +---- + +.Example 3: List tags for a resource +The following command lists the tags that are associated with an existing resource. + +[source,bash,subs="verbatim,attributes"] +---- +aws eks list-tags-for-resource --resource-arn resource_ARN +---- + +When you use some resource-creating actions, you can specify tags at the same time that you create the resource. The following actions support specifying a tag when you create a resource. + +[cols="1,1,1,1,1", options="header"] +|=== +|Task +|{aws} CLI +|{aws} Tools for Windows PowerShell +|API action +|eksctl + + +|Create a cluster +|link:cli/latest/reference/eks/create-cluster.html[create-cluster,type="documentation"] +|link:powershell/latest/reference/items/New-EKSCluster.html[New-EKSCluster,type="documentation"] +|link:eks/latest/APIReference/API_CreateCluster.html[CreateCluster,type="documentation"] +|`create cluster` + +|Create a managed node group* +|link:cli/latest/reference/eks/create-nodegroup.html[create-nodegroup,type="documentation"] +|link:powershell/latest/reference/items/New-EKSNodegroup.html[New-EKSNodegroup,type="documentation"] +|link:eks/latest/APIReference/API_CreateNodegroup.html[CreateNodegroup,type="documentation"] +|`create nodegroup` + +|Create a Fargate profile +|link:cli/latest/reference/eks/create-fargate-profile.html[create-fargate-profile,type="documentation"] +|link:powershell/latest/reference/items/New-EKSFargateProfile.html[New-EKSFargateProfile,type="documentation"] +|link:eks/latest/APIReference/API_CreateFargateProfile.html[CreateFargateProfile.html,type="documentation"] +|`create fargateprofile` +|=== + +* If you want to also tag the Amazon EC2 instances when you create a managed node group, create the managed node group using a launch template. For more information, see <>. If your instances already exist, you can manually tag the instances. For more information, see link:AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources[Tagging your resources,type="documentation"] in the Amazon EC2 User Guide. diff --git a/latest/ug/clusters/management/helm.adoc b/latest/ug/clusters/management/helm.adoc new file mode 100644 index 00000000..56c8385e --- /dev/null +++ b/latest/ug/clusters/management/helm.adoc @@ -0,0 +1,77 @@ +//!!NODE_ROOT
+include::../../attributes.txt[]

[.topic]
[[helm,helm.title]]
= Deploy applications with [.noloc]`Helm` on Amazon EKS
:info_doctype: section
:info_title: Deploy applications with Helm on Amazon EKS
:info_titleabbrev: Deploy apps with Helm
:info_abstract: Learn how to install and use Helm, a package manager for Kubernetes, with your Amazon EKS cluster to manage and deploy applications seamlessly.

[abstract]
--
Learn how to install and use Helm, a package manager for Kubernetes, with your Amazon EKS cluster to manage and deploy applications seamlessly.
--

The Helm package manager for [.noloc]`Kubernetes` helps you install and manage applications on your [.noloc]`Kubernetes` cluster. For more information, see the https://docs.helm.sh/[Helm documentation]. This topic helps you install and run the Helm binaries so that you can install and manage charts using the Helm CLI on your local system.

[IMPORTANT]
====

Before you can install Helm charts on your Amazon EKS cluster, you must configure `kubectl` to work for Amazon EKS. If you have not already done this, see <> before proceeding. If the following command succeeds for your cluster, you're properly configured.

[source,bash,subs="verbatim,attributes"]
----
kubectl get svc
----

====
. Run the appropriate command for your client operating system.
+
** If you're using macOS with https://brew.sh/[Homebrew], install the binaries with the following command.
+
[source,bash,subs="verbatim,attributes"]
----
brew install helm
----
** If you're using [.noloc]`Windows` with https://chocolatey.org/[Chocolatey], install the binaries with the following command.
+
[source,bash,subs="verbatim,attributes"]
----
choco install kubernetes-helm
----
** If you're using [.noloc]`Linux`, install the binaries with the following commands.
+
[source,bash,subs="verbatim,attributes"]
----
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 > get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh
----
+
NOTE: If you get a message that `openssl` must first be installed, you can install it with the following command.

[source,bash,subs="verbatim,attributes"]
----
sudo yum install openssl
----
. To pick up the new binary in your `PATH`, close your current terminal window and open a new one.
. See the version of Helm that you installed.
+
[source,bash,subs="verbatim,attributes"]
----
helm version | cut -d + -f 1
----
+
An example output is as follows.
+
[source,bash,subs="verbatim,attributes"]
----
v3.9.0
----
. At this point, you can run any Helm commands (such as `helm install [.replaceable]``chart-name```) to install, modify, delete, or query Helm charts in your cluster. If you're new to Helm and don't have a specific chart to install, you can do any of the following (see the example sketch after this list):
+
** Experiment by installing an example chart. See https://helm.sh/docs/intro/quickstart#install-an-example-chart[Install an example chart] in the Helm https://helm.sh/docs/intro/quickstart/[Quickstart guide].
** Create an example chart and push it to Amazon ECR. For more information, see link:AmazonECR/latest/userguide/push-oci-artifact.html[Pushing a Helm chart,type="documentation"] in the _Amazon Elastic Container Registry User Guide_.
** Install an Amazon EKS chart from the https://github.com/aws/eks-charts#eks-charts[eks-charts] [.noloc]`GitHub` repo or from https://artifacthub.io/packages/search?page=1&repo=aws[ArtifactHub].
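If you want to walk through a complete end-to-end test, the following is a minimal sketch of that workflow. The chart repository, chart name (`bitnami/nginx`), release name, and namespace are placeholder examples based on the Helm Quickstart guide, not requirements of Amazon EKS; substitute any chart that you want to experiment with.

[source,bash,subs="verbatim,attributes"]
----
# Add a public chart repository and refresh the local index (example repository from the Helm Quickstart).
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update

# Install an example chart as a release named "my-test-nginx" in its own namespace.
helm install my-test-nginx bitnami/nginx --namespace helm-test --create-namespace

# Confirm that the release installed, then clean up the release and the namespace.
helm list --namespace helm-test
helm uninstall my-test-nginx --namespace helm-test
kubectl delete namespace helm-test
----
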
diff --git a/latest/ug/clusters/management/images b/latest/ug/clusters/management/images new file mode 120000 index 00000000..5fa69870 --- /dev/null +++ b/latest/ug/clusters/management/images @@ -0,0 +1 @@ +../../images \ No newline at end of file diff --git a/latest/ug/clusters/management/metrics-server.adoc b/latest/ug/clusters/management/metrics-server.adoc new file mode 100644 index 00000000..c53b8355 --- /dev/null +++ b/latest/ug/clusters/management/metrics-server.adoc @@ -0,0 +1,80 @@ +//!!NODE_ROOT
+include::../../attributes.txt[]

[.topic]
[[metrics-server,metrics-server.title]]
= View resource usage with the [.noloc]`Kubernetes` [.noloc]`Metrics Server`
:info_doctype: section
:info_title: View resource usage with the Kubernetes Metrics Server
:info_titleabbrev: Metrics server
:info_abstract: Use the Kubernetes Metrics Server to view resource usage data on your Amazon EKS cluster for autoscaling and monitoring.

[abstract]
--
Use the Kubernetes Metrics Server to view resource usage data on your Amazon EKS cluster for autoscaling and monitoring.
--

The [.noloc]`Kubernetes` Metrics Server is an aggregator of resource usage data in your cluster, and it isn't deployed by default in Amazon EKS clusters. For more information, see https://github.com/kubernetes-sigs/metrics-server[Kubernetes Metrics Server] on [.noloc]`GitHub`. The Metrics Server is commonly used by other [.noloc]`Kubernetes` add-ons, such as the <> or the <>. For more information, see https://kubernetes.io/docs/tasks/debug/debug-cluster/resource-metrics-pipeline/[Resource metrics pipeline] in the [.noloc]`Kubernetes` documentation. This topic explains how to deploy the [.noloc]`Kubernetes` Metrics Server on your Amazon EKS cluster.

[IMPORTANT]
====

The metrics are meant for point-in-time analysis and aren't an accurate source for historical analysis. They can't be used as a monitoring solution or for other non-autoscaling purposes. For information about monitoring tools, see <>.
====

== Deploy as community add-on with Amazon EKS Add-ons

*New: You can now deploy Metrics Server as a community add-on using the {aws} console or Amazon EKS APIs.*

=== Deploy with {aws} console

. Open your EKS cluster in the {aws} console.
. From the *Add-ons* tab, select *Get More Add-ons*.
. From the *Community add-ons* section, select *Metrics Server* and then *Next*.
. EKS determines the appropriate version of the add-on for your cluster. You can change the version using the *Version* dropdown menu.
. Select *Next* and then *Create* to install the add-on.

=== Additional resources

Learn more about <>.

You install or update community add-ons in the same way as other Amazon EKS Add-ons.

* <>
* <>
* <>


== Deploy with manifest

*New: You can now deploy Metrics Server as a community add-on using the {aws} console or Amazon EKS APIs. These manifest install instructions will be archived.*

. Deploy the Metrics Server with the following command:
+
[source,bash,subs="verbatim,attributes"]
----
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
----
+
If you are using Fargate, you need to change this file. In the default configuration, the Metrics Server uses port 10250. This port is reserved on Fargate. Replace references to port 10250 in `components.yaml` with another port, such as 10251.
. Verify that the `metrics-server` deployment is running the desired number of [.noloc]`Pods` with the following command.
+
[source,bash,subs="verbatim,attributes"]
----
kubectl get deployment metrics-server -n kube-system
----
+
An example output is as follows.
+
[source,bash,subs="verbatim,attributes"]
----
NAME             READY   UP-TO-DATE   AVAILABLE   AGE
metrics-server   1/1     1            1           6m
----
. Test that the Metrics Server is working by displaying resource (CPU/memory) usage of nodes.
+
[source,bash,subs="verbatim,attributes"]
----
kubectl top nodes
----
. If you receive the error message `Error from server (Forbidden)`, you need to update your Kubernetes RBAC configuration. Your Kubernetes RBAC identity needs sufficient permissions to read cluster metrics. Review the https://github.com/kubernetes-sigs/metrics-server/blob/e285375a49e3bf77ddd78c08a05aaa44f2249ebd/manifests/base/rbac.yaml#L5C9-L5C41[minimum required Kubernetes API permissions for reading metrics] on GitHub. Learn how to <>. diff --git a/latest/ug/clusters/management/service-quotas.adoc b/latest/ug/clusters/management/service-quotas.adoc new file mode 100644 index 00000000..132c3942 --- /dev/null +++ b/latest/ug/clusters/management/service-quotas.adoc @@ -0,0 +1,95 @@ +//!!NODE_ROOT
+include::../../attributes.txt[] + +[.topic] +[[service-quotas,service-quotas.title]] += View and manage Amazon EKS and [.noloc]`Fargate` service quotas +:info_doctype: section +:info_title: View and manage Amazon EKS and Fargate service quotas +:info_titleabbrev: Service quotas +:info_abstract: Use Service Quotas to view and manage Amazon EKS and {aws} Fargate quotas from the {aws-management-console} or {aws} CLI. + +[abstract] +-- +Use Service Quotas to view and manage Amazon EKS and {aws} Fargate quotas from the {aws-management-console} or {aws} CLI. +-- + +Amazon EKS has integrated with Service Quotas, an {aws} service that you can use to view and manage your quotas from a central location. For more information, see link:servicequotas/latest/userguide/intro.html[What Is Service Quotas?,type="documentation"] in the _Service Quotas User Guide_. With Service Quotas integration, you can quickly look up the value of your Amazon EKS and {aws} Fargate service quotas using the {aws-management-console} and {aws} CLI. + + +[[service-quotas-console,service-quotas-console.title]] +== View EKS service quotas in the {aws} Management Console + +. Open the link:servicequotas/home/services/eks/quotas["Service Quotas console",type="console"]. +. In the left navigation pane, choose *{aws} services*. +. From the *{aws} services* list, search for and select *Amazon Elastic Kubernetes Service (Amazon EKS)* or *{aws} Fargate*. ++ +In the *Service quotas* list, you can see the service quota name, applied value (if it's available), {aws} default quota, and whether the quota value is adjustable. +. To view additional information about a service quota, such as the description, choose the quota name. +. (Optional) To request a quota increase, select the quota that you want to increase, select *Request quota increase*, enter or select the required information, and select *Request*. + + +To work more with service quotas using the {aws-management-console}, see the link:servicequotas/latest/userguide/intro.html[Service Quotas User Guide,type="documentation"]. To request a quota increase, see link:servicequotas/latest/userguide/request-quota-increase.html[Requesting a Quota Increase,type="documentation"] in the _Service Quotas User Guide_. + + +== View EKS service quotas with the {aws} CLI + +Run the following command to view your Amazon EKS quotas. + +[source,bash,subs="verbatim,attributes"] +---- +aws service-quotas list-aws-default-service-quotas \ + --query 'Quotas[*].{Adjustable:Adjustable,Name:QuotaName,Value:Value,Code:QuotaCode}' \ + --service-code eks \ + --output table +---- + +Run the following command to view your Fargate quotas. + +[source,bash,subs="verbatim,attributes"] +---- +aws service-quotas list-aws-default-service-quotas \ + --query 'Quotas[*].{Adjustable:Adjustable,Name:QuotaName,Value:Value,Code:QuotaCode}' \ + --service-code fargate \ + --output table +---- + +NOTE: The quota returned is the number of Amazon ECS tasks or Amazon EKS [.noloc]`Pods` that can run concurrently on Fargate in this account in the current {aws} Region. + +To work more with service quotas using the {aws} CLI, see link:cli/latest/reference/service-quotas/index.html[service-quotas,type="documentation"] in the _{aws} CLI Command Reference_. To request a quota increase, see the link:cli/latest/reference/service-quotas/request-service-quota-increase.html[request-service-quota-increase,type="documentation"] command in the _{aws} CLI Command Reference_. 
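For adjustable quotas, you can also submit the increase request from the {aws} CLI instead of the console. The following is a minimal sketch, not a required procedure; replace [.replaceable]`L-EXAMPLE1` with the `Code` value that the previous `list-aws-default-service-quotas` command returned for the quota that you want to raise, and replace the desired value with the limit that you actually need.

[source,bash,subs="verbatim,attributes"]
----
# Request an increase for a specific Amazon EKS quota (placeholder quota code and value).
aws service-quotas request-service-quota-increase \
    --service-code eks \
    --quota-code L-EXAMPLE1 \
    --desired-value 200

# Check the status of your open and recent quota increase requests for Amazon EKS.
aws service-quotas list-requested-service-quota-change-history \
    --service-code eks
----
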

[[sq-text,sq-text.title]]
== Amazon EKS service quotas

{aws} recommends using the {aws-management-console} to view your current quotas. For more information, see <>.

To view the default EKS service quotas, see link:general/latest/gr/eks.html#limits_eks["Amazon Elastic Kubernetes Service endpoints and quotas",type="documentation"] in the _{aws} General Reference_.

These service quotas are listed under *Amazon Elastic Kubernetes Service (Amazon EKS)* in the Service Quotas console. To request a quota increase for values that are shown as adjustable, see link:servicequotas/latest/userguide/request-quota-increase.html[Requesting a quota increase,type="documentation"] in the _Service Quotas User Guide_.

[NOTE]
====
The following quotas aren't available in Service Quotas:

* The quota for Pod Identity associations per cluster is [.noloc]`1000` in each supported {aws} Region, and this quota isn't adjustable.
* You can use up to 15 CIDRs for Remote Node Networks and 15 CIDRs for Remote Pod Networks per cluster for hybrid nodes. This quota isn't adjustable.
====

[[service-quotas-eks-fargate,service-quotas-eks-fargate.title]]
== {aws} Fargate service quotas

The *{aws} Fargate* service in the Service Quotas console lists several service quotas. You can configure alarms that alert you when your usage approaches a service quota. For more information, see <>.

New {aws} accounts might have lower initial quotas that can increase over time. Fargate constantly monitors the account usage within each {aws} Region, and then automatically increases the quotas based on the usage. You can also request a quota increase for values that are shown as adjustable. For more information, see link:servicequotas/latest/userguide/request-quota-increase.html[Requesting a quota increase,type="documentation"] in the _Service Quotas User Guide_.

{aws} recommends using the {aws-management-console} to view your current quotas. For more information, see <>.

To view default {aws} Fargate on EKS service quotas, see link:general/latest/gr/eks.html#service-quotas-eks-fargate["Fargate service quotas",type="documentation"] in the _{aws} General Reference_.

[NOTE]
====

Fargate additionally enforces launch rate quotas for Amazon ECS tasks and Amazon EKS [.noloc]`Pods`. For more information, see link:AmazonECS/latest/developerguide/throttling.html[{aws} Fargate throttling quotas,type="documentation"] in the _Amazon ECS guide_.

==== diff --git a/latest/ug/clusters/platform-versions.adoc b/latest/ug/clusters/platform-versions.adoc new file mode 100644 index 00000000..a69c27f8 --- /dev/null +++ b/latest/ug/clusters/platform-versions.adoc @@ -0,0 +1,859 @@ +//!!NODE_ROOT
+ +[.topic] +[[platform-versions,platform-versions.title]] += View Amazon EKS platform versions for each [.noloc]`Kubernetes` version +:info_doctype: section +:info_title: View Amazon EKS platform versions for each Kubernetes version +:info_titleabbrev: Platform versions + +include::../attributes.txt[] + +Amazon EKS platform versions represent the capabilities of the Amazon EKS cluster control plane, such as which [.noloc]`Kubernetes` API server flags are enabled, as well as the current [.noloc]`Kubernetes` patch version. Each [.noloc]`Kubernetes` minor version has one or more associated Amazon EKS platform versions. The platform versions for different [.noloc]`Kubernetes` minor versions are independent. You can <> using the {aws} CLI or {aws-management-console}. If you have a local cluster on {aws} Outposts, see <> instead of this topic. + +When a new [.noloc]`Kubernetes` minor version is available in Amazon EKS, such as 1.30, the initial Amazon EKS platform version for that [.noloc]`Kubernetes` minor version starts at `eks.1`. However, Amazon EKS releases new platform versions periodically to enable new [.noloc]`Kubernetes` control plane settings and to provide security fixes. + +When new Amazon EKS platform versions become available for a minor version: + + + +* The Amazon EKS platform version number is incremented (`eks.`). +* Amazon EKS automatically upgrades all existing clusters to the latest Amazon EKS platform version for their corresponding [.noloc]`Kubernetes` minor version. Automatic upgrades of existing Amazon EKS platform versions are rolled out incrementally. The roll-out process might take some time. If you need the latest Amazon EKS platform version features immediately, you should create a new Amazon EKS cluster. ++ +If your cluster is more than two platform versions behind the current platform version, then it's possible that Amazon EKS wasn't able to automatically update your cluster. For details of what may cause this, see <>. +* Amazon EKS might publish a new node AMI with a corresponding patch version. However, all patch versions are compatible between the EKS control plane and node AMIs for a given [.noloc]`Kubernetes` minor version. + +New Amazon EKS platform versions don't introduce breaking changes or cause service interruptions. + +Clusters are always created with the latest available Amazon EKS platform version (`eks.`) for the specified [.noloc]`Kubernetes` version. If you update your cluster to a new [.noloc]`Kubernetes` minor version, your cluster receives the current Amazon EKS platform version for the [.noloc]`Kubernetes` minor version that you updated to. + +The current and recent Amazon EKS platform versions are described in the following tables. + +[NOTE] +==== + +{aws} recently disabled some platform versions published in June 2024. The platform versions had stability issues. No action is needed. + +==== + + +[[platform-versions-1.31,platform-versions-1.31.title]] +== [.noloc]`Kubernetes` version `1.31` + +The following admission controllers are enabled for all `1.31` platform versions: `NodeRestriction`, `ExtendedResourceToleration`, `NamespaceLifecycle`, `LimitRanger`, `ServiceAccount`, `TaintNodesByCondition`, `PodSecurity`, `Priority`, `DefaultTolerationSeconds`, `DefaultStorageClass`, `StorageObjectInUseProtection`, `PersistentVolumeClaimResize`, `RuntimeClass`, `CertificateApproval`, `CertificateSigning`, `CertificateSubjectRestriction`, `DefaultIngressClass`, `MutatingAdmissionWebhook`, `ValidatingAdmissionWebhook`, `ResourceQuota`. 
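To compare your own cluster against the tables in this topic, you can check its current [.noloc]`Kubernetes` version and Amazon EKS platform version with the {aws} CLI. This is a minimal sketch; [.replaceable]`my-cluster` and [.replaceable]`region-code` are placeholders for your cluster name and {aws} Region.

[source,bash,subs="verbatim,attributes"]
----
# Returns the cluster's Kubernetes version and EKS platform version (for example, "eks.12").
aws eks describe-cluster --name my-cluster --region region-code \
    --query '{kubernetesVersion: cluster.version, platformVersion: cluster.platformVersion}' \
    --output table
----
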
+ +[cols="1,1,1,1", options="header"] +|=== +| Kubernetes version +| EKS platform version +| Release notes +| Release date + +| `1.31.2` +| `eks.12` +| New platform version with Amazon EKS Hybrid Nodes support and enhancements to control plane observability. See <> and see link:blogs/containers/amazon-eks-enhances-kubernetes-control-plane-observability/[Amazon EKS enhances performance observability,type="blog"], respectively. +| November 15, 2024 + +| `1.31.1` +| `eks.6` +| New platform version with security fixes and enhancements. +| October 21, 2024 + +| `1.31.0` +| `eks.4` +| Initial release of Kubernetes version `1.31` for EKS. For more information, see <>. +| September 26, 2024 + +|=== + + +[[platform-versions-1.30,platform-versions-1.30.title]] +== [.noloc]`Kubernetes` version `1.30` + +The following admission controllers are enabled for all `1.30` platform versions: `NodeRestriction`, `ExtendedResourceToleration`, `NamespaceLifecycle`, `LimitRanger`, `ServiceAccount`, `TaintNodesByCondition`, `PodSecurity`, `Priority`, `DefaultTolerationSeconds`, `DefaultStorageClass`, `StorageObjectInUseProtection`, `PersistentVolumeClaimResize`, `RuntimeClass`, `CertificateApproval`, `CertificateSigning`, `CertificateSubjectRestriction`, `DefaultIngressClass`, `MutatingAdmissionWebhook`, `ValidatingAdmissionWebhook`, `ResourceQuota`. + +[cols="1,1,1,1", options="header"] +|=== +| Kubernetes version +| EKS platform version +| Release notes +| Release date + +| `1.30.6` +| `eks.20` +| New platform version with Amazon EKS Hybrid Nodes support and enhancements to control plane observability. See <> and see link:blogs/containers/amazon-eks-enhances-kubernetes-control-plane-observability/[Amazon EKS enhances performance observability,type="blog"], respectively. +| November 15, 2024 + +| `1.30.5` +| `eks.12` +| New platform version with security fixes and enhancements. +| October 21, 2024 + +| `1.30.4` +| `eks.8` +| New platform version with security fixes and enhancements. +| September 3, 2024 + + +| `1.30.3` +| `eks.7` +| New platform version with security fixes and enhancements. +| August 28, 2024 + +| `1.30.3` +| `eks.6` +| New platform version with security fixes and enhancements. +| August 9, 2024 + +| `1.30.2` +| `eks.5` +| New platform version with security fixes and enhancements. +| July 2, 2024 + +| `1.30.0` +| `eks.2` +| Initial release of Kubernetes version `1.30` for EKS. For more information, see <>. +| May 23, 2024 +|=== + +[[platform-versions-1.29,platform-versions-1.29.title]] +== [.noloc]`Kubernetes` version `1.29` + +The following admission controllers are enabled for all `1.29` platform versions: `NodeRestriction`, `ExtendedResourceToleration`, `NamespaceLifecycle`, `LimitRanger`, `ServiceAccount`, `TaintNodesByCondition`, `PodSecurity`, `Priority`, `DefaultTolerationSeconds`, `DefaultStorageClass`, `StorageObjectInUseProtection`, `PersistentVolumeClaimResize`, `RuntimeClass`, `CertificateApproval`, `CertificateSigning`, `CertificateSubjectRestriction`, `DefaultIngressClass`, `MutatingAdmissionWebhook`, `ValidatingAdmissionWebhook`, `ResourceQuota`. + +[cols="1,1,1,1", options="header"] +|=== +| Kubernetes version +| EKS platform version +| Release notes +| Release date + +| `1.29.10` +| `eks.23` +| New platform version with Amazon EKS Hybrid Nodes support and enhancements to control plane observability. See <> and see link:blogs/containers/amazon-eks-enhances-kubernetes-control-plane-observability/[Amazon EKS enhances performance observability,type="blog"], respectively. 
+| November 15, 2024 + +| `1.29.9` +| `eks.17` +| New platform version with security fixes and enhancements. +| October 21, 2024 + +| `1.29.8` +| `eks.13` +| New platform version with security fixes and enhancements. +| September 3, 2024 + + +| `1.29.7` +| `eks.12` +| New platform version with security fixes and enhancements. +| August 28, 2024 + +| `1.29.7` +| `eks.11` +| New platform version with security fixes and enhancements. +| August 9, 2024 + +| `1.29.6` +| `eks.10` +| New platform version with security fixes and enhancements. +| July 2, 2024 + +| `1.29.4` +| `eks.7` +| New platform version with CoreDNS autoscaling, security fixes and enhancements. For more information about CoreDNS autoscaling, see <>. +| May 16, 2024 + +| `1.29.3` +| `eks.6` +| New platform version with security fixes and enhancements. +| April 18, 2024 + +| `1.29.1` +| `eks.5` +| New platform version with security fixes and enhancements. +| March 29, 2024 + +| `1.29.1` +| `eks.4` +| New platform version with security fixes and enhancements. +| March 20, 2024 + +| `1.29.1` +| `eks.3` +| New platform version with security fixes and enhancements. +| March 12, 2024 + +| `1.29.0` +| `eks.1` +| Initial release of Kubernetes version `1.29` for EKS. For more information, see <>. +| January 23, 2024 +|=== + +[[platform-versions-1.28,platform-versions-1.28.title]] +== [.noloc]`Kubernetes` version `1.28` + +The following admission controllers are enabled for all `1.28` platform versions: `NodeRestriction`, `ExtendedResourceToleration`, `NamespaceLifecycle`, `LimitRanger`, `ServiceAccount`, `TaintNodesByCondition`, `PodSecurity`, `Priority`, `DefaultTolerationSeconds`, `DefaultStorageClass`, `StorageObjectInUseProtection`, `PersistentVolumeClaimResize`, `RuntimeClass`, `CertificateApproval`, `CertificateSigning`, `CertificateSubjectRestriction`, `DefaultIngressClass`, `MutatingAdmissionWebhook`, `ValidatingAdmissionWebhook`, `ResourceQuota`. + +[cols="1,1,1,1", options="header"] +|=== +| Kubernetes version +| EKS platform version +| Release notes +| Release date + +| `1.28.15` +| `eks.29` +| New platform version with Amazon EKS Hybrid Nodes support and enhancements to control plane observability. See <> and see link:blogs/containers/amazon-eks-enhances-kubernetes-control-plane-observability/[Amazon EKS enhances performance observability,type="blog"], respectively. +| November 15, 2024 + +| `1.28.14` +| `eks.23` +| New platform version with security fixes and enhancements. +| October 21, 2024 + +| `1.28.13` +| `eks.19` +| New platform version with security fixes and enhancements. +| September 3, 2024 + +| `1.28.12` +| `eks.18` +| New platform version with security fixes and enhancements. +| August 28, 2024 + +| `1.28.11` +| `eks.17` +| New platform version with security fixes and enhancements. +| August 9, 2024 + +| `1.28.11` +| `eks.16` +| New platform version with security fixes and enhancements. +| July 2, 2024 + +| `1.28.9` +| `eks.13` +| New platform version with CoreDNS autoscaling, security fixes and enhancements. For more information about CoreDNS autoscaling, see <>. +| May 16, 2024 + +| `1.28.8` +| `eks.12` +| New platform version with security fixes and enhancements. +| April 18, 2024 + +| `1.28.7` +| `eks.11` +| New platform version with security fixes and enhancements. +| March 29, 2024 + +| `1.28.7` +| `eks.10` +| New platform version with security fixes and enhancements. +| March 20, 2024 + +| `1.28.6` +| `eks.9` +| New platform version with security fixes and enhancements. 
+| March 12, 2024 + +| `1.28.5` +| `eks.7` +| New platform version with security fixes and enhancements. +| January 17, 2024 + +| `1.28.4` +| `eks.6` +| New platform version with <>, security fixes and enhancements. +| December 14, 2023 + +| `1.28.4` +| `eks.5` +| New platform version with security fixes and enhancements. +| December 12, 2023 + +| `1.28.3` +| `eks.4` +| New platform version with <>, security fixes and enhancements. +| November 10, 2023 + +| `1.28.3` +| `eks.3` +| New platform version with security fixes and enhancements. +| November 3, 2023 + +| `1.28.2` +| `eks.2` +| New platform version with security fixes and enhancements. +| October 16, 2023 + +| `1.28.1` +| `eks.1` +| Initial release of Kubernetes version `1.28` for EKS. For more information, see <>. +| September 26, 2023 +|=== + +[[platform-versions-1.27,platform-versions-1.27.title]] +== [.noloc]`Kubernetes` version `1.27` + +The following admission controllers are enabled for all `1.27` platform versions: `NodeRestriction`, `ExtendedResourceToleration`, `NamespaceLifecycle`, `LimitRanger`, `ServiceAccount`, `TaintNodesByCondition`, `PodSecurity`, `Priority`, `DefaultTolerationSeconds`, `DefaultStorageClass`, `StorageObjectInUseProtection`, `PersistentVolumeClaimResize`, `RuntimeClass`, `CertificateApproval`, `CertificateSigning`, `CertificateSubjectRestriction`, `DefaultIngressClass`, `MutatingAdmissionWebhook`, `ValidatingAdmissionWebhook`, `ResourceQuota`. + +[cols="1,1,1,1", options="header"] +|=== +| Kubernetes version +| EKS platform version +| Release notes +| Release date + +| `1.27.16` +| `eks.33` +| New platform version with Amazon EKS Hybrid Nodes support, security fixes and enhancements. For more information about Amazon EKS Hybrid Nodes, see <>. +| November 15, 2024 + +| `1.27.16` +| `eks.27` +| New platform version with security fixes and enhancements. +| October 21, 2024 + +| `1.27.16` +| `eks.23` +| New platform version with security fixes and enhancements. +| September 3, 2024 + +| `1.27.16` +| `eks.22` +| New platform version with security fixes and enhancements. +| August 28, 2024 + +| `1.27.16` +| `eks.21` +| New platform version with security fixes and enhancements. +| August 9, 2024 + +| `1.27.15` +| `eks.20` +| New platform version with security fixes and enhancements. +| July 2, 2024 + +| `1.27.13` +| `eks.17` +| New platform version with CoreDNS autoscaling, security fixes and enhancements. For more information about CoreDNS autoscaling, see <>. +| May 16, 2024 + +| `1.27.12` +| `eks.16` +| New platform version with security fixes and enhancements. +| April 18, 2024 + +| `1.27.11` +| `eks.15` +| New platform version with security fixes and enhancements. +| March 29, 2024 + +| `1.27.11` +| `eks.14` +| New platform version with security fixes and enhancements. +| March 20, 2024 + +| `1.27.10` +| `eks.13` +| New platform version with security fixes and enhancements. +| March 12, 2024 + +| `1.27.9` +| `eks.11` +| New platform version with security fixes and enhancements. +| January 17, 2024 + +| `1.27.8` +| `eks.10` +| New platform version with <>, security fixes and enhancements. +| December 14, 2023 + +| `1.27.8` +| `eks.9` +| New platform version with security fixes and enhancements. +| December 12, 2023 + +| `1.27.7` +| `eks.8` +| New platform version with <>, security fixes and enhancements. +| November 10, 2023 + +| `1.27.7` +| `eks.7` +| New platform version with security fixes and enhancements. 
+| November 3, 2023 + +| `1.27.6` +| `eks.6` +| New platform version with security fixes and enhancements. +| October 16, 2023 + +| `1.27.4` +| `eks.5` +| New platform version with security fixes and enhancements. +| August 30, 2023 + +| `1.27.4` +| `eks.4` +| New platform version with security fixes and enhancements. +| July 30, 2023 + +| `1.27.3` +| `eks.3` +| New platform version with security fixes and enhancements. +| June 30, 2023 + +| `1.27.2` +| `eks.2` +| New platform version with security fixes and enhancements. +| June 9, 2023 + +| `1.27.1` +| `eks.1` +| Initial release of Kubernetes version `1.27` for EKS. For more information, see <>. +| May 24, 2023 +|=== + +[[platform-versions-1.26,platform-versions-1.26.title]] +== [.noloc]`Kubernetes` version `1.26` + +The following admission controllers are enabled for all `1.26` platform versions: `NodeRestriction`, `ExtendedResourceToleration`, `NamespaceLifecycle`, `LimitRanger`, `ServiceAccount`, `TaintNodesByCondition`, `PodSecurity`, `Priority`, `DefaultTolerationSeconds`, `DefaultStorageClass`, `StorageObjectInUseProtection`, `PersistentVolumeClaimResize`, `RuntimeClass`, `CertificateApproval`, `CertificateSigning`, `CertificateSubjectRestriction`, `DefaultIngressClass`, `MutatingAdmissionWebhook`, `ValidatingAdmissionWebhook`, `ResourceQuota`. + +[cols="1,1,1,1", options="header"] +|=== +| Kubernetes version +| EKS platform version +| Release notes +| Release date + +| `1.26.15` +| `eks.35` +| New platform version with Amazon EKS Hybrid Nodes support, security fixes and enhancements. For more information about Amazon EKS Hybrid Nodes, see <>. +| November 15, 2024 + +| `1.26.15` +| `eks.28` +| New platform version with security fixes and enhancements. +| October 21, 2024 + +| `1.26.15` +| `eks.24` +| New platform version with security fixes and enhancements. +| September 3, 2024 + +| `1.26.15` +| `eks.23` +| New platform version with security fixes and enhancements. +| August 28, 2024 + +| `1.26.15` +| `eks.22` +| New platform version with security fixes and enhancements. +| August 9, 2024 + +| `1.26.15` +| `eks.21` +| New platform version with security fixes and enhancements. +| July 2, 2024 + +| `1.26.15` +| `eks.18` +| New platform version with CoreDNS autoscaling, security fixes and enhancements. For more information about CoreDNS autoscaling, see <>. +| May 16, 2024 + +| `1.26.15` +| `eks.17` +| New platform version with security fixes and enhancements. +| April 18, 2024 + +| `1.26.14` +| `eks.16` +| New platform version with security fixes and enhancements. +| March 29, 2024 + +| `1.26.14` +| `eks.15` +| New platform version with security fixes and enhancements. +| March 20, 2024 + +| `1.26.13` +| `eks.14` +| New platform version with security fixes and enhancements. +| March 12, 2024 + +| `1.26.12` +| `eks.12` +| New platform version with security fixes and enhancements. +| January 17, 2024 + +| `1.26.11` +| `eks.11` +| New platform version with <>, security fixes and enhancements. +| December 14, 2023 + +| `1.26.11` +| `eks.10` +| New platform version with security fixes and enhancements. +| December 12, 2023 + +| `1.26.10` +| `eks.9` +| New platform version with <>, security fixes and enhancements. +| November 10, 2023 + +| `1.26.10` +| `eks.8` +| New platform version with security fixes and enhancements. +| November 3, 2023 + +| `1.26.9` +| `eks.7` +| New platform version with security fixes and enhancements. +| October 16, 2023 + +| `1.26.7` +| `eks.6` +| New platform version with security fixes and enhancements. 
+| August 30, 2023 + +| `1.26.7` +| `eks.5` +| New platform version with security fixes and enhancements. +| July 30, 2023 + +| `1.26.6` +| `eks.4` +| New platform version with security fixes and enhancements. +| June 30, 2023 + +| `1.26.5` +| `eks.3` +| New platform version with security fixes and enhancements. +| June 9, 2023 + +| `1.26.4` +| `eks.2` +| New platform version with security fixes and enhancements. +| May 5, 2023 + +| `1.26.2` +| `eks.1` +| Initial release of Kubernetes version `1.26` for EKS. For more information, see <>. +| April 11, 2023 +|=== + +[[platform-versions-1.25,platform-versions-1.25.title]] +== [.noloc]`Kubernetes` version `1.25` + +The following admission controllers are enabled for all `1.25` platform versions: `NodeRestriction`, `ExtendedResourceToleration`, `NamespaceLifecycle`, `LimitRanger`, `ServiceAccount`, `TaintNodesByCondition`, `PodSecurity`, `Priority`, `DefaultTolerationSeconds`, `DefaultStorageClass`, `StorageObjectInUseProtection`, `PersistentVolumeClaimResize`, `RuntimeClass`, `CertificateApproval`, `CertificateSigning`, `CertificateSubjectRestriction`, `DefaultIngressClass`, `MutatingAdmissionWebhook`, `ValidatingAdmissionWebhook`, `ResourceQuota`. + +[cols="1,1,1,1", options="header"] +|=== +| Kubernetes version +| EKS platform version +| Release notes +| Release date + +| `1.25.16` +| `eks.35` +| New platform version with Amazon EKS Hybrid Nodes support, security fixes and enhancements. For more information about Amazon EKS Hybrid Nodes, see <>. +| November 15, 2024 + +| `1.25.16` +| `eks.29` +| New platform version with security fixes and enhancements. +| October 21, 2024 + +| `1.25.16` +| `eks.25` +| New platform version with security fixes and enhancements. +| September 3, 2024 + + +| `1.25.16` +| `eks.24` +| New platform version with security fixes and enhancements. +| August 28, 2024 + +| `1.25.16` +| `eks.23` +| New platform version with security fixes and enhancements. +| August 9, 2024 + +| `1.25.16` +| `eks.22` +| New platform version with security fixes and enhancements. +| July 2, 2024 + +| `1.25.16` +| `eks.19` +| New platform version with CoreDNS autoscaling, security fixes and enhancements. For more information about CoreDNS autoscaling, see <>. +| May 16, 2024 + +| `1.25.16` +| `eks.18` +| New platform version with security fixes and enhancements. +| April 18, 2024 + +| `1.25.16` +| `eks.17` +| New platform version with security fixes and enhancements. +| March 29, 2024 + +| `1.25.16` +| `eks.16` +| New platform version with security fixes and enhancements. +| March 20, 2024 + +| `1.25.16` +| `eks.15` +| New platform version with security fixes and enhancements. +| March 12, 2024 + +| `1.25.16` +| `eks.13` +| New platform version with security fixes and enhancements. +| January 17, 2024 + +| `1.25.16` +| `eks.12` +| New platform version with <>, security fixes and enhancements. +| December 14, 2023 + +| `1.25.16` +| `eks.11` +| New platform version with security fixes and enhancements. +| December 12, 2023 + +| `1.25.15` +| `eks.10` +| New platform version with <>, security fixes and enhancements. +| November 10, 2023 + +| `1.25.15` +| `eks.9` +| New platform version with security fixes and enhancements. +| November 3, 2023 + +| `1.25.14` +| `eks.8` +| New platform version with security fixes and enhancements. +| October 16, 2023 + +| `1.25.12` +| `eks.7` +| New platform version with security fixes and enhancements. +| August 30, 2023 + +| `1.25.12` +| `eks.6` +| New platform version with security fixes and enhancements. 
+| July 30, 2023 + +| `1.25.11` +| `eks.5` +| New platform version with security fixes and enhancements. +| June 30, 2023 + +| `1.25.10` +| `eks.4` +| New platform version with security fixes and enhancements. +| June 9, 2023 + +| `1.25.9` +| `eks.3` +| New platform version with security fixes and enhancements. +| May 5, 2023 + +| `1.25.8` +| `eks.2` +| New platform version with security fixes and enhancements. +| March 24, 2023 + +| `1.25.6` +| `eks.1` +| Initial release of Kubernetes version `1.25` for EKS. For more information, see <>. +| February 21, 2023 +|=== + +[[platform-versions-1.24,platform-versions-1.24.title]] +== [.noloc]`Kubernetes` version `1.24` + +The following admission controllers are enabled for all `1.24` platform versions: `CertificateApproval`, `CertificateSigning`, `CertificateSubjectRestriction`, `DefaultIngressClass`, `DefaultStorageClass`, `DefaultTolerationSeconds`, `ExtendedResourceToleration`, `LimitRanger`, `MutatingAdmissionWebhook`, `NamespaceLifecycle`, `NodeRestriction`, `PersistentVolumeClaimResize`, `Priority`, `PodSecurityPolicy`, `ResourceQuota`, `RuntimeClass`, `ServiceAccount`, `StorageObjectInUseProtection`, `TaintNodesByCondition`, and `ValidatingAdmissionWebhook`. + +[cols="1,1,1,1", options="header"] +|=== +| Kubernetes version +| EKS platform version +| Release notes +| Release date + +| `1.24.17` +| `eks.39` +| New platform version with security fixes and enhancements. +| November 15, 2024 + +| `1.24.17` +| `eks.32` +| New platform version with security fixes and enhancements. +| October 21, 2024 + +| `1.24.17` +| `eks.28` +| New platform version with security fixes and enhancements. +| September 3, 2024 + + +| `1.24.17` +| `eks.27` +| New platform version with security fixes and enhancements. +| August 28, 2024 + + +| `1.24.17` +| `eks.26` +| New platform version with security fixes and enhancements. +| August 9, 2024 + +| `1.24.17` +| `eks.25` +| New platform version with security fixes and enhancements. +| July 2, 2024 + +| `1.24.17` +| `eks.22` +| New platform version with security fixes and enhancements. +| May 16, 2024 + +| `1.24.17` +| `eks.21` +| New platform version with security fixes and enhancements. +| April 18, 2024 + +| `1.24.17` +| `eks.20` +| New platform version with security fixes and enhancements. +| March 29, 2024 + +| `1.24.17` +| `eks.19` +| New platform version with security fixes and enhancements. +| March 20, 2024 + +| `1.24.17` +| `eks.18` +| New platform version with security fixes and enhancements. +| March 12, 2024 + +| `1.24.17` +| `eks.16` +| New platform version with security fixes and enhancements. +| January 17, 2024 + +| `1.24.17` +| `eks.15` +| New platform version with <>, security fixes and enhancements. +| December 14, 2023 + +| `1.24.17` +| `eks.14` +| New platform version with security fixes and enhancements. +| December 12, 2023 + +| `1.24.17` +| `eks.13` +| New platform version with <>, security fixes and enhancements. +| November 10, 2023 + +| `1.24.17` +| `eks.12` +| New platform version with security fixes and enhancements. +| November 3, 2023 + +| `1.24.17` +| `eks.11` +| New platform version with security fixes and enhancements. +| October 16, 2023 + +| `1.24.16` +| `eks.10` +| New platform version with security fixes and enhancements. +| August 30, 2023 + +| `1.24.16` +| `eks.9` +| New platform version with security fixes and enhancements. +| July 30, 2023 + +| `1.24.15` +| `eks.8` +| New platform version with security fixes and enhancements. 
+| June 30, 2023
+
+| `1.24.14`
+| `eks.7`
+| New platform version with security fixes and enhancements.
+| June 9, 2023
+
+| `1.24.13`
+| `eks.6`
+| New platform version with security fixes and enhancements.
+| May 5, 2023
+
+| `1.24.12`
+| `eks.5`
+| New platform version with security fixes and enhancements.
+| March 24, 2023
+
+| `1.24.8`
+| `eks.4`
+| New platform version with security fixes and enhancements.
+| January 27, 2023
+
+| `1.24.7`
+| `eks.3`
+| New platform version with security fixes and enhancements.
+| December 5, 2022
+
+| `1.24.7`
+| `eks.2`
+| New platform version with security fixes and enhancements.
+| November 18, 2022
+
+| `1.24.7`
+| `eks.1`
+| Initial release of Kubernetes version `1.24` for EKS. For more information, see <>.
+| November 15, 2022
+|===
+
+[[get-platform-version,get-platform-version.title]]
+== Get current platform version
+. Open the Amazon EKS console.
+. In the navigation pane, choose *Clusters*.
+. In the list of clusters, choose the name of the cluster that you want to check the platform version of.
+. Choose the *Overview* tab.
+. The *Platform Version* is available in the *Details* section.
+. Alternatively, to use the {aws} CLI, determine the name of the cluster that you want to check the platform version of.
+. Run the following command. Replace [.replaceable]`my-cluster` with the name of your cluster.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks describe-cluster --name my-cluster --query cluster.platformVersion
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+"eks.10"
+----
+
+
+[[change-platform-version,change-platform-version.title]]
+== Change platform version
+
+You cannot change the platform version of an EKS cluster. When new Amazon EKS platform versions become available for a [.noloc]`Kubernetes` version, EKS automatically upgrades all existing clusters to the latest Amazon EKS platform version for their corresponding [.noloc]`Kubernetes` version. Automatic upgrades of existing Amazon EKS platform versions are rolled out incrementally. You cannot use the {aws} Console or CLI to change the platform version.
+
+If you upgrade your [.noloc]`Kubernetes` version, your cluster moves to the most recent platform version for that [.noloc]`Kubernetes` version.
diff --git a/latest/ug/clusters/private-clusters.adoc b/latest/ug/clusters/private-clusters.adoc
new file mode 100644
index 00000000..22477bb6
--- /dev/null
+++ b/latest/ug/clusters/private-clusters.adoc
@@ -0,0 +1,106 @@
+//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[private-clusters,private-clusters.title]] += Deploy private clusters with limited internet access +:info_doctype: section +:info_title: Deploy private clusters with limited internet access +:info_titleabbrev: Private clusters +:info_abstract: Learn how to deploy and operate an Amazon EKS cluster without outbound internet access, including requirements for private container registries, endpoint access control, and VPC interface endpoints for {aws} services. + +[abstract] +-- +Learn how to deploy and operate an Amazon EKS cluster without outbound internet access, including requirements for private container registries, endpoint access control, and VPC interface endpoints for {aws} services. +-- + +This topic describes how to deploy an Amazon EKS cluster that is deployed on the {aws} Cloud, but doesn't have outbound internet access. If you have a local cluster on {aws} Outposts, see <>, instead of this topic. + +If you're not familiar with Amazon EKS networking, see link:containers/de-mystifying-cluster-networking-for-amazon-eks-worker-nodes[De-mystifying cluster networking for Amazon EKS worker nodes,type="blog"]. If your cluster doesn't have outbound internet access, then it must meet the following requirements: + + + +* Your cluster must pull images from a container registry that's in your VPC. You can create an Amazon Elastic Container Registry in your VPC and copy container images to it for your nodes to pull from. For more information, see <>. +* Your cluster must have endpoint private access enabled. This is required for nodes to register with the cluster endpoint. Endpoint public access is optional. For more information, see <>. +* Self-managed [.noloc]`Linux` and [.noloc]`Windows` nodes must include the following bootstrap arguments before they're launched. These arguments bypass Amazon EKS introspection and don't require access to the Amazon EKS API from within the VPC. ++ +.. Determine the value of your cluster's endpoint with the following command. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --name my-cluster --query cluster.endpoint --output text +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +https://EXAMPLE108C897D9B2F1B21D5EXAMPLE.sk1.region-code.eks.amazonaws.com +---- +.. Determine the value of your cluster's certificate authority with the following command. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --name my-cluster --query cluster.certificateAuthority --output text +---- ++ +The returned output is a long string. +.. Replace [.replaceable]`cluster-endpoint` and [.replaceable]`certificate-authority` in the following commands with the values returned in the output from the previous commands. For more information about specifying bootstrap arguments when launching self-managed nodes, see <> and <>. ++ +** For [.noloc]`Linux` nodes: ++ +[source,bash,subs="verbatim,attributes"] +---- +--apiserver-endpoint cluster-endpoint --b64-cluster-ca certificate-authority +---- ++ +For additional arguments, see the https://github.com/awslabs/amazon-eks-ami/blob/main/templates/al2/runtime/bootstrap.sh[bootstrap script] on [.noloc]`GitHub`. +** For [.noloc]`Windows` nodes: ++ +NOTE: If you're using custom service CIDR, then you need to specify it using the `-ServiceCIDR` parameter. 
Otherwise, the DNS resolution for [.noloc]`Pods` in the cluster will fail. ++ +[source,bash,subs="verbatim,attributes"] +---- +-APIServerEndpoint cluster-endpoint -Base64ClusterCA certificate-authority +---- ++ +For additional arguments, see <>. +* Your cluster's `aws-auth` `ConfigMap` must be created from within your VPC. For more information about creating and adding entries to the `aws-auth` `ConfigMap`, enter `eksctl create iamidentitymapping --help` in your terminal. If the `ConfigMap` doesn't exist on your server, `eksctl` will create it when you use the command to add an identity mapping. +* [.noloc]`Pods` configured with xref:iam-roles-for-service-accounts[IAM roles for service accounts,linkend=iam-roles-for-service-accounts] acquire credentials from an {aws} Security Token Service ({aws} STS) API call. If there is no outbound internet access, you must create and use an {aws} STS VPC endpoint in your VPC. Most {aws} `v1` SDKs use the global {aws} STS endpoint by default (`sts.amazonaws.com`), which doesn't use the {aws} STS VPC endpoint. To use the {aws} STS VPC endpoint, you might need to configure your SDK to use the regional {aws} STS endpoint (``sts.[.replaceable]`region-code`.amazonaws.com``). For more information, see <>. +* Your cluster's VPC subnets must have a VPC interface endpoint for any {aws} services that your [.noloc]`Pods` need access to. For more information, see link:vpc/latest/privatelink/create-interface-endpoint.html[Access an {aws} service using an interface VPC endpoint,type="documentation"]. Some commonly-used services and endpoints are listed in the following table. For a complete list of endpoints, see link:vpc/latest/privatelink/aws-services-privatelink-support.html[{aws} services that integrate with {aws} PrivateLink,type="documentation"] in the link:vpc/latest/privatelink/[{aws} PrivateLink Guide,type="documentation"]. ++ +We recommend that you link:vpc/latest/privatelink/interface-endpoints.html#enable-private-dns-names[enable private DNS names,type="documentation"] for your VPC endpoints, that way workloads can continue using public {aws} service endpoints without issues. ++ +[cols="1,1", options="header"] +|=== +|Service +|Endpoint + + +|Amazon EC2 +|com.amazonaws.[.replaceable]`region-code`.ec2 + +|Amazon Elastic Container Registry (for pulling container images) +|com.amazonaws.[.replaceable]`region-code`.ecr.api, com.amazonaws.[.replaceable]`region-code`.ecr.dkr, and com.amazonaws.[.replaceable]`region-code`.s3 + +|Application Load Balancers and Network Load Balancers +|com.amazonaws.[.replaceable]`region-code`.elasticloadbalancing + +|{aws} X-Ray +|com.amazonaws.[.replaceable]`region-code`.xray + +|Amazon CloudWatch Logs +|com.amazonaws.[.replaceable]`region-code`.logs + +|{aws} Security Token Service (required when using IAM roles for service accounts) +|com.amazonaws.[.replaceable]`region-code`.sts +|=== + + +* Any self-managed nodes must be deployed to subnets that have the VPC interface endpoints that you require. If you create a managed node group, the VPC interface endpoint security group must allow the CIDR for the subnets, or you must add the created node security group to the VPC interface endpoint security group. 
+* If your [.noloc]`Pods` use Amazon EFS volumes, then before deploying the <>, the driver's https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/deploy/kubernetes/overlays/stable/kustomization.yaml[kustomization.yaml] file must be changed to set the container images to use the same {aws} Region as the Amazon EKS cluster. +* You can use the <> to deploy {aws} Application Load Balancers (ALB) and Network Load Balancers to your private cluster. When deploying it, you should use https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/deploy/configurations/#controller-command-line-flags[command line flags] to set `enable-shield`, `enable-waf`, and `enable-wafv2` to false. https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/cert_discovery/#discover-via-ingress-rule-host[Certificate discovery] with hostnames from Ingress objects isn't supported. This is because the controller needs to reach {aws} Certificate Manager, which doesn't have a VPC interface endpoint. ++ +The controller supports network load balancers with IP targets, which are required for use with Fargate. For more information, see <> and <>. +* https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md[Cluster Autoscaler] is supported. When deploying Cluster Autoscaler [.noloc]`Pods`, make sure that the command line includes `--aws-use-static-instance-list=true`. For more information, see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#use-static-instance-list[Use Static Instance List] on [.noloc]`GitHub`. The worker node VPC must also include the {aws} STS VPC endpoint and autoscaling VPC endpoint. +* Some container software products use API calls that access the {aws} Marketplace Metering Service to monitor usage. Private clusters do not allow these calls, so you can't use these container types in private clusters. diff --git a/latest/ug/clusters/update-cluster.adoc b/latest/ug/clusters/update-cluster.adoc new file mode 100644 index 00000000..2a29b8ae --- /dev/null +++ b/latest/ug/clusters/update-cluster.adoc @@ -0,0 +1,256 @@ +//!!NODE_ROOT
+
+[.topic]
+[[update-cluster,update-cluster.title]]
+= Update existing cluster to new Kubernetes version
+:info_doctype: section
+:info_title: Update existing cluster to new Kubernetes version
+:info_titleabbrev: Update Kubernetes version
+:info_abstract: Learn how to update your Amazon EKS cluster to the latest Kubernetes version, ensuring compatibility with nodes and add-ons, and maintaining high availability during the process.
+
+include::../attributes.txt[]
+
+[abstract]
+--
+Learn how to update your Amazon EKS cluster to the latest Kubernetes version, ensuring compatibility with nodes and add-ons, and maintaining high availability during the process.
+--
+
+When a new [.noloc]`Kubernetes` version is available in Amazon EKS, you can update your Amazon EKS cluster to the latest version.
+
+[IMPORTANT]
+====
+
+Once you upgrade a cluster, you can't downgrade to a previous version. We recommend that, before you update to a new [.noloc]`Kubernetes` version, you review the information in <> and also review the update steps in this topic.
+
+====
+
+New [.noloc]`Kubernetes` versions sometimes introduce significant changes. Therefore, we recommend that you test the behavior of your applications against a new [.noloc]`Kubernetes` version before you update your production clusters. You can do this by building a continuous integration workflow to test your application behavior before moving to a new [.noloc]`Kubernetes` version.
+
+The update process consists of Amazon EKS launching new API server nodes with the updated [.noloc]`Kubernetes` version to replace the existing ones. Amazon EKS performs standard infrastructure and readiness health checks for network traffic on these new nodes to verify that they're working as expected. However, once you've started the cluster upgrade, you can't pause or stop it. If any of these checks fail, Amazon EKS reverts the infrastructure deployment, and your cluster remains on the prior [.noloc]`Kubernetes` version. Running applications aren't affected, and your cluster is never left in a non-deterministic or unrecoverable state. Amazon EKS regularly backs up all managed clusters, and mechanisms exist to recover clusters if necessary. We're constantly evaluating and improving our [.noloc]`Kubernetes` infrastructure management processes.
+
+To update the cluster, Amazon EKS requires up to five available IP addresses from the subnets that you specified when you created your cluster. Amazon EKS creates new cluster elastic network interfaces (network interfaces) in any of the subnets that you specified. The network interfaces may be created in different subnets than your existing network interfaces are in, so make sure that your security group rules allow <> for any of the subnets that you specified when you created your cluster. If any of the subnets that you specified when you created the cluster don't exist, don't have enough available IP addresses, or don't have security group rules that allow necessary cluster communication, then the update can fail.
+
+[NOTE]
+====
+
+To ensure that the API server endpoint for your cluster is always accessible, Amazon EKS provides a highly available [.noloc]`Kubernetes` control plane and performs rolling updates of API server instances during update operations. In order to account for changing IP addresses of API server instances supporting your [.noloc]`Kubernetes` API server endpoint, you must ensure that your API server clients manage reconnects effectively.
Recent versions of `kubectl` and the [.noloc]`Kubernetes` client https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/#programmatic-access-to-the-api[libraries] that are officially supported perform this reconnect process transparently.
+
+====
+
+== Considerations for Amazon EKS Auto Mode
+
+* The compute capability of Amazon EKS Auto Mode controls the Kubernetes version of nodes. After you upgrade the control plane, EKS Auto Mode will begin incrementally updating managed nodes. EKS Auto Mode respects pod disruption budgets.
+* You do not have to manually upgrade the capabilities of Amazon EKS Auto Mode, including the compute autoscaling, block storage, and load balancing capabilities.
+
+[[update-existing-cluster,update-existing-cluster.title]]
+== Step 1: Prepare for upgrade
+. Compare the [.noloc]`Kubernetes` version of your cluster control plane to the [.noloc]`Kubernetes` version of your nodes.
++
+** Get the [.noloc]`Kubernetes` version of your cluster control plane.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl version
+----
+** Get the [.noloc]`Kubernetes` version of your nodes. This command returns all self-managed and managed Amazon EC2, Fargate, and hybrid nodes. Each Fargate [.noloc]`Pod` is listed as its own node.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get nodes
+----
+
++
+Before updating your control plane to a new [.noloc]`Kubernetes` version, make sure that the [.noloc]`Kubernetes` minor version of both the managed nodes and Fargate nodes in your cluster is the same as your control plane's version. For example, if your control plane is running version `1.29` and one of your nodes is running version `1.28`, then you must update your nodes to version `1.29` before updating your control plane to `1.30`. We also recommend that you update your self-managed nodes and hybrid nodes to the same version as your control plane before updating the control plane. For more information, see <>, <>, and <>. If you have Fargate nodes with a minor version lower than the control plane version, first delete the [.noloc]`Pod` that's represented by the node. Then update your control plane. Any remaining [.noloc]`Pods` will update to the new version after you redeploy them.
+. If the [.noloc]`Kubernetes` version that you originally deployed your cluster with was [.noloc]`Kubernetes` `1.25` or later, skip this step.
++
+By default, the [.noloc]`Pod` security policy admission controller is enabled on Amazon EKS clusters. Before updating your cluster, ensure that the proper [.noloc]`Pod` security policies are in place. This is to avoid potential security issues. You can check for the default policy with the `kubectl get psp eks.privileged` command.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get psp eks.privileged
+----
++
+If you receive the following error, see <> before proceeding.
++
+[source,bash,subs="verbatim,attributes"]
+----
+Error from server (NotFound): podsecuritypolicies.extensions "eks.privileged" not found
+----
+. If the [.noloc]`Kubernetes` version that you originally deployed your cluster with was [.noloc]`Kubernetes` `1.18` or later, skip this step.
++
+You might need to remove a discontinued term from your [.noloc]`CoreDNS` manifest.
++
+.. Check to see if your [.noloc]`CoreDNS` manifest has a line that only has the word `upstream`.
++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get configmap coredns -n kube-system -o jsonpath='{$.data.Corefile}' | grep upstream +---- ++ +If no output is returned, this means that your manifest doesn't have the line. If this is the case, skip to the next step. If the word `upstream` is returned, remove the line. +.. Remove the line near the top of the file that only has the word `upstream` in the configmap file. Don't change anything else in the file. After the line is removed, save the changes. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit configmap coredns -n kube-system -o yaml +---- + +== Step 2: Review upgrade considerations + +* If you're updating to version `1.23` and use Amazon EBS volumes in your cluster, then you must install the Amazon EBS CSI driver in your cluster before updating your cluster to version `1.23` to avoid workload disruptions. For more information, see <> and <>. +* Kubernetes `1.24` and later use `containerd` as the default container runtime. If you're switching to the `containerd` runtime and already have [.noloc]`Fluentd` configured for [.noloc]`Container Insights`, then you must migrate [.noloc]`Fluentd` to [.noloc]`Fluent Bit` before updating your cluster. The [.noloc]`Fluentd` parsers are configured to only parse log messages in JSON format. Unlike `dockerd`, the `containerd` container runtime has log messages that aren't in JSON format. If you don't migrate to [.noloc]`Fluent Bit`, some of the configured [.noloc]`Fluentd's` parsers will generate a massive amount of errors inside the [.noloc]`Fluentd` container. For more information on migrating, see link:AmazonCloudWatch/latest/monitoring/Container-Insights-setup-logs-FluentBit.html[Set up Fluent Bit as a DaemonSet to send logs to CloudWatch Logs,type="documentation"]. +** Because Amazon EKS runs a highly available control plane, you can update only one minor version at a time. For more information about this requirement, see https://kubernetes.io/docs/setup/version-skew-policy/#kube-apiserver[Kubernetes Version and Version Skew Support Policy]. Assume that your current cluster version is version `1.28` and you want to update it to version `1.30`. You must first update your version `1.28` cluster to version `1.29` and then update your version `1.29` cluster to version `1.30`. +* Review the version skew between the [.noloc]`Kubernetes` `kube-apiserver` and the `kubelet` on your nodes. ++ +** Starting from [.noloc]`Kubernetes` version `1.28`, `kubelet` may be up to three minor versions older than `kube-apiserver`. See https://kubernetes.io/releases/version-skew-policy/#kubelet[Kubernetes upstream version skew policy]. +** If the `kubelet` on your managed and Fargate nodes is on [.noloc]`Kubernetes` version `1.25` or newer, you can update your cluster up to three versions ahead without updating the `kubelet` version. For example, if the `kubelet` is on version `1.25`, you can update your Amazon EKS cluster version from `1.25` to `1.26`, to `1.27`, and to `1.28` while the `kubelet` remains on version `1.25`. +** If the `kubelet` on your managed and Fargate nodes is on [.noloc]`Kubernetes` version `1.24` or older, it may only be up to two minor versions older than the `kube-apiserver`. In other words, if the `kubelet` is version `1.24` or older, you can only update your cluster up to two versions ahead. 
For example, if the `kubelet` is on version `1.21`, you can update your Amazon EKS cluster version from `1.21` to `1.22`, and to `1.23`, but you will not be able to update the cluster to `1.24` while the `kubelet` remains on `1.21`. +* As a best practice before starting an update, make sure that the `kubelet` on your nodes is at the same [.noloc]`Kubernetes` version as your control plane. +* If your cluster is configured with a version of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` that is earlier than `1.8.0`, then we recommend that you update the plugin to the latest version before updating your cluster. To update the plugin, see <>. +* If you're updating your cluster to version `1.25` or later and have the [.noloc]`{aws} Load Balancer Controller` deployed in your cluster, then update the controller to version `2.4.7` or later _before_ updating your cluster version to `1.25`. For more information, see the xref:kubernetes-1.25[Kubernetes 1.25,linkend=kubernetes-1.25] release notes. + +== Step 3: Update cluster control plane + +You can submit the request to upgrade your EKS control plane version using: + +* xref:step3-eksctl[eksctl] +* xref:step3-console[the {aws} console] +* xref:step3-cli[the {aws} cli] + +[[step3-eksctl,step3-eksctl.title]] +=== Update cluster - eksctl + +This procedure requires `eksctl` version `{eksctl-min-version}` or later. You can check your version with the following command: + +[source,bash,subs="verbatim,attributes"] +---- +eksctl version +---- + +For instructions on how to install and update `eksctl`, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. + +Update the [.noloc]`Kubernetes` version of your Amazon EKS control plane. Replace [.replaceable]`my-cluster` with your cluster name. Replace [.replaceable]`1.30` with the Amazon EKS supported version number that you want to update your cluster to. For a list of supported version numbers, see <>. + +[source,bash,subs="verbatim,attributes"] +---- +eksctl upgrade cluster --name my-cluster --version 1.30 --approve +---- + +The update takes several minutes to complete. + +Continue to <> + +[[step3-console,step3-console.title]] +=== Update cluster - {aws} console + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose the name of the Amazon EKS cluster to update and choose *Update cluster version*. +. For *[.noloc]`Kubernetes` version*, select the version to update your cluster to and choose *Update*. +. For *Cluster name*, enter the name of your cluster and choose *Confirm*. ++ +The update takes several minutes to complete. +. Continue to <> + +[[step3-cli,step3-cli.title]] +=== Update cluster - {aws} CLI + +. Update your Amazon EKS cluster with the following {aws} CLI command. Replace the [.replaceable]`example values` with your own. Replace [.replaceable]`1.30` with the Amazon EKS supported version number that you want to update your cluster to. For a list of supported version numbers, see <>. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-cluster-version --region region-code --name my-cluster --kubernetes-version 1.30 +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +{ + "update": { + "id": "b5f0ba18-9a87-4450-b5a0-825e6e84496f", + "status": "InProgress", + "type": "VersionUpdate", + "params": [ + { + "type": "Version", + "value": "1.30" + }, + { + "type": "PlatformVersion", + "value": "eks.1" + } + ], +[...] + "errors": [] + } + +---- + +. 
Monitor the status of your cluster update with the following command. Use the cluster name and update ID that the previous command returned. When a `Successful` status is displayed, the update is complete. The update takes several minutes to complete. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-update --region region-code --name my-cluster --update-id b5f0ba18-9a87-4450-b5a0-825e6e84496f +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +{ + "update": { + "id": "b5f0ba18-9a87-4450-b5a0-825e6e84496f", + "status": "Successful", + "type": "VersionUpdate", + "params": [ + { + "type": "Version", + "value": "1.30" + }, + { + "type": "PlatformVersion", + "value": "eks.1" + } + ], +[...] + "errors": [] + } + +---- +. Continue to <> + +[[step4,step4.title]] +== Step 4: Update cluster components + +. After your cluster update is complete, update your nodes to the same [.noloc]`Kubernetes` minor version as your updated cluster. For more information, see <>, <>, and <>. Any new [.noloc]`Pods` that are launched on Fargate have a `kubelet` version that matches your cluster version. Existing Fargate [.noloc]`Pods` aren't changed. +. (Optional) If you deployed the [.noloc]`Kubernetes` Cluster Autoscaler to your cluster before updating the cluster, update the Cluster Autoscaler to the latest version that matches the [.noloc]`Kubernetes` major and minor version that you updated to. ++ +.. Open the Cluster Autoscaler https://github.com/kubernetes/autoscaler/releases[releases] page in a web browser and find the latest Cluster Autoscaler version that matches your cluster's [.noloc]`Kubernetes` major and minor version. For example, if your cluster's [.noloc]`Kubernetes` version is `1.30` find the latest Cluster Autoscaler release that begins with `1.30`. Record the semantic version number (``1.30.n``, for example) for that release to use in the next step. +.. Set the Cluster Autoscaler image tag to the version that you recorded in the previous step with the following command. If necessary, replace [.replaceable]`1.30`.[.replaceable]`n`` with your own value. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl -n kube-system set image deployment.apps/cluster-autoscaler cluster-autoscaler=registry.k8s.io/autoscaling/cluster-autoscaler:v1.30.n +---- +. (Clusters with GPU nodes only) If your cluster has node groups with GPU support (for example, `p3.2xlarge`), you must update the https://github.com/NVIDIA/k8s-device-plugin[NVIDIA device plugin for Kubernetes][.noloc]`DaemonSet` on your cluster. Replace [.replaceable]`vX.X.X` with your desired https://github.com/NVIDIA/k8s-device-plugin/releases[NVIDIA/k8s-device-plugin] version before running the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/vX.X.X/deployments/static/nvidia-device-plugin.yml +---- +. Update the [.noloc]`Amazon VPC CNI plugin for Kubernetes`, [.noloc]`CoreDNS`, and `kube-proxy` add-ons. We recommend updating the add-ons to the minimum versions listed in <>. ++ +** If you are using Amazon EKS add-ons, select *Clusters* in the Amazon EKS console, then select the name of the cluster that you updated in the left navigation pane. Notifications appear in the console. They inform you that a new version is available for each add-on that has an available update. To update an add-on, select the *Add-ons* tab. 
In one of the boxes for an add-on that has an update available, select *Update now*, select an available version, and then select *Update*.
+** Alternatively, you can use the {aws} CLI or `eksctl` to update add-ons. For more information, see <>.
+. If necessary, update your version of `kubectl`. You must use a `kubectl` version that is within one minor version difference of your Amazon EKS cluster control plane. For example, a `1.29` `kubectl` client works with [.noloc]`Kubernetes` `1.28`, `1.29`, and `1.30` clusters. You can check your currently installed version with the following command.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl version --client
+----
+
+
+[[downgrade-cluster,downgrade-cluster.title]]
+== Downgrade the [.noloc]`Kubernetes` version for an Amazon EKS cluster
+
+You cannot downgrade the [.noloc]`Kubernetes` version of an Amazon EKS cluster. Instead, create a new cluster on a previous Amazon EKS version and migrate the workloads.
diff --git a/latest/ug/clusters/view-support-status.adoc b/latest/ug/clusters/view-support-status.adoc
new file mode 100644
index 00000000..d6e08980
--- /dev/null
+++ b/latest/ug/clusters/view-support-status.adoc
@@ -0,0 +1,20 @@
+include::../attributes.txt[]
+[.topic]
+[[view-support-status,view-support-status.title]]
+= View current cluster support period
+:info_titleabbrev: View support period
+
+The *cluster support period* section of the {aws} console indicates if your cluster is _currently_ on standard or extended support. If your cluster support period is *Extended support*, you are being charged for EKS extended support.
+
+For more information about standard and extended support, see <>.
+
+. Navigate to the *Clusters* page in the EKS section of the {aws} Console. Confirm the console is set to the same {aws} region as the cluster you want to review.
+. Review the *Support Period* column. If the value is *Standard support until...*, you are not currently being charged for extended support. You are within the standard support period. If the value is *Extended support...*, this cluster is currently being charged for extended support.
+
+
+[NOTE]
+====
+
+The *Support Period* cannot be retrieved with the {aws} API or CLI.
+
+====
diff --git a/latest/ug/clusters/view-upgrade-policy.adoc b/latest/ug/clusters/view-upgrade-policy.adoc
new file mode 100644
index 00000000..5c8d515c
--- /dev/null
+++ b/latest/ug/clusters/view-upgrade-policy.adoc
@@ -0,0 +1,51 @@
+include::../attributes.txt[]
+[.topic]
+[[view-upgrade-policy,view-upgrade-policy.title]]
+= View current cluster upgrade policy
+:info_titleabbrev: View upgrade policy
+
+The *cluster upgrade policy* determines what happens to your cluster when it leaves the standard support period. If your upgrade policy is `EXTENDED`, the cluster will not be automatically upgraded, and will enter extended support. If your upgrade policy is `STANDARD`, it will be automatically upgraded.
+
+Amazon EKS controls for [.noloc]`Kubernetes` version policy allow you to choose the end of standard support behavior for your EKS clusters. With these controls, you can decide which clusters should enter extended support and which clusters should be automatically upgraded at the end of standard support for a [.noloc]`Kubernetes` version.
+
+A minor version is under standard support in Amazon EKS for the first 14 months after it's released. Once a version is past the end of standard support date, it enters extended support for the next 12 months.
Extended support allows you to stay at a specific [.noloc]`Kubernetes` version for longer at an additional cost per cluster hour. You can enable or disable extended support for an EKS cluster. If you disable extended support, {aws} will automatically upgrade your cluster to the next version at the end of standard support. If you enable extended support, you can stay at the current version for an additional cost for a limited period of time. Plan to regularly upgrade your [.noloc]`Kubernetes` cluster, even if you use extended support.
+
+You can set the version policy for both new and existing clusters, using the `supportType` property. There are two options that can be used to set the version support policy:
+
+
+
+* `*STANDARD*` -- Your EKS cluster is eligible for automatic upgrade at the end of standard support. You will not incur extended support charges with this setting, but your EKS cluster will automatically upgrade to the next supported [.noloc]`Kubernetes` version in standard support.
+* `*EXTENDED*` -- Your EKS cluster will enter extended support once the [.noloc]`Kubernetes` version reaches the end of standard support. You will incur extended support charges with this setting. You can upgrade your cluster to a standard supported [.noloc]`Kubernetes` version to stop incurring extended support charges. Clusters running on extended support will be eligible for automatic upgrade at the end of extended support.
+
+Extended support is enabled by default for new and existing clusters. You can view whether extended support is enabled for a cluster in the {aws-management-console}, or by using the {aws} CLI.
+
+[IMPORTANT]
+====
+
+If you want your cluster to stay on its current [.noloc]`Kubernetes` version to take advantage of the extended support period, you must enable the extended support upgrade policy before the end of the standard support period.
+
+====
+
+You can only set the version support policy for your clusters while they're running a Kubernetes version in standard support. Once the version enters extended support, you will not be able to change this setting until you are running on a version in standard support.
+
+For example, if you have set your version support policy as `standard`, then you will not be able to change this setting after the Kubernetes version running on your cluster reaches the end of standard support. If you have set your version support policy as `extended`, then you will not be able to change this setting after the Kubernetes version running on your cluster reaches the end of standard support. In order to change the version support policy setting, your cluster must be running on a standard supported Kubernetes version.
+
+[[view-period-console,view-period-console.title]]
+== View cluster upgrade policy ({aws} Console)
+. Navigate to the *Clusters* page in the EKS section of the {aws} Console. Confirm the console is set to the same {aws} region as the cluster you want to review.
+. Review the *Upgrade Policy* column. If the value is *Standard Support*, your cluster will not enter extended support. If the value is *Extended Support*, your cluster will enter extended support.
+
+
+[[view-period-cli,view-period-cli.title]]
+== View cluster upgrade policy ({aws} CLI)
+. Verify the {aws} CLI is installed and you are logged in. link:cli/latest/userguide/getting-started-install.html[Learn how to update and install the {aws} CLI.,type="documentation"]
+. Determine the name of your EKS cluster. Set the CLI to the same {aws} region as your EKS cluster.
+. Run the following command. Replace [.replaceable]`my-cluster` with the name of your cluster.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks describe-cluster \
+--name my-cluster \
+--query "cluster.upgradePolicy.supportType"
+----
+. If the value is `STANDARD`, your cluster will not enter extended support. If the value is `EXTENDED`, your cluster will enter extended support.
diff --git a/latest/ug/clusters/windows-support.adoc b/latest/ug/clusters/windows-support.adoc
new file mode 100644
index 00000000..6826baef
--- /dev/null
+++ b/latest/ug/clusters/windows-support.adoc
@@ -0,0 +1,204 @@
+//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[windows-support,windows-support.title]] += Deploy [.noloc]`Windows` nodes on EKS clusters +:info_doctype: section +:info_title: Deploy Windows nodes on EKS \ + clusters +:info_titleabbrev: Enable Windows support +:info_abstract: Learn how to enable and manage Windows support for your Amazon EKS cluster to run \ + Windows containers alongside Linux containers. + +[abstract] +-- +Learn how to enable and manage Windows support for your Amazon EKS cluster to run Windows containers alongside Linux containers. +-- + +Before deploying [.noloc]`Windows` nodes, be aware of the following considerations. + + +* EKS Auto Mode does not support Windows nodes +* You can use host networking on Windows nodes using `HostProcess` Pods. For more information, see https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/[Create a Windows HostProcessPod] in the [.noloc]`Kubernetes` documentation. +* Amazon EKS clusters must contain one or more [.noloc]`Linux` or Fargate nodes to run core system [.noloc]`Pods` that only run on [.noloc]`Linux`, such as [.noloc]`CoreDNS`. +* The `kubelet` and `kube-proxy` event logs are redirected to the `EKS Windows` Event Log and are set to a 200 MB limit. +* You can't use xref:security-groups-for-pods[Assign security groups to individual pods,linkend=security-groups-for-pods] with [.noloc]`Pods` running on [.noloc]`Windows` nodes. +* You can't use xref:cni-custom-network[custom networking,linkend=cni-custom-network] with [.noloc]`Windows` nodes. +* You can't use `IPv6` with [.noloc]`Windows` nodes. +* [.noloc]`Windows` nodes support one elastic network interface per node. By default, the number of [.noloc]`Pods` that you can run per [.noloc]`Windows` node is equal to the number of IP addresses available per elastic network interface for the node's instance type, minus one. For more information, see link:AWSEC2/latest/WindowsGuide/using-eni.html#AvailableIpPerENI[IP addresses per network interface per instance type,type="documentation"] in the _Amazon EC2 User Guide_. +* In an Amazon EKS cluster, a single service with a load balancer can support up to 1024 back-end [.noloc]`Pods`. Each [.noloc]`Pod` has its own unique IP address. The previous limit of 64 [.noloc]`Pods` is no longer the case, after https://github.com/microsoft/Windows-Containers/issues/93[a Windows Server update] starting with https://support.microsoft.com/en-us/topic/march-22-2022-kb5011551-os-build-17763-2746-preview-690a59cd-059e-40f4-87e8-e9139cc65de4[OS Build 17763.2746]. +* Windows containers aren't supported for Amazon EKS [.noloc]`Pods` on Fargate. +* You can't use Amazon EKS Hybrid Nodes with Windows as the operating system for the host. +* You can't retrieve logs from the `vpc-resource-controller` Pod. You previously could when you deployed the controller to the data plane. +* There is a cool down period before an `IPv4` address is assigned to a new Pod. This prevents traffic from flowing to an older Pod with the same `IPv4` address due to stale `kube-proxy` rules. +* The source for the controller is managed on [.noloc]`GitHub`. To contribute to, or file issues against the controller, visit the https://github.com/aws/amazon-vpc-resource-controller-k8s[project] on [.noloc]`GitHub`. +* When specifying a custom AMI ID for [.noloc]`Windows` managed node groups, add `eks:kube-proxy-windows` to your {aws} IAM Authenticator configuration map. For more information, see <>. 
+* If preserving your available IPv4 addresses is crucial for your subnet, refer to https://aws.github.io/aws-eks-best-practices/windows/docs/networking/#ip-address-management[EKS Best Practices Guide - Windows Networking IP Address Management] for guidance. + + +* An existing cluster. The cluster must be running one of the [.noloc]`Kubernetes` versions and platform versions listed in the following table. Any [.noloc]`Kubernetes` and platform versions later than those listed are also supported. ++ +[[windows-support-platform-versions]] +[cols="1,1", options="header"] +|=== +|Kubernetes version +|Platform version + +|1.31 +|eks.4 + +|1.30 +|eks.2 + +|1.29 +|eks.1 + +|1.28 +|eks.1 + +|1.27 +|eks.1 + +|1.26 +|eks.1 + +|1.25 +|eks.1 + +|1.24 +|eks.2 +|=== +* Your cluster must have at least one (we recommend at least two) [.noloc]`Linux` node or Fargate [.noloc]`Pod` to run [.noloc]`CoreDNS`. If you enable legacy [.noloc]`Windows` support, you must use a [.noloc]`Linux` node (you can't use a Fargate [.noloc]`Pod`) to run [.noloc]`CoreDNS`. +* An existing <>. + + +[[enable-windows-support,enable-windows-support.title]] +== Enable [.noloc]`Windows` support +. If you don't have Amazon Linux nodes in your cluster and use security groups for [.noloc]`Pods`, skip to the next step. Otherwise, confirm that the `AmazonEKSVPCResourceController` managed policy is attached to your <>. Replace [.replaceable]`eksClusterRole` with your cluster role name. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam list-attached-role-policies --role-name eksClusterRole +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +{ + "AttachedPolicies": [ + { + "PolicyName": "AmazonEKSClusterPolicy", + "PolicyArn": "{arn-aws}iam::aws:policy/AmazonEKSClusterPolicy" + }, + { + "PolicyName": "AmazonEKSVPCResourceController", + "PolicyArn": "{arn-aws}iam::aws:policy/AmazonEKSVPCResourceController" + } + ] +} +---- ++ +If the policy is attached, as it is in the previous output, skip the next step. +. Attach the *link:aws-managed-policy/latest/reference/AmazonEKSVPCResourceController.html[AmazonEKSVPCResourceController,type="documentation"]* managed policy to your <>. Replace [.replaceable]`eksClusterRole` with your cluster role name. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam attach-role-policy \ + --role-name eksClusterRole \ + --policy-arn {arn-aws}iam::aws:policy/AmazonEKSVPCResourceController +---- +. Create a file named [.replaceable]`vpc-resource-controller-configmap.yaml` with the following contents. ++ +[source,yaml,subs="verbatim,attributes"] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: amazon-vpc-cni + namespace: kube-system +data: + enable-windows-ipam: "true" +---- +. Apply the `ConfigMap` to your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f vpc-resource-controller-configmap.yaml +---- +. Verify that your `aws-auth` `ConfigMap` contains a mapping for the instance role of the [.noloc]`Windows` node to include the `eks:kube-proxy-windows` RBAC permission group. You can verify by running the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get configmap aws-auth -n kube-system -o yaml +---- ++ +An example output is as follows. 
++ +[source,bash,subs="verbatim,attributes"] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: aws-auth + namespace: kube-system +data: + mapRoles: | + - groups: + - system:bootstrappers + - system:nodes + - eks:kube-proxy-windows # This group is required for Windows DNS resolution to work + rolearn: {arn-aws}iam::111122223333:role/eksNodeRole + username: system:node:{{EC2PrivateDNSName}} +[...] +---- ++ +You should see `eks:kube-proxy-windows` listed under groups. If the group isn't specified, you need to update your `ConfigMap` or create it to include the required group. For more information about the `aws-auth` `ConfigMap`, see <>. + + +[[windows-support-pod-deployment,windows-support-pod-deployment.title]] +== Deploy Windows Pods + +When you deploy Pods to your cluster, you need to specify the operating system that they use if you're running a mixture of node types. + +For [.noloc]`Linux` [.noloc]`Pods`, use the following node selector text in your manifests. + +[source,yaml,subs="verbatim,attributes"] +---- +nodeSelector: + kubernetes.io/os: linux + kubernetes.io/arch: amd64 +---- + +For [.noloc]`Windows` [.noloc]`Pods`, use the following node selector text in your manifests. + +[source,yaml,subs="verbatim,attributes"] +---- +nodeSelector: + kubernetes.io/os: windows + kubernetes.io/arch: amd64 +---- + +You can deploy a <> to see the node selectors in use. + +[[windows-support-pod-density,windows-support-pod-density.title]] +== Support higher [.noloc]`Pod` density on Windows nodes + +In Amazon EKS, each [.noloc]`Pod` is allocated an `IPv4` address from your VPC. Due to this, the number of [.noloc]`Pods` that you can deploy to a node is constrained by the available IP addresses, even if there are sufficient resources to run more [.noloc]`Pods` on the node. Since only one elastic network interface is supported by a Windows node, by default, the maximum number of available IP addresses on a Windows node is equal to: + +[source,bash,subs="verbatim,attributes"] +---- +Number of private IPv4 addresses for each interface on the node - 1 +---- + +One IP address is used as the primary IP address of the network interface, so it can't be allocated to [.noloc]`Pods`. + +You can enable higher [.noloc]`Pod` density on Windows nodes by enabling IP prefix delegation. This feature enables you to assign a `/28` `IPv4` prefix to the primary network interface, instead of assigning secondary `IPv4` addresses. Assigning an IP prefix increases the maximum available `IPv4` addresses on the node to: + +[source,bash,subs="verbatim,attributes"] +---- +(Number of private IPv4 addresses assigned to the interface attached to the node - 1) * 16 +---- + +With this significantly larger number of available IP addresses, available IP addresses shouldn't limit your ability to scale the number of [.noloc]`Pods` on your nodes. For more information, see <>. diff --git a/latest/ug/clusters/zone-shift-enable.adoc b/latest/ug/clusters/zone-shift-enable.adoc new file mode 100644 index 00000000..78107e9b --- /dev/null +++ b/latest/ug/clusters/zone-shift-enable.adoc @@ -0,0 +1,69 @@ +//!!NODE_ROOT
+[.topic] +[[zone-shift-enable,zone-shift-enable.title]] += Enable EKS Zonal Shift to avoid impaired Availability Zones +:info_doctype: section +:info_titleabbrev: Enable Zonal Shift +:aws: pass:q[[.shared]``AWS``] + + + + +Amazon Application Recovery Controller (ARC) helps you manage and coordinate recovery for your applications across Availability Zones (AZs) and works with many services, including Amazon EKS. With EKS support for ARC zonal shift, you can shift in-cluster network traffic away from an impaired AZ. You can also authorize {aws} to monitor the health of your AZs and temporarily shift network traffic away from an unhealthy AZ on your behalf. + +*How to use EKS Zonal Shift:* + +. Enable your EKS cluster with Amazon Application Recovery Controller (ARC). This is done at the cluster level using the Amazon EKS Console, the {aws} CLI, CloudFormation, or eksctl. +. Once enabled, you can manage zonal shifts or zonal autoshifts using the ARC Console, the {aws} CLI, or the Zonal Shift and Zonal Autoshift APIs. + +Note that after you register an EKS cluster with ARC, you still need to configure ARC. For example, you can use the ARC console to configure Zonal Autoshift. + +For more detailed information about how EKS Zonal Shift works, and how to design your workloads to handle impaired availability zones, see <>. + +*Considerations:* + +* EKS Auto Mode does not support Amazon Application Recovery Controller, Zonal Shift, and Zonal Autoshift + +== What is Amazon Application Recovery Controller? + +Amazon Application Recovery Controller (ARC) helps you prepare for and accomplish faster recovery for applications running on {aws}. Zonal shift enables you to quickly recover from Availability Zone (AZ) impairments, by temporarily moving traffic for a supported resource away from an AZ, to healthy AZs in the {aws} Region. + +link:r53recovery/latest/dg/what-is-route53-recovery.html["Learn more about Amazon Application Recovery Controller (ARC)", type="documentation"] + +== What is zonal shift? + +Zonal shift is a capability in ARC that allows you to move traffic for a resource like an EKS cluster or an Elastic Load Balancer away from an Availability Zone in an {aws} Region to quickly mitigate an issue and quickly recover your application. You might choose to shift traffic, for example, because a bad deployment is causing latency issues, or because the Availability Zone is impaired. A zonal shift requires no advance configuration steps. + +link:r53recovery/latest/dg/arc-zonal-shift.how-it-works.html["Learn more about ARC Zonal Shift", type="documentation"] + +== What is zonal autoshift? + +Zonal autoshift is a capability in ARC that you can enable to authorize {aws} to shift traffic away from an AZ for supported resources, on your behalf, to healthy AZs in the {aws} Region. {aws} starts an autoshift when internal telemetry indicates that there is an impairment in one AZ in a Region that could potentially impact customers. The internal telemetry incorporates metrics from multiple sources, including the {aws} network, and the Amazon EC2 and Elastic Load Balancing services. + +{aws} ends autoshifts when indicators show that there is no longer an issue or potential issue. + +link:r53recovery/latest/dg/arc-zonal-autoshift.how-it-works.html["Learn more about ARC Zonal Autoshift", type="documentation"] + +== What does EKS do during an autoshift? + +EKS updates networking configurations to avoid directing traffic to impaired AZs. 
Additionally, if you are using Managed Node Groups, EKS will only launch new nodes in the healthy AZs during a zonal shift. When the shift expires or gets cancelled, the networking configurations will be restored to include the AZ that was previously detected as unhealthy. + +xref:zone-shift[Learn more about EKS Zonal Shift]. + +[[zone-shift-enable-steps,zone-shift-enable-steps.title]] +== Register EKS cluster with Amazon Application Recovery Controller (ARC) ({aws} console) + +. Find the name and region of the EKS cluster you want to register with ARC. +. Navigate to the link:eks[EKS console,type="console"] in that region, and select your cluster. +. On the *Cluster info* page, select the *Overview* tab. +. Under the *Zonal shift* heading, select the *Manage* button. +. Select *enable* or *disable* for _EKS Zonal Shift_. + +Now your EKS cluster is registered with ARC. + +If you want {aws} to detect and avoid impaired availability zones, you need to configure ARC Zonal Autoshift. For example, you can do this in the ARC console. + +== Next Steps + +* Learn how to link:r53recovery/latest/dg/arc-zonal-autoshift.start-cancel.html["enable zonal autoshift",type="documentation"] +* Learn how to manually link:r53recovery/latest/dg/arc-zonal-shift.start-cancel.html["start a zonal shift",type="documentation"] diff --git a/latest/ug/clusters/zone-shift.adoc b/latest/ug/clusters/zone-shift.adoc new file mode 100644 index 00000000..4153ff05 --- /dev/null +++ b/latest/ug/clusters/zone-shift.adoc @@ -0,0 +1,278 @@ +//!!NODE_ROOT
+[.topic] +[[zone-shift,zone-shift.title]] += Learn about Amazon Application Recovery Controller's (ARC) Zonal Shift in Amazon EKS +:info_doctype: section +:info_titleabbrev: Learn about Zonal Shift +:aws: pass:q[[.shared]``AWS``] +:imagesdir: images/ + +//GDC: remove use of "failure" + +Kubernetes has native features that enable you to make your applications more resilient to events such as the degraded health or impairment of an Availability Zone (AZ). When running your workloads in an Amazon EKS cluster, you can further improve your application environment's fault tolerance and application recovery using link:r53recovery/latest/dg/arc-zonal-shift.html["Amazon Application Recovery Controller's (ARC) zonal shift",type="documentation"] or link:r53recovery/latest/dg/arc-zonal-autoshift.html["zonal autoshift",type="documentation"]. ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary. + +You can start a zonal shift for an EKS cluster, or you can allow {aws} to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your EKS cluster will automatically route traffic to targets in the healthy AZs. For those customers seeking the highest availability goals, in the case that an AZ becomes impaired, it can be important to be able to steer all traffic away from the impaired AZ until it recovers. For this, you can also link:r53recovery/latest/dg/arc-zonal-shift.resource-types.html["_enable an ALB or NLB with ARC zonal shift_",type="documentation"]. + + +== Understanding East-West Network Traffic Flow Between Pods + +The following diagram illustrates two example workloads, Orders, and Products. The purpose of this example is to show how workloads and Pods in different AZs communicate. + +image::zs-traffic-flow-before-1.png[Illustration of network traffic] + +image::zs-traffic-flow-before-2.png[Illustration of network traffic] + +. For Orders to communicate with Products, it must first resolve the DNS name of the destination service. Orders will communicate with CoreDNS to fetch the virtual IP address (Cluster IP) for that Service. Once Orders resolves the Products service name, it sends traffic to that target IP. +. The kube-proxy runs on every node in the cluster and continuously watches the https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/[EndpointSlices] for Services. When a Service is created, an EndpointSlice is created and managed in the background by the EndpointSlice controller. Each EndpointSlice has a list or table of endpoints containing a subset of Pod addresses along with the nodes that they're running on. The kube-proxy sets up routing rules for each of these Pod endpoints using `iptables` on the nodes. The kube-proxy is also responsible for a basic form of load balancing by redirecting traffic destined to a service's Cluster IP to instead be sent to a Pod's IP address directly. The kube-proxy does this by rewriting the destination IP on the outgoing connection. +. The network packets are then sent to the Products Pod in AZ 2 via the ENIs on the respective nodes (as depicted in the diagram above). 
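+
+To see this mapping yourself, you can inspect the EndpointSlices behind a Service. The following is a minimal sketch that assumes the example `products` Service above lives in the `default` namespace; the label and fields used come from the standard `discovery.k8s.io/v1` API.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# List the EndpointSlices for the "products" Service and show which zone each Pod endpoint is in.
+kubectl get endpointslices -n default \
+  -l kubernetes.io/service-name=products \
+  -o custom-columns=NAME:.metadata.name,ZONES:.endpoints[*].zone,ADDRESSES:.endpoints[*].addresses
+----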
+
+
+=== Understanding ARC Zonal Shift in EKS
+
+If there is an AZ impairment in your environment, you can initiate a zonal shift for your EKS cluster environment. Alternatively, you can allow {aws} to manage this for you with zonal autoshift. With zonal autoshift, {aws} will monitor the overall AZ health and respond to a potential AZ impairment by automatically shifting traffic away from the impaired AZ in your cluster environment.
+
+Once your EKS cluster is enabled for zonal shift with ARC, you can trigger a zonal shift or enable zonal autoshift using the ARC Console, the {aws} CLI, or the zonal shift and zonal autoshift APIs.
+During an EKS zonal shift, the following will automatically take place:
+
+* All the nodes in the impacted AZ will be cordoned. This will prevent the Kubernetes Scheduler from scheduling new Pods onto the nodes in the unhealthy AZ.
+* If you're using link:eks/latest/userguide/managed-node-groups.html["Managed Node Groups",type="documentation"], link:autoscaling/ec2/userguide/auto-scaling-benefits.html#AutoScalingBehavior.InstanceUsage["_Availability Zone rebalancing_",type="documentation"] will be suspended, and your Auto Scaling Group (ASG) will be updated to ensure that new EKS Data Plane nodes are only launched in the healthy AZs.
+* The nodes in the unhealthy AZ will not be terminated and the Pods will not be evicted from these nodes. This is to ensure that when a zonal shift expires or gets cancelled, your traffic can be safely returned to the AZ, which still has full capacity.
+* The EndpointSlice controller will find all the Pod endpoints in the impaired AZ and remove them from the relevant EndpointSlices. This will ensure that only Pod endpoints in healthy AZs are targeted to receive network traffic. When a zonal shift is cancelled or expires, the EndpointSlice controller will update the EndpointSlices to include the endpoints in the restored AZ.
+
+The diagrams below depict a high-level flow of how EKS zonal shift ensures that only healthy Pod endpoints are targeted in your cluster environment.
+
+
+image::zs-traffic-flow-after-1.png[Illustration of network traffic]
+
+image::zs-traffic-flow-after-2.png[Illustration of network traffic]
+
+
+== EKS Zonal Shift Requirements
+
+For zonal shift to work successfully in EKS, you need to set up your cluster environment to be resilient to an AZ impairment beforehand. Below is a list of the steps that you have to follow.
+
+* Provision your cluster's worker nodes across multiple AZs
+* Provision enough compute capacity to withstand removal of a single AZ
+* Pre-scale your Pods (including CoreDNS) in every AZ
+* Spread multiple Pod replicas across all AZs to ensure that shifting away from a single AZ will leave you with sufficient capacity
+* Co-locate interdependent or related Pods in the same AZ
+* Test that your cluster environment would work as expected with one less AZ by manually starting a zonal shift. Alternatively, you can enable zonal autoshift and rely on the autoshift practice runs. This is not required for zonal shift to work in EKS, but it's strongly recommended.
+
+=== Provision Your EKS Worker Nodes Across Multiple AZs
+
+{aws} Regions have multiple, separate locations with physical data centers known as Availability Zones (AZs). AZs are designed to be physically isolated from one another to avoid simultaneous impact that could affect an entire Region. When provisioning an EKS cluster, you should deploy your worker nodes across multiple AZs in a Region.
This will make your cluster environment more resilient to the impairment of a single AZ, and allow you to maintain high availability (HA) of your applications running in the other AZs. When you start a zonal shift away from the impacted AZ, your EKS environment's in-cluster network will automatically update to only use healthy AZs, while maintaining a highly available posture for your cluster.
+
+Ensuring that you have such a multi-AZ setup for your EKS environment will enhance the overall reliability of your system. However, multi-AZ environments can play a significant role in how application data is transferred and processed, which will in turn have an impact on your environment's network charges. In particular, frequent egress cross-zone traffic (traffic distributed between AZs) can have a major impact on your network-related costs. You can apply different strategies to control the amount of cross-zone traffic between Pods in your EKS cluster and drive down the associated costs. Refer to https://aws.github.io/aws-eks-best-practices/cost_optimization/cost_opt_networking/[_this best practice guide_] for more details on how to optimize network costs when running highly available EKS environments.
+
+The diagram below depicts a highly available EKS environment with 3 healthy AZs.
+
+image::zs-ha-before-failure.png[Illustration of network]
+
+The diagram below depicts how an EKS environment with 3 AZs is resilient to an AZ impairment and remains highly available because of the 2 other healthy AZs.
+
+image::zs-ha-after-failure.png[Illustration of network]
+
+=== Provision Enough Compute Capacity to Withstand Removal of a Single AZ
+
+To optimize resource utilization and costs for your compute infrastructure in the EKS Data Plane, it's a best practice to align compute capacity with your workload requirements. However, *if all your worker nodes are at full capacity*, then you are reliant on new worker nodes being added to the EKS Data Plane before new Pods can be scheduled. When running critical workloads, it is generally a good practice to run with redundant capacity online to handle eventualities such as sudden increases in load or node health issues. If you plan to use zonal shift, you are planning to remove an entire AZ of capacity, so you need to adjust your redundant compute capacity to be sufficient to handle the load even with an AZ offline.
+
+When scaling your compute, the process of adding new nodes to the EKS Data Plane will take some time, which can affect the real-time performance and availability of your applications, especially in the event of a zonal impairment. Your EKS environment should be able to absorb the load of losing an AZ to avoid a degraded experience for your end users or clients. This means minimizing or eliminating any lag between the time at which a new Pod is needed and when it's actually scheduled on a worker node.
+
+Additionally, in the event of a zonal impairment, you should mitigate the risk of a potential compute capacity constraint that would prevent newly required nodes from being added to your EKS Data Plane in the healthy AZs.
+
+To accomplish this, you should over-provision compute capacity in some of the worker nodes in each of the AZs so that the Kubernetes Scheduler has pre-existing capacity available for new Pod placements, especially when you have one less AZ in your environment. One common way to hold this spare capacity is shown in the sketch below.
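+
+The following is a minimal sketch of that pattern, assuming you reserve headroom with a low-priority "placeholder" Deployment of pause containers. The `capacity-reservation` name, replica count, and resource requests are illustrative; size them to match the capacity you want to keep in reserve. Because the placeholder Pods use a negative-priority `PriorityClass`, the Kubernetes Scheduler preempts them as soon as real workloads need the space.
+
+[source,yaml]
+----
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: overprovisioning
+value: -1
+globalDefault: false
+description: "Placeholder capacity that real workloads can preempt."
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: capacity-reservation
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: capacity-reservation
+  template:
+    metadata:
+      labels:
+        app: capacity-reservation
+    spec:
+      priorityClassName: overprovisioning
+      # Spread the placeholder Pods evenly across AZs so each zone holds spare capacity.
+      topologySpreadConstraints:
+        - maxSkew: 1
+          topologyKey: topology.kubernetes.io/zone
+          whenUnsatisfiable: DoNotSchedule
+          labelSelector:
+            matchLabels:
+              app: capacity-reservation
+      containers:
+        - name: pause
+          image: registry.k8s.io/pause:3.9
+          resources:
+            requests:
+              cpu: "1"
+              memory: 1Gi
+----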
+
+
+=== Run & Spread Multiple Pod Replicas Across AZs
+
+Kubernetes allows you to pre-scale your workloads by running multiple instances (Pod replicas) of a single application. Running multiple Pod replicas for an application eliminates a single point of failure and increases its overall performance by reducing the resource strain on a single replica. However, to have both high availability and better fault tolerance for your applications, you should run and spread multiple replicas of an application across different failure domains (also referred to as topology domains), in this case AZs. With https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/[topology spread constraints], you can set up your applications to have pre-existing, static stability so that, in the case of an AZ impairment, you'll have enough replicas in the healthy AZs to immediately handle any additional spike or surge in traffic that they may experience.
+
+The diagram below depicts an EKS environment with east-to-west traffic flow when all AZs are healthy.
+
+image::zs-spread-constraints.png[Illustration of network]
+
+
+The diagram below depicts an EKS environment with east-to-west traffic flow when a single AZ fails, and you initiate a zonal shift.
+
+image::zs-spread-constraints-2.png[Illustration of network]
+
+The code snippet below is an example of how to set up your workload with this Kubernetes feature.
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: orders
+spec:
+  replicas: 9
+  selector:
+    matchLabels:
+      app: orders
+  template:
+    metadata:
+      labels:
+        app: orders
+        tier: backend
+    spec:
+      topologySpreadConstraints:
+      - maxSkew: 1
+        topologyKey: "topology.kubernetes.io/zone"
+        whenUnsatisfiable: ScheduleAnyway
+        labelSelector:
+          matchLabels:
+            app: orders
+----
+
+
+Most importantly, you should run multiple replicas of your DNS server software (CoreDNS/kube-dns) and apply similar topology spread constraints if they are not already configured by default. This will help ensure that you have enough DNS Pods in healthy AZs to continue handling service discovery requests for other communicating Pods in the cluster if there's a single AZ impairment. The link:eks/latest/userguide/managing-coredns.html["CoreDNS EKS add-on",type="documentation"] has default settings for the CoreDNS Pods to be spread across your cluster's Availability Zones if there are nodes in multiple AZs available. You can also replace these default settings with your own custom configurations.
+
+When installing https://github.com/coredns/helm/tree/master[CoreDNS with Helm], you can update the `replicaCount` in the https://github.com/coredns/helm/blob/master/charts/coredns/values.yaml[values.yaml file] to ensure that you have a sufficient number of replicas in each AZ. In addition, to ensure that these replicas are spread across the different AZs in your cluster environment, you should update the `topologySpreadConstraints` property in the same values.yaml file. The code snippet below demonstrates how to configure CoreDNS for this.
+
+**CoreDNS Helm values.yaml**
+
+[source,yaml]
+----
+replicaCount: 6
+topologySpreadConstraints:
+  - maxSkew: 1
+    topologyKey: topology.kubernetes.io/zone
+    whenUnsatisfiable: ScheduleAnyway
+    labelSelector:
+      matchLabels:
+        k8s-app: kube-dns
+----
+
+
+In the event of an AZ impairment, you can absorb the increased load on the CoreDNS Pods by using an autoscaling system for CoreDNS.
The number of DNS instances you require will depend on the number of workloads running in your cluster. CoreDNS is CPU bound, which allows it to scale based on CPU using the https://aws.github.io/aws-eks-best-practices/reliability/docs/application/#horizontal-pod-autoscaler-hpa[Horizontal Pod Autoscaler (HPA)]. Below is an example that you can modify to suit your needs.
+
+
+[source,yaml]
+----
+apiVersion: autoscaling/v1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: coredns
+  namespace: default
+spec:
+  maxReplicas: 20
+  minReplicas: 2
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: coredns
+  targetCPUUtilizationPercentage: 50
+----
+
+Alternatively, EKS can manage the autoscaling of the CoreDNS Deployment in the EKS add-on version of CoreDNS. This CoreDNS autoscaler continuously monitors the cluster state, including the number of nodes and CPU cores. Based on that information, the controller will dynamically adapt the number of replicas of the CoreDNS deployment in an EKS cluster.
+
+To enable the link:eks/latest/userguide/coredns-autoscaling.html["autoscaling configuration in the CoreDNS EKS add-on",type="documentation"], you should add the following optional configuration settings:
+
+
+[source,json]
+----
+{
+  "autoScaling": {
+    "enabled": true
+  }
+}
+----
+
+
+You can also use https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/[NodeLocal DNS] or the https://github.com/kubernetes-sigs/cluster-proportional-autoscaler[cluster proportional autoscaler] to scale CoreDNS. You can read more about https://aws.github.io/aws-eks-best-practices/scalability/docs/cluster-services/#scale-coredns-horizontally[scaling CoreDNS horizontally] in the EKS best practices guide.
+
+=== Colocate Interdependent Pods in the Same AZ
+
+In most cases, you may be running distinct workloads that have to communicate with each other for successful execution of an end-to-end process. If the distinct applications are spread across different AZs but are not colocated in the same AZ, then a single AZ impairment may impact the underlying end-to-end process. For example, if *Application A* has multiple replicas in AZ 1 and AZ 2, but *Application B* has all its replicas in AZ 3, then the loss of AZ 3 will affect any end-to-end processes between these two workloads (*Application A and B*). Combining topology spread constraints with pod affinity can enhance your application's resiliency by spreading Pods across all AZs, as well as configuring a relationship between certain Pods to ensure that they're colocated.
+
+With https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/[pod affinity rules], you can define relationships between workloads to influence the behavior of the Kubernetes Scheduler so that it colocates Pods on the same worker node or in the same AZ. You can also configure how strict these scheduling constraints should be. The following example requires the `products` Pods to run on the same node as an `orders` Pod.
+
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: products
+  namespace: ecommerce
+  labels:
+    app.kubernetes.io/version: "0.1.6"
+spec:
+  selector:
+    matchLabels:
+      app: products
+  template:
+    metadata:
+      labels:
+        app: products
+    spec:
+      serviceAccountName: graphql-service-account
+      affinity:
+        podAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: app
+                operator: In
+                values:
+                - orders
+            topologyKey: "kubernetes.io/hostname"
+----
+
+
+The diagram below depicts pods that have been co-located on the same node using
+pod affinity rules.
+
+image::zs-pod-affinity-rule.png[Illustration of network]
+
+
+=== Test That Your Cluster Environment Can Handle the Loss of an AZ
+
+After completing the above requirements, the next important step is to test that you have sufficient compute and workload capacity to handle the loss of an AZ. You can do this by manually triggering a zonal shift in EKS. Alternatively, you can enable zonal autoshift and configure practice runs to test that your applications function as expected with one less AZ in your cluster environment.
+
+
+== Frequently Asked Questions
+
+**Why should I use this feature?**
+
+By using ARC zonal shift or zonal autoshift in your EKS cluster, you can better maintain Kubernetes application availability by automating the quick recovery process of shifting in-cluster network traffic away from an impaired AZ. With ARC, you can avoid long and complicated steps that often lead to an extended recovery period during impaired AZ events.
+
+**How does this feature work with other {aws} services?**
+
+EKS integrates with ARC, which provides the primary interface for you to accomplish recovery operations in {aws}. To ensure that in-cluster traffic is appropriately routed away from an impaired AZ, modifications are made to the list of network endpoints for Pods running in the Kubernetes data plane. If you're using {aws} Load Balancers for routing external traffic into the cluster, you can already register your load balancers with ARC and trigger a zonal shift on them to prevent traffic flowing into the degraded zone. This feature also interacts with Amazon EC2 Auto Scaling Groups (ASG) that are created by EKS Managed Node Groups (MNG). To prevent an impaired AZ from being used for new Kubernetes Pods or node launches, EKS removes the impaired AZ from the ASG.
+
+**How is this feature different from default Kubernetes protections?**
+
+This feature works in tandem with several Kubernetes native built-in protections that help customers stay resilient. You can configure Pod readiness and liveness probes that decide when a Pod should take traffic. When these probes fail, Kubernetes removes these Pods as targets for a Service and traffic is no longer sent to the Pod. While this is useful, it's non-trivial for customers to configure these health checks so that they are guaranteed to fail when a zone is degraded. The ARC zonal shift feature provides you with an additional safety net that helps you isolate a degraded AZ entirely when Kubernetes' native protections have not sufficed. It also provides you with an easy way to test the operational readiness and resilience of your architecture.
+
+**Can {aws} trigger a zonal shift on my behalf?**
+
+Yes, if you want a fully automated way of using ARC zonal shift, you can enable ARC zonal autoshift. With zonal autoshift, you can rely on {aws} to monitor the health of the AZs for your EKS cluster, and to automatically trigger a shift when an AZ impairment is detected.
+
+**What happens if I use this feature and my worker nodes and workloads are not pre-scaled?**
+
+If you are not pre-scaled and rely on provisioning additional nodes or Pods during a zonal shift, then you risk experiencing a delayed recovery. The process of adding new nodes to the Kubernetes data plane will take some time, which can affect the real-time performance and availability of your applications, especially in the event of a zonal impairment.
Additionally, in the event of a zonal impairment, you may encounter a potential compute capacity constraint that could prevent newly required nodes from being added to the healthy AZs.
+
+If your workloads are not pre-scaled and spread across all AZs in your cluster, a zonal impairment may impact the availability of an application that is only running on worker nodes in an impacted AZ. To mitigate the risk of a complete availability outage for your application, EKS has a fail-safe that allows traffic to be sent to Pod endpoints in an impaired zone if that workload has all of its endpoints in the unhealthy AZ. However, it's strongly recommended that you pre-scale and spread your applications across all AZs to maintain availability in the event of a zonal issue.
+
+**What happens if I'm running a stateful application?**
+
+If you are running a stateful application, you will need to assess its fault tolerance depending on the use case and the architecture. If you have an active/standby architecture or pattern, there may be cases where the active instance is in an impaired AZ. At the application level, if the standby is not activated, you may run into issues with your application. You may also run into issues when new Kubernetes Pods are launched in healthy AZs, since they will not be able to attach to the persistent volumes bound to the impaired AZ.
+
+**Does this feature work with Karpenter?**
+
+Karpenter support is currently not available with ARC zonal shift and zonal autoshift in EKS. If an AZ is impaired, you can adjust the relevant Karpenter NodePool configuration by removing the unhealthy AZ so that new worker nodes are only launched in the healthy AZs (see the example sketch following this FAQ).
+
+**Does this feature work with EKS Fargate?**
+
+This feature does not work with EKS Fargate. By default, when EKS Fargate recognizes a zonal health event, Pods will prefer to run in the other AZs.
+
+**Will the EKS managed Kubernetes control plane be impacted?**
+
+No, by default Amazon EKS runs and scales the Kubernetes control plane across multiple AZs to ensure high availability. ARC zonal shift and zonal autoshift only act on the Kubernetes data plane.
+
+**Are there any costs associated with this new feature?**
+
+You can use ARC zonal shift and zonal autoshift in your EKS cluster at no additional charge. However, you will continue to pay for provisioned instances, and it is strongly recommended that you pre-scale your Kubernetes data plane before using this feature. You should consider the right balance between cost and application availability.
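+
+As noted in the Karpenter question above, the following is a minimal sketch of restricting a Karpenter NodePool to healthy AZs during an impairment. The API version, node class name, and zone values are illustrative and depend on your Karpenter installation; remove only the impaired zone from the `values` list and restore it after the event.
+
+[source,yaml]
+----
+apiVersion: karpenter.sh/v1
+kind: NodePool
+metadata:
+  name: default
+spec:
+  template:
+    spec:
+      nodeClassRef:
+        group: karpenter.k8s.aws
+        kind: EC2NodeClass
+        name: default
+      requirements:
+        # Keep only the healthy AZs here; the impaired AZ (for example, us-west-2c) is omitted.
+        - key: topology.kubernetes.io/zone
+          operator: In
+          values: ["us-west-2a", "us-west-2b"]
+----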
+ +== Additional Resources + +* link:r53recovery/latest/dg/arc-zonal-shift.how-it-works.html["How a zonal shift works",type="documentation"] +* link:r53recovery/latest/dg/route53-arc-best-practices.zonal-shifts.html#zonalshift.route53-arc-best-practices.zonal-shifts["Best practices for zonal shifts in ARC",type="documentation"] +* link:r53recovery/latest/dg/arc-zonal-shift.resource-types.html["Resources and scenarios supported for zonal shift and zonal autoshift",type="documentation"] +* link:blogs/containers/operating-resilient-workloads-on-amazon-eks/["Operating resilient workloads on Amazon EKS",type="marketing"] +* link:blogs/containers/eliminate-kubernetes-node-scaling-lag-with-pod-priority-and-over-provisioning/["Eliminate Kubernetes node scaling lag with pod priority and over-provisioning",type="marketing"] +* link:eks/latest/userguide/coredns-autoscaling.html["Scale CoreDNS Pods for high DNS traffic",type="documentation"] diff --git a/latest/ug/connector/connecting-cluster.adoc b/latest/ug/connector/connecting-cluster.adoc new file mode 100644 index 00000000..5f3cb087 --- /dev/null +++ b/latest/ug/connector/connecting-cluster.adoc @@ -0,0 +1,216 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[connecting-cluster,connecting-cluster.title]] += Connect an external [.noloc]`Kubernetes` cluster to the Amazon EKS Management Console +:info_doctype: section +:info_title: Connect an external Kubernetes cluster to the Amazon EKS Management Console +:info_titleabbrev: Connect a cluster +:info_abstract: Learn to connect an external Kubernetes cluster to an Amazon EKS Management Console and install the eks-connector agent via Helm or YAML manifests to enable visibility and management of the external cluster. + +[abstract] +-- +Learn to connect an external [.noloc]`Kubernetes` cluster to an Amazon EKS Management Console and install the eks-connector agent via Helm or YAML manifests to enable visibility and management of the external cluster. +-- + +You can connect an external [.noloc]`Kubernetes` cluster to Amazon EKS by using multiple methods in the following process. This process involves two steps: Registering the cluster with Amazon EKS and installing the `eks-connector` agent in the cluster. + +[IMPORTANT] +==== + +You must complete the second step within 3 days of completing the first step, before the registration expires. + +==== + +[[connecting-cluster-considerations,connecting-cluster-considerations.title]] +== Considerations + +You can use YAML manifests when installing the agent. Alternatively, you can use Helm if you register the cluster with the {aws-management-console} or {aws} Command Line Interface. However, you cannot use Helm to install the agent if you register the cluster with `eksctl`. + +[[connector-prereqs,connector-prereqs.title]] +== Prerequisites + +* Ensure the Amazon EKS Connector agent role was created. Follow the steps in <>. +* You must have the following permissions to register a cluster: ++ +** `eks:RegisterCluster` +** `ssm:CreateActivation` +** `ssm:DeleteActivation` +** `iam:PassRole` + + +[[connector-connecting,connector-connecting.title]] +== Step 1: Registering the cluster +To register a cluster to Amazon EKS connector, you can use one of these tools: + +* <> +* <> +* <> + +=== {aws} CLI [[awscli_register_cluster_connect]] + +. {aws} CLI must be installed. To install or upgrade it, see link:cli/latest/userguide/cli-chap-install.html[Installing the {aws} CLI,type="documentation"]. +. For the Connector configuration, specify your Amazon EKS Connector agent IAM role. For more information, see <>. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks register-cluster \ + --name my-first-registered-cluster \ + --connector-config roleArn={arn-aws}iam::111122223333:role/AmazonEKSConnectorAgentRole,provider="OTHER" \ + --region aws-region +---- ++ +An example output is as follows. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "cluster": { + "name": "my-first-registered-cluster", + "arn": "{arn-aws}eks:region:111122223333:cluster/my-first-registered-cluster", + "createdAt": 1627669203.531, + "ConnectorConfig": { + "activationId": "xxxxxxxxACTIVATION_IDxxxxxxxx", + "activationCode": "xxxxxxxxACTIVATION_CODExxxxxxxx", + "activationExpiry": 1627672543.0, + "provider": "OTHER", + "roleArn": "{arn-aws}iam::111122223333:role/AmazonEKSConnectorAgentRole" + }, + "status": "CREATING" + } +} +---- ++ +You use the `aws-region`, `activationId`, and `activationCode` values in the next step. + +=== {aws-management-console} [[console_register_cluster_connect]] +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose *Add cluster* and select *Register* to bring up the configuration page. +. 
On the *Configure cluster* section, fill in the following fields: ++ +*** *Name* – A unique name for your cluster. +*** *Provider* – Choose to display the dropdown list of [.noloc]`Kubernetes` cluster providers. If you don't know the specific provider, select *Other*. +*** *EKS Connector role* – Select the role to use for connecting the cluster. +. Select *Register cluster*. +. The Cluster overview page displays. If you want to use the Helm chart, copy the `helm install` command and continue to the next step. If you want to use the YAML manifest, choose *Download YAML file* to download the manifest file to your local drive. ++ +[IMPORTANT] +==== +This is your only opportunity to copy the `helm install` command or download this file. Don't navigate away from this page, as the link will not be accessible and you must deregister the cluster and start the steps from the beginning. +==== ++ +The command or manifest file can be used only once for the registered cluster. If you delete resources from the [.noloc]`Kubernetes` cluster, you must re-register the cluster and obtain a new manifest file. + +Continue to the next step to apply the manifest file to your [.noloc]`Kubernetes` cluster. + +=== `eksctl` [[eksctl_register_cluster_connect]] +. `eksctl` version `0.68` or later must be installed. To install or upgrade it, see <>. ++ +. Register the cluster by providing a name, provider, and region. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl register cluster --name my-cluster --provider my-provider --region region-code +---- ++ +Example output: ++ +[source,bash,subs="verbatim,attributes"] +---- + +2021-08-19 13:47:26 [ℹ] creating IAM role "eksctl-20210819194112186040" +2021-08-19 13:47:26 [ℹ] registered cluster "" successfully +2021-08-19 13:47:26 [ℹ] wrote file eks-connector.yaml to +2021-08-19 13:47:26 [ℹ] wrote file eks-connector-clusterrole.yaml to +2021-08-19 13:47:26 [ℹ] wrote file eks-connector-console-dashboard-full-access-group.yaml to +2021-08-19 13:47:26 [!] note: "eks-connector-clusterrole.yaml" and "eks-connector-console-dashboard-full-access-group.yaml" give full EKS Console access to IAM identity "", edit if required; read https://eksctl.io/usage/eks-connector for more info +2021-08-19 13:47:26 [ℹ] run `kubectl apply -f eks-connector.yaml,eks-connector-clusterrole.yaml,eks-connector-console-dashboard-full-access-group.yaml` before expiry> to connect the cluster +---- ++ +This creates files on your local computer. These files must be applied to the external cluster within 3 days, or the registration expires. +. In a terminal that can access the cluster, apply the `eks-connector-binding.yaml` file: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f eks-connector-binding.yaml +---- + + +[[eks-connector-apply,eks-connector-apply.title]] +== Step 2: Installing the `eks-connector` agent +To install the `eks-connector` agent, use one of the following tools: + + +* <> +* <> + +=== helm [[helm_agent_cluster_connect]] + +[NOTE] +==== +If you registered the cluster with `eksctl`, use the YAML manifest method instead of the Helm chart method. +==== + +. If you used the {aws} CLI in the previous step, replace the `ACTIVATION_CODE` and `ACTIVATION_ID` in the following command with the `activationId`, and `activationCode` values respectively. Replace the `aws-region` with the {aws} Region that you used in the previous step. 
Then run the command to install the `eks-connector` agent on the registering cluster: ++ +[source,shell,subs="verbatim,attributes"] +---- +$ helm install eks-connector \ + --namespace eks-connector \ + oci://public.ecr.aws/eks-connector/eks-connector-chart \ + --set eks.activationCode=ACTIVATION_CODE \ + --set eks.activationId=ACTIVATION_ID \ + --set eks.agentRegion=aws-region +---- ++ +If you used the {aws-management-console} in the previous step, use the command that you copied from the previous step that has these values filled in. +. Check the healthiness of the installed `eks-connector` deployment and wait for the status of the registered cluster in Amazon EKS to be `ACTIVE`. + +=== yaml [[yaml_agent_cluster_connect]] +Complete the connection by applying the Amazon EKS Connector manifest file to your [.noloc]`Kubernetes` cluster. To do this, you must use the methods described previously. If the manifest isn't applied within three days, the Amazon EKS Connector registration expires. If the cluster connection expires, the cluster must be deregistered before connecting the cluster again. + +. Download the Amazon EKS Connector YAML file. ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://amazon-eks.s3.us-west-2.amazonaws.com/eks-connector/manifests/eks-connector/latest/eks-connector.yaml +---- +. Edit the Amazon EKS Connector YAML file to replace all references of `%AWS_REGION%`, `%EKS_ACTIVATION_ID%`, `%EKS_ACTIVATION_CODE%` with the `aws-region`, `activationId`, and `activationCode` from the output of the previous step. ++ +The following example command can replace these values. ++ +[source,bash,subs="verbatim,attributes"] +---- +sed -i "s~%AWS_REGION%~$aws-region~g; s~%EKS_ACTIVATION_ID%~$EKS_ACTIVATION_ID~g; s~%EKS_ACTIVATION_CODE%~$(echo -n $EKS_ACTIVATION_CODE | base64)~g" eks-connector.yaml +---- ++ +[IMPORTANT] +==== +Ensure that your activation code is in the base64 format. +==== +. In a terminal that can access the cluster, you can apply the updated manifest file by running the following command: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f eks-connector.yaml +---- +. After the Amazon EKS Connector manifest and role binding YAML files are applied to your [.noloc]`Kubernetes` cluster, confirm that the cluster is now connected. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster \ + --name "my-first-registered-cluster" \ + --region AWS_REGION +---- ++ +The output should include `status=ACTIVE`. +. (Optional) Add tags to your cluster. For more information, see <>. + + +[[eks-connector-next,eks-connector-next.title]] +== Next steps + +If you have any issues with these steps, see <>. + +To grant additional link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principals,type="documentation"] access to the Amazon EKS console to view [.noloc]`Kubernetes` resources in a connected cluster, see <>. diff --git a/latest/ug/connector/connector-grant-access.adoc b/latest/ug/connector/connector-grant-access.adoc new file mode 100644 index 00000000..aaa58c36 --- /dev/null +++ b/latest/ug/connector/connector-grant-access.adoc @@ -0,0 +1,75 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[connector-grant-access,connector-grant-access.title]] += Grant access to view [.noloc]`Kubernetes` cluster resources on an Amazon EKS console +:info_doctype: section +:info_title: Grant access to view Kubernetes cluster resources on an \ + Amazon EKS console +:info_titleabbrev: Grant access to Kubernetes clusters from {aws} console +:info_abstract: Learn to grant IAM principals access to view Kubernetes cluster resources on an Amazon EKS Management Console. + +[abstract] +-- +Learn to grant IAM principals access to view Kubernetes cluster resources on an Amazon EKS Management Console. +-- + +Grant link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principals,type="documentation"] access to the Amazon EKS console to view information about [.noloc]`Kubernetes` resources running on your connected cluster. + +[[connector-grant-access-prereqs,connector-grant-access-prereqs.title]] +== Prerequisites + +The link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] that you use to access the {aws-management-console} must meet the following requirements: + +* It must have the `eks:AccessKubernetesApi` IAM permission. +* The Amazon EKS Connector service account can impersonate the IAM principal in the cluster. This allows the Amazon EKS Connector to map the IAM principal to a [.noloc]`Kubernetes` user. + +*To create and apply the Amazon EKS Connector cluster role* + +. Download the `eks-connector` cluster role template. ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/eks-connector/manifests/eks-connector-console-roles/eks-connector-clusterrole.yaml +---- +. Edit the cluster role template YAML file. Replace references of `%IAM_ARN%` with the Amazon Resource Name (ARN) of your IAM principal. +. Apply the Amazon EKS Connector cluster role YAML to your [.noloc]`Kubernetes` cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f eks-connector-clusterrole.yaml +---- + +For an IAM principal to view [.noloc]`Kubernetes` resources in Amazon EKS console, the principal must be associated with a [.noloc]`Kubernetes` `role` or `clusterrole` with necessary permissions to read the resources. For more information, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization] in the [.noloc]`Kubernetes` documentation. + +*To configure an IAM principal to access the connected cluster* + +. You can download either of these example manifest files to create a `clusterrole` and `clusterrolebinding` or a `role` and `rolebinding`, respectively: + ++ +*View [.noloc]`Kubernetes` resources in all namespaces*::: +** The `eks-connector-console-dashboard-full-access-clusterrole` cluster role gives access to all namespaces and resources that can be visualized in the console. You can change the name of the `role`, `clusterrole` and their corresponding binding before applying it to your cluster. Use the following command to download a sample file. ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/eks-connector/manifests/eks-connector-console-roles/eks-connector-console-dashboard-full-access-group.yaml +---- + + +*View [.noloc]`Kubernetes` resources in a specific namespace*::: +** The namespace in this file is `default`, so if you want to specify a different namespace, edit the file before applying it to your cluster. Use the following command to download a sample file. 
++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/eks-connector/manifests/eks-connector-console-roles/eks-connector-console-dashboard-restricted-access-group.yaml +---- +. Edit the full access or restricted access YAML file to replace references of `%IAM_ARN%` with the Amazon Resource Name (ARN) of your IAM principal. +. Apply the full access or restricted access YAML files to your [.noloc]`Kubernetes` cluster. Replace the YAML file value with your own. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f eks-connector-console-dashboard-full-access-group.yaml +---- + +To view [.noloc]`Kubernetes` resources in your connected cluster, see <>. Data for some resource types on the *Resources* tab isn't available for connected clusters. diff --git a/latest/ug/connector/deregister-connected-cluster.adoc b/latest/ug/connector/deregister-connected-cluster.adoc new file mode 100644 index 00000000..090a4358 --- /dev/null +++ b/latest/ug/connector/deregister-connected-cluster.adoc @@ -0,0 +1,93 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[deregister-connected-cluster,deregister-connected-cluster.title]] += Deregister a Kubernetes cluster from the Amazon EKS console +:info_doctype: section +:info_title: Deregister a Kubernetes cluster from the Amazon EKS console +:info_titleabbrev: Deregister a cluster +:info_abstract: Learn to deregister a Kubernetes cluster from Amazon EKS and uninstall the eks-connector agent to stop managing the cluster from the Amazon EKS Management Console. + +[abstract] +-- +Learn to deregister a [.noloc]`Kubernetes` cluster from Amazon EKS and uninstall the eks-connector agent to stop managing the cluster from the Amazon EKS Management Console. +-- + +If you are finished using a connected cluster, you can deregister it. After it's deregistered, the cluster is no longer visible in the Amazon EKS console. + +You must have the following permissions to call the deregisterCluster API: + + + +* `eks:DeregisterCluster` +* `ssm:DeleteActivation` +* `ssm:DeregisterManagedInstance` + +This process involves two steps: Deregistering the cluster with Amazon EKS and uninstalling the eks-connector agent in the cluster. + +[[deregister-connected-cluster-eks,deregister-connected-cluster-eks.title]] +== Deregister the [.noloc]`Kubernetes` cluster +To deregister a cluster from Amazon EKS connector, you can use one of these tools: + +* <> +* <> +* <> + +=== {aws} CLI [[awscli_deregister_cluster_connect]] + +. {aws} CLI must be installed. To install or upgrade it, see link:cli/latest/userguide/cli-chap-install.html[Installing the {aws} CLI,type="documentation"]. +. Ensure the Amazon EKS Connector agent role was created. +. Deregister the connected cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks deregister-cluster \ + --name my-cluster \ + --region region-code +---- + +=== {aws-management-console} [[console_deregister_cluster_connect]] + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose *Clusters*. +. On the *Clusters* page, select the connected cluster and select *Deregister*. +. Confirm that you want to deregister the cluster. + +=== `eksctl` [[eksctl_deregister_cluster_connect]] + +. Install `eksctl` version `0.68` or later. To install or upgrade it, see <>. +. Ensure the Amazon EKS Connector agent role was created. +. Deregister the connected cluster: ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl deregister cluster --name my-cluster +---- + + +[[deregister-connected-cluster-k8s,deregister-connected-cluster-k8s.title]] +== Clean up the resources in your [.noloc]`Kubernetes` cluster +To uninstall the `eks-connector` agent, use one of the following tools: + +* <> +* <> + +=== helm [[helm_agent_cluster_deregister]] + +Run the following command to uninstall the agent. + +[source,shell,subs="verbatim,attributes"] +---- +helm -n eks-connector uninstall eks-connector +---- + +=== yaml [[yaml_agent_cluster_deregister]] + +. Delete the Amazon EKS Connector YAML file from your [.noloc]`Kubernetes` cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl delete -f eks-connector.yaml +---- +. If you created `clusterrole` or `clusterrolebindings` for additional link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principals,type="documentation"] to access the cluster, delete them from your [.noloc]`Kubernetes` cluster. 
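+
+For example, if you applied the sample console access role files described in <>, a minimal cleanup sketch looks like the following. Adjust the file names to match the manifests that you actually applied.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Remove the console access roles and bindings that were applied earlier.
+kubectl delete -f eks-connector-clusterrole.yaml
+kubectl delete -f eks-connector-console-dashboard-full-access-group.yaml
+----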
diff --git a/latest/ug/connector/eks-connector.adoc b/latest/ug/connector/eks-connector.adoc new file mode 100644 index 00000000..c0d61be8 --- /dev/null +++ b/latest/ug/connector/eks-connector.adoc @@ -0,0 +1,87 @@ +//!!NODE_ROOT +include::../attributes.txt[] +[[eks-connector,eks-connector.title]] += Connect a [.noloc]`Kubernetes` cluster to an Amazon EKS Management Console with Amazon EKS Connector +:doctype: book +:sectnums: +:toc: left +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . +:info_doctype: chapter +:info_title: Connect a Kubernetes cluster to an Amazon EKS Management Console with Amazon EKS Connector +:info_titleabbrev: Amazon EKS Connector +:info_abstract: Discover how to connect conformant Kubernetes clusters to {aws} and visualize them in the Amazon EKS console using the Amazon EKS Connector agent and required IAM roles. + +[abstract] +-- +Discover how to connect conformant [.noloc]`Kubernetes` clusters to {aws} and visualize them in the Amazon EKS console using the Amazon EKS Connector agent and required IAM roles. +-- + +You can use Amazon EKS Connector to register and connect any conformant [.noloc]`Kubernetes` cluster to {aws} and visualize it in the Amazon EKS console. After a cluster is connected, you can see the status, configuration, and workloads for that cluster in the Amazon EKS console. You can use this feature to view connected clusters in Amazon EKS console, but you can't manage them. The Amazon EKS Connector requires an agent that is an https://github.com/aws/amazon-eks-connector[open source project on Github]. For additional technical content, including frequently asked questions and troubleshooting, see <>. + + +The Amazon EKS Connector can connect the following types of [.noloc]`Kubernetes` clusters to Amazon EKS. + + + +* On-premises [.noloc]`Kubernetes` clusters +* Self-managed clusters that are running on Amazon EC2 +* Managed clusters from other cloud providers + + +[[connect-cluster-reqts,connect-cluster-reqts.title]] +== Amazon EKS Connector considerations + +Before you use Amazon EKS Connector, understand the following: + + + +* You must have administrative privileges to the [.noloc]`Kubernetes` cluster to connect the cluster to Amazon EKS. +* The [.noloc]`Kubernetes` cluster must have [.noloc]`Linux` 64-bit (x86) worker nodes present before connecting. ARM worker nodes aren't supported. +* You must have worker nodes in your [.noloc]`Kubernetes` cluster that have outbound access to the `ssm.` and `ssmmessages.` Systems Manager endpoints. For more information, see link:general/latest/gr/ssm.html[Systems Manager endpoints,type="documentation"] in the _{aws} General Reference_. +* By default, you can connect up to 10 clusters in a Region. You can request an increase through the link:servicequotas/latest/userguide/request-quota-increase.html[service quota console,type="documentation"]. See link:servicequotas/latest/userguide/request-quota-increase.html[Requesting a quota increase,type="documentation"] for more information. +* Only the Amazon EKS `RegisterCluster`, `ListClusters`, `DescribeCluster`, and `DeregisterCluster` APIs are supported for external [.noloc]`Kubernetes` clusters. 
+* You must have the following permissions to register a cluster: ++ +** eks:RegisterCluster +** ssm:CreateActivation +** ssm:DeleteActivation +** iam:PassRole +* You must have the following permissions to deregister a cluster: ++ +** eks:DeregisterCluster +** ssm:DeleteActivation +** ssm:DeregisterManagedInstance + + +[[connector-iam-permissions,connector-iam-permissions.title]] +== Required IAM roles for Amazon EKS Connector + +Using the Amazon EKS Connector requires the following two IAM roles: + + + +* The <> service-linked role is created when you register a cluster for the first time. +* You must create the Amazon EKS Connector agent IAM role. See <> for details. + +To enable cluster and workload view permission for link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principals,type="documentation"], apply the `eks-connector` and Amazon EKS Connector cluster roles to your cluster. Follow the steps in <>. + +include::connecting-cluster.adoc[leveloffset=+1] + + +include::connector-grant-access.adoc[leveloffset=+1] + + +include::deregister-connected-cluster.adoc[leveloffset=+1] + + +include::troubleshooting-connector.adoc[leveloffset=+1] + + +include::tsc-faq.adoc[leveloffset=+1] + + +include::security-connector.adoc[leveloffset=+1] diff --git a/latest/ug/connector/images b/latest/ug/connector/images new file mode 120000 index 00000000..5e675731 --- /dev/null +++ b/latest/ug/connector/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/latest/ug/connector/security-connector.adoc b/latest/ug/connector/security-connector.adoc new file mode 100644 index 00000000..e7dfa1cf --- /dev/null +++ b/latest/ug/connector/security-connector.adoc @@ -0,0 +1,44 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[security-connector,security-connector.title]] += Understand security in Amazon EKS Connector +:info_doctype: section +:info_title: Understand security in Amazon EKS Connector +:info_titleabbrev: Security considerations +:info_abstract: Learn how the open-source EKS Connector affects security, and understand {aws} and \ + customer security responsibilities for connectivity, cluster management, and IAM \ + access control. + +[abstract] +-- +Learn how the open-source EKS Connector affects security, and understand {aws} and customer security responsibilities for connectivity, cluster management, and IAM access control. +-- + +The Amazon EKS Connector is an open source component that runs on your [.noloc]`Kubernetes` cluster. This cluster can be located outside of the {aws} environment. This creates additional considerations for security responsibilities. This configuration can be illustrated by the following diagram. Orange represents {aws} responsibilities, and blue represents customer responsibilities: + + + +image::images/connector-model.png[EKS Connector Responsibilities,scaledwidth=100%] + +This topic describes the differences in the responsibility model if the connected cluster is outside of {aws}. + +[[connect-aws-resp,connect-aws-resp.title]] +== {aws} responsibilities + +* Maintaining, building, and delivering Amazon EKS Connector, which is an https://github.com/aws/amazon-eks-connector[open source component] that runs on a customer's [.noloc]`Kubernetes` cluster and communicates with {aws}. +* Maintaining transport and application layer communication security between the connected [.noloc]`Kubernetes` cluster and {aws} services. + + +[[connect-cust-resp,connect-cust-resp.title]] +== Customer responsibilities + +* [.noloc]`Kubernetes` cluster specific security, specifically along the following lines: ++ +** [.noloc]`Kubernetes` secrets must be properly encrypted and protected. +** Lock down access to the `eks-connector` namespace. +* Configuring role-based access control (RBAC) permissions to manage link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] access from {aws}. For instructions, see <>. +* Installing and upgrading Amazon EKS Connector. +* Maintaining the hardware, software, and infrastructure that supports the connected [.noloc]`Kubernetes` cluster. +* Securing their {aws} accounts (for example, through safeguarding your link:IAM/latest/UserGuide/best-practices.html#lock-away-credentials[root user credentials,type="documentation"]). diff --git a/latest/ug/connector/troubleshooting-connector.adoc b/latest/ug/connector/troubleshooting-connector.adoc new file mode 100644 index 00000000..3440d66e --- /dev/null +++ b/latest/ug/connector/troubleshooting-connector.adoc @@ -0,0 +1,275 @@ +//!!NODE_ROOT
+[.topic] +[[troubleshooting-connector,troubleshooting-connector.title]] += Troubleshoot Amazon EKS Connector issues +:info_doctype: section +:info_title: Troubleshoot Amazon EKS Connector issues +:info_titleabbrev: Troubleshoot Amazon EKS Connector +:info_abstract: Troubleshoot and resolve common issues when using Amazon EKS Connector to connect your Kubernetes clusters to Amazon EKS. + +include::../attributes.txt[] + +[abstract] +-- +Troubleshoot and resolve common issues when using Amazon EKS Connector to connect your [.noloc]`Kubernetes` clusters to Amazon EKS. +-- + +This topic covers some of the common errors that you might encounter while using the Amazon EKS Connector, including instructions on how to resolve them and workarounds. + +[[tsc-steps,tsc-steps.title]] +== Basic troubleshooting + +This section describes steps to diagnose Amazon EKS Connector issues. + +[[tsc-check,tsc-check.title]] +=== Check Amazon EKS Connector status + +To check the Amazon EKS Connector status, type: + +[source,bash,subs="verbatim,attributes"] +---- +kubectl get pods -n eks-connector +---- + + +[[tsc-logs,tsc-logs.title]] +=== Inspect Amazon EKS Connector logs + +The Amazon EKS Connector [.noloc]`Pod` consists of three containers. To retrieve full logs for all of these containers so that you can inspect them, run the following commands: + + + +* `connector-init` ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl logs eks-connector-0 --container connector-init -n eks-connector +kubectl logs eks-connector-1 --container connector-init -n eks-connector +---- +* `connector-proxy` ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl logs eks-connector-0 --container connector-proxy -n eks-connector +kubectl logs eks-connector-1 --container connector-proxy -n eks-connector +---- +* `connector-agent` ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl exec eks-connector-0 --container connector-agent -n eks-connector -- cat /var/log/amazon/ssm/amazon-ssm-agent.log +kubectl exec eks-connector-1 --container connector-agent -n eks-connector -- cat /var/log/amazon/ssm/amazon-ssm-agent.log +---- + + +[[tsc-name,tsc-name.title]] +=== Get the effective cluster name + +Amazon EKS clusters are uniquely identified by `clusterName` within a single {aws} account and {aws} Region. If you have multiple connected clusters in Amazon EKS, you can confirm which Amazon EKS cluster that the current [.noloc]`Kubernetes` cluster is registered to. To do this, enter the following to find out the `clusterName` of the current cluster. + +// Not using subs="quotes" here with [.replaceable]`region-code` because the * characters get dropped. +[source,bash,subs="verbatim,attributes"] +---- +kubectl exec eks-connector-0 --container connector-agent -n eks-connector \ + -- cat /var/log/amazon/ssm/amazon-ssm-agent.log | grep -m1 -oE "eks_c:[a-zA-Z0-9_-]+" | sed -E "s/^.*eks_c:([a-zA-Z0-9_-]+)_[a-zA-Z0-9]+.*$/\1/" +kubectl exec eks-connector-1 --container connector-agent -n eks-connector \ + -- cat /var/log/amazon/ssm/amazon-ssm-agent.log | grep -m1 -oE "eks_c:[a-zA-Z0-9_-]+" | sed -E "s/^.*eks_c:([a-zA-Z0-9_-]+)_[a-zA-Z0-9]+.*$/\1/" +---- + + +[[tsc-misc,tsc-misc.title]] +=== Miscellaneous commands + +The following commands are useful to retrieve information that you need to troubleshoot issues. + +* Use the following command to gather images that's used by [.noloc]`Pods` in Amazon EKS Connector. +// Not using subs="quotes" here with [.replaceable]`region-code` because the * characters get dropped. 
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get pods -n eks-connector -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n'
+----
+* Use the following command to determine the node names that Amazon EKS Connector is running on.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get pods -n eks-connector -o jsonpath="{.items[*].spec.nodeName}" | tr -s '[[:space:]]' '\n'
+----
+* Run the following command to get your [.noloc]`Kubernetes` client and server versions.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl version
+----
+* Run the following command to get information about your nodes.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get nodes -o wide --show-labels
+----
+
+
+[[w662aac60c33b9]]
+== Helm issue: 403 Forbidden
+
+If you receive the following error when running `helm install` commands:
+
+[source,bash,subs="verbatim,attributes"]
+----
+Error: INSTALLATION FAILED: unexpected status from HEAD request to https://public.ecr.aws/v2/eks-connector/eks-connector-chart/manifests/0.0.6: 403 Forbidden
+----
+
+You can run the following command to fix it:
+
+[source,bash,subs="verbatim,attributes"]
+----
+docker logout public.ecr.aws
+----
+
+
+[[symp-pending,symp-pending.title]]
+== Console error: the cluster is stuck in the Pending state
+
+If the cluster gets stuck in the `Pending` state on the Amazon EKS console after you registered it, it might be because the Amazon EKS Connector didn't successfully connect the cluster to {aws} yet. For a registered cluster, the `Pending` state means that the connection isn't successfully established. To resolve this issue, make sure that you have applied the manifest to the target [.noloc]`Kubernetes` cluster. If you applied it to the cluster, but the cluster is still in the `Pending` state, then the `eks-connector` statefulset might be unhealthy. To troubleshoot this issue, see <> in this topic.
+
+[[symp-imp,symp-imp.title]]
+== Console error: User system:serviceaccount:eks-connector:eks-connector can't impersonate resource users in API group at cluster scope
+
+The Amazon EKS Connector uses [.noloc]`Kubernetes` https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation[user impersonation] to act on behalf of link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principals,type="documentation"] from the {aws-management-console}. Each principal that accesses the [.noloc]`Kubernetes` API from the {aws} `eks-connector` service account must be granted permission to impersonate the corresponding [.noloc]`Kubernetes` user with an IAM ARN as its [.noloc]`Kubernetes` user name. In the following examples, the IAM ARN is mapped to a [.noloc]`Kubernetes` user.
+
+
+* IAM user [.replaceable]`john` from {aws} account [.replaceable]`111122223333` is mapped to a [.noloc]`Kubernetes` user. link:IAM/latest/UserGuide/id_users.html[IAM best practices,type="documentation"] recommend that you grant permissions to roles instead of users.
++
+[source,bash,subs="verbatim,attributes"]
+----
+{arn-aws}iam::111122223333:user/john
+----
+* IAM role [.replaceable]`admin` from {aws} account [.replaceable]`111122223333` is mapped to a [.noloc]`Kubernetes` user:
++
+[source,bash,subs="verbatim,attributes"]
+----
+{arn-aws}iam::111122223333:role/admin
+----
++
+The result is an IAM role ARN, instead of the {aws} STS session ARN.
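+
+The following is a minimal sketch of what such an impersonation grant looks like; the official template that you download in the linked topic may differ, and the role name and IAM ARN here are illustrative only.
+
+[source,yaml,subs="verbatim,attributes"]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: eks-connector-impersonate-example
+rules:
+  # Allow impersonating only the Kubernetes user whose name is the mapped IAM ARN.
+  - apiGroups: [""]
+    resources: ["users"]
+    verbs: ["impersonate"]
+    resourceNames: ["{arn-aws}iam::111122223333:role/admin"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: eks-connector-impersonate-example
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: eks-connector-impersonate-example
+subjects:
+  - kind: ServiceAccount
+    name: eks-connector
+    namespace: eks-connector
+----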
+
+For instructions on how to configure the `ClusterRole` and `ClusterRoleBinding` to grant the `eks-connector` service account the privilege to impersonate the mapped user, see <>. Make sure that in the template, `%IAM_ARN%` is replaced with the IAM ARN of the {aws-management-console} IAM principal.
+
+[[symp-rbac,symp-rbac.title]]
+== Console error: [...] is forbidden: User [...] cannot list resource [...] in API group at the cluster scope
+
+Consider the following problem. The Amazon EKS Connector has successfully impersonated the requesting {aws-management-console} IAM principal in the target [.noloc]`Kubernetes` cluster. However, the impersonated principal doesn't have RBAC permission for [.noloc]`Kubernetes` API operations.
+
+To resolve this issue, use one of the following two methods to give permissions to additional users. If you previously installed the `eks-connector` with the Helm chart, you can grant users access by running the following command. Replace `userARN1` and `userARN2` with a comma-separated list of the ARNs of the IAM roles to give access to view the [.noloc]`Kubernetes` resources:
+
+[source,shell,subs="verbatim,attributes"]
+----
+helm upgrade eks-connector oci://public.ecr.aws/eks-connector/eks-connector-chart \
+  --reuse-values \
+  --set 'authentication.allowedUserARNs={userARN1,userARN2}'
+----
+
+Or, as the cluster administrator, grant the appropriate level of RBAC privileges to individual [.noloc]`Kubernetes` users. For more information and examples, see <>.
+
+[[symp-con,symp-con.title]]
+== Console error: Amazon EKS can't communicate with your [.noloc]`Kubernetes` cluster API server. The cluster must be in an ACTIVE state for successful connection. Try again in few minutes.
+
+If the Amazon EKS service can't communicate with the Amazon EKS Connector in the target cluster, it might be for one of the following reasons:
+
+* The Amazon EKS Connector in the target cluster is unhealthy.
+* There is poor connectivity or an interrupted connection between the target cluster and the {aws} Region.
+
+To resolve this problem, check the <>. If you don't see an error for the Amazon EKS Connector, retry the connection after a few minutes. If you regularly experience high latency or intermittent connectivity for the target cluster, consider re-registering the cluster to an {aws} Region that's located closer to you.
+
+[[symp-loop,symp-loop.title]]
+== Amazon EKS Connector [.noloc]`Pods` are crash looping
+
+Many issues can cause an Amazon EKS Connector [.noloc]`Pod` to enter the `CrashLoopBackOff` status. This issue likely involves the `connector-init` container. Check the status of the Amazon EKS Connector [.noloc]`Pod`.
+
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get pods -n eks-connector
+----
+
+An example output is as follows.
+
+[source,bash,subs="verbatim,attributes"]
+----
+NAME              READY   STATUS                  RESTARTS   AGE
+eks-connector-0   0/2     Init:CrashLoopBackOff   1          7s
+----
+
+If your output is similar to the previous output, see <> to troubleshoot the issue.
+
+[[symp-regis,symp-regis.title]]
+== Failed to initiate eks-connector: InvalidActivation
+
+When you start the Amazon EKS Connector for the first time, it registers an `activationId` and `activationCode` with Amazon Web Services. The registration might fail, which can cause the `connector-init` container to crash with an error similar to the following.
+
+[source,bash,subs="verbatim,attributes"]
+----
+F1116 20:30:47.261469       1 init.go:43] failed to initiate eks-connector: InvalidActivation:
+----
+
+To troubleshoot this issue, consider the following causes and recommended fixes:
+
+
+
+* Registration might have failed because the `activationId` and `activationCode` aren't in your manifest file. If this is the case, make sure that they are the correct values that were returned from the `RegisterCluster` API operation, and that the `activationCode` is in the manifest file. The `activationCode` is added to [.noloc]`Kubernetes` secrets, so it must be `base64` encoded. For more information, see <>.
+* Registration might have failed because your activation expired. This is because, for security reasons, you must activate the Amazon EKS Connector within three days after registering the cluster. To resolve this issue, make sure that the Amazon EKS Connector manifest is applied to the target [.noloc]`Kubernetes` cluster before the expiry date and time. To confirm your activation expiry date, call the `DescribeCluster` API operation.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks describe-cluster --name my-cluster
+----
++
+In the following example response, the expiry date and time are recorded as `2021-11-12T22:28:51.101000-08:00`.
++
+[source,json,subs="verbatim,attributes"]
+----
+
+{
+    "cluster": {
+        "name": "my-cluster",
+        "arn": "{arn-aws}eks:region:111122223333:cluster/my-cluster",
+        "createdAt": "2021-11-09T22:28:51.449000-08:00",
+        "status": "FAILED",
+        "tags": {
+        },
+        "connectorConfig": {
+            "activationId": "00000000-0000-0000-0000-000000000000",
+            "activationExpiry": "2021-11-12T22:28:51.101000-08:00",
+            "provider": "OTHER",
+            "roleArn": "{arn-aws}iam::111122223333:role/my-connector-role"
+        }
+    }
+}
+----
++
+If the `activationExpiry` has passed, deregister the cluster and register it again. Doing this generates a new activation.
+
+
+[[symp-out,symp-out.title]]
+== Cluster node is missing outbound connectivity
+
+To work properly, the Amazon EKS Connector requires outbound connectivity to several {aws} endpoints. You can't connect a private cluster without outbound connectivity to a target {aws} Region. To resolve this issue, you must add the necessary outbound connectivity. For information about connector requirements, see <>.
+
+[[symp-img,symp-img.title]]
+== Amazon EKS Connector [.noloc]`Pods` are in the `ImagePullBackOff` state
+
+If the Amazon EKS Connector [.noloc]`Pods` are in the `ImagePullBackOff` state, they can't work properly. Check the status of your Amazon EKS Connector [.noloc]`Pods`.
+
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get pods -n eks-connector
+----
+
+An example output is as follows.
+
+[source,bash,subs="verbatim,attributes"]
+----
+NAME              READY   STATUS                  RESTARTS   AGE
+eks-connector-0   0/2     Init:ImagePullBackOff   0          4s
+----
+
+The default Amazon EKS Connector manifest file references images from the https://gallery.ecr.aws/[Amazon ECR Public Gallery]. It's possible that the target [.noloc]`Kubernetes` cluster can't pull images from the Amazon ECR Public Gallery. Either resolve the Amazon ECR Public Gallery image pull issue, or consider mirroring the images to a private container registry of your choice, as shown in the sketch that follows.
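+
+If you choose to mirror the images, one possible approach is sketched below. The registry `registry.example.com` is a hypothetical placeholder for this illustration; substitute your own private registry and the actual image names and tags returned by the first command.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# List the images that the Amazon EKS Connector Pods currently reference.
+kubectl get pods -n eks-connector -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n'
+
+# For each image, pull it, retag it for your private registry, and push it.
+docker pull public.ecr.aws/example/example-image:tag
+docker tag public.ecr.aws/example/example-image:tag registry.example.com/example-image:tag
+docker push registry.example.com/example-image:tag
+----
+
+After the images are available in your private registry, update the image references in the Amazon EKS Connector manifest (or your Helm chart values) to point to that registry, and then reapply the manifest.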
diff --git a/latest/ug/connector/tsc-faq.adoc b/latest/ug/connector/tsc-faq.adoc new file mode 100644 index 00000000..82c65f30 --- /dev/null +++ b/latest/ug/connector/tsc-faq.adoc @@ -0,0 +1,27 @@ +//!!NODE_ROOT
+include::../attributes.txt[]
+
+[.topic]
+[[tsc-faq,tsc-faq.title]]
+= {aws} Connector frequently asked questions
+:info_doctype: section
+:info_title: {aws} Connector frequently asked questions
+:info_titleabbrev: Frequently asked questions
+:info_abstract: Learn to connect and manage Kubernetes clusters outside {aws} with Amazon EKS Connector, enabling unified cluster visibility and management across environments using a secure, outbound-only connection.
+
+[abstract]
+--
+Learn to connect and manage [.noloc]`Kubernetes` clusters outside {aws} with Amazon EKS Connector, enabling unified cluster visibility and management across environments using a secure, outbound-only connection.
+--
+
+.Q: How does the underlying technology behind the Amazon EKS Connector work?
+A: The Amazon EKS Connector is based on the {aws} Systems Manager (Systems Manager) agent. The Amazon EKS Connector runs as a `StatefulSet` on your [.noloc]`Kubernetes` cluster. It establishes a connection and proxies the communication between the API server of your cluster and Amazon Web Services. It does this to display cluster data in the Amazon EKS console until you disconnect the cluster from {aws}. The Systems Manager agent is an open source project. For more information about this project, see the https://github.com/aws/amazon-ssm-agent[GitHub project page].
+
+.Q: I have an on-premises [.noloc]`Kubernetes` cluster that I want to connect. Do I need to open firewall ports to connect it?
+A: No, you don't need to open any firewall ports. The [.noloc]`Kubernetes` cluster only requires an outbound connection to {aws} Regions. {aws} services never access resources in your on-premises network. The Amazon EKS Connector runs on your cluster and initiates the connection to {aws}. When the cluster registration completes, {aws} only issues commands to the Amazon EKS Connector after you start an action from the Amazon EKS console that requires information from the [.noloc]`Kubernetes` API server on your cluster.
+
+.Q: What data is sent from my cluster to {aws} by the Amazon EKS Connector?
+A: The Amazon EKS Connector sends technical information that's necessary for your cluster to be registered on {aws}. It also sends cluster and workload metadata for the Amazon EKS console features that customers request. The Amazon EKS Connector only gathers or sends this data if you start an action from the Amazon EKS console or the Amazon EKS API that requires the data to be sent to {aws}. Other than the [.noloc]`Kubernetes` version number, {aws} doesn't store any data by default. It stores data only if you authorize it to.
+
+.Q: Can I connect a cluster outside of an {aws} Region?
+A: Yes, you can connect a cluster from any location to Amazon EKS. Moreover, your Amazon EKS service can be located in any public commercial {aws} Region. This works as long as there is a valid network connection from your cluster to the target {aws} Region. We recommend that you pick the {aws} Region that is closest to your cluster location for the best console performance. For example, if you have a cluster running in Tokyo, connect your cluster to the {aws} Region in Tokyo (that is, the `ap-northeast-1` {aws} Region) for low latency. You can connect a cluster from any location to Amazon EKS in any of the public commercial {aws} Regions, except the China and {aws} GovCloud (US) Regions.
diff --git a/latest/ug/contribute/contribute.adoc b/latest/ug/contribute/contribute.adoc
new file mode 100644
index 00000000..fa868a90
--- /dev/null
+++ b/latest/ug/contribute/contribute.adoc
@@ -0,0 +1,20 @@
+[[contribute,contribute.title]]
+= Contribute to the EKS User Guide
+:info_titleabbrev: Contribute
+
+include::../attributes.txt[]
+
+
+{aws} is building an improved contribution experience for the EKS User Guide.
+
+The previous GitHub repository at `awsdocs/amazon-eks-user-guide` is temporarily unavailable while we prepare the new contribution system.
+
+The updated experience will use AsciiDoc, a powerful authoring language similar to Markdown. AsciiDoc combines simple syntax with enterprise documentation features like advanced formatting, cross-referencing, and security controls.
+
+When the EKS User Guide returns to GitHub in mid-November, you'll be able to edit the documentation source files directly. Our streamlined process includes:
+
+* Faster pull request processing
+* Reduced manual steps
+* Automated content quality checks
+
+We look forward to your contributions.
diff --git a/latest/ug/diagrams/README.adoc b/latest/ug/diagrams/README.adoc
new file mode 100644
index 00000000..6cb2f9a6
--- /dev/null
+++ b/latest/ug/diagrams/README.adoc
@@ -0,0 +1,5 @@
+= Diagram Source Files
+
+Diagram source files have moved to Amazon WorkDocs.
+
+View the https://amazon.awsapps.com/workdocs-amazon/index.html#/folder/c87506fb95055627a2e80046310a1df2b0dfe8f66f185bea4c1d7e423a4a7f8b[AmazonEKSDocs-diagrams] folder. This folder is owned by `eks-docs`.
\ No newline at end of file
diff --git a/latest/ug/doc-history.adoc b/latest/ug/doc-history.adoc
new file mode 100644
index 00000000..cdacfe8e
--- /dev/null
+++ b/latest/ug/doc-history.adoc
@@ -0,0 +1,1882 @@
+//!!NODE_ROOT
+
+[.topic]
+[[doc-history,doc-history.title]]
+// H1 title is necessary, and must occur before the [abstract], but is unused in the web page (:info_title: is used instead, and :info_titleabbrev: is used in the ToC)
+= Document history
+:doctype: book
+:sectnums:
+:toc: left
+:icons: font
+:experimental:
+:idprefix:
+:idseparator: -
+:sourcedir: .
+:info_doctype: chapter
+:info_title: Document history
+:keywords: document, publish, release, history, log
+:info_abstract: Important updates to the Amazon EKS documentation, sorted by date, with brief \
+    descriptions of each update and when they occurred.
+
+
+include::attributes.txt[]
+
+[abstract]
+--
+Important updates to the Amazon EKS documentation, sorted by date, with brief descriptions of each update and when they occurred.
+--
+
+The following table describes the major updates and new features for the Amazon EKS User Guide. We also update the documentation frequently to address the feedback that you send us.
+
+[.updates]
+== Updates
+
+[.update,date="2025-01-13"]
+=== {aws} managed policy updates
+[.update-ulink]
+https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html
+
+Added multiple permissions to `AmazonEBSCSIDriverPolicy` to allow the Amazon EBS CSI Driver to restore all snapshots, enable Fast Snapshot Restore (FSR) on EBS volumes, and modify tags on volumes.
+
+[.update,date="2024-12-26"]
+=== {aws} managed policy updates
+[.update-ulink]
+https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html
+
+Added permissions to `AmazonEKSLoadBalancingPolicy`.
+ + +[.update,date="2024-12-20"] +=== Updated cluster insights +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cluster-insights.html + +Amazon EKS upgrade insights will now warn about more cluster health and version compatibility issues. It can detect issues between different [.noloc]`Kubernetes` and Amazon EKS components such as `kubelet`, `kube-proxy`, and Amazon EKS add-ons. + + +[.update,date="2024-12-16"] +=== Node monitoring agent and auto repair +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/node-health.html + +You can use the new `eks-node-monitoring-agent` as an Amazon EKS add-on to detect and show health issues. You can also enable node auto repair to automatically replace nodes when issues are detected. + + +[.update,date="2024-12-01"] +=== Amazon EKS Hybrid Nodes +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/hybrid-nodes-overview.html + +You can now run node on-premises connected to Amazon EKS clusters. With Amazon EKS Hybrid Nodes, you can use your on-premises and edge infrastructure as nodes in Amazon EKS clusters. {aws} manages the {aws}-hosted Kubernetes control plane of the Amazon EKS cluster, and you manage the hybrid nodes that run in your on-premises or edge environments. + + +[.update,date="2024-12-01"] +=== Amazon EKS Auto Mode +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/automode.html + +Amazon EKS Auto Mode fully automates Kubernetes cluster infrastructure management for compute, storage, and networking on {aws}. It simplifies Kubernetes management by automatically provisioning infrastructure, selecting optimal compute instances, dynamically scaling resources, continuously optimizing costs, patching operating systems, and integrating with {aws} security services. + + +[.update,date="2024-11-22"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +Updated `AWSServiceRoleForAmazonEKSNodegroup` for compatibility with China regions. + + +[.update,date="2024-11-22"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +This is a new platform version with security fixes and enhancements. This includes new patch versions of Kubernetes `1.31.2`, `1.30.6`, `1.29.10`, and `1.28.15`. + + +[.update,date="2024-11-21"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +EKS updated {aws} managed policy `AmazonEKSLocalOutpostClusterPolicy`. Added `ec2:DescribeAvailabilityZones` permission so the {aws} Cloud Controller Manager on the cluster control plane can identify the Availability Zone that each node is in. + + +[.update,date="2024-11-21"] +=== [.noloc]`Kubernetes` version 1.30 is now available for local clusters on {aws} Outposts +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts-platform-versions.html + +You can now create an Amazon EKS local cluster on an {aws} Outposts using [.noloc]`Kubernetes` version 1.30. + + +[.update,date="2024-11-20"] +=== [.noloc]`Bottlerocket` AMIs that use FIPS 140-3 +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id-bottlerocket.html + +[.noloc]`Bottlerocket` AMIs are available that are preconfigured to use FIPS 140-3 validated cryptographic modules. This includes the Amazon Linux 2023 Kernel Crypto API Cryptographic Module and the {aws}-LC Cryptographic Module. 
+ + +[.update,date="2024-11-20"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +Updated `AWSServiceRoleForAmazonEKSNodegroup` policy to allow `ec2:RebootInstances` for instances created by Amazon EKS managed node groups. Restricted the `ec2:CreateTags` permissions for Amazon EC2 resources. + + +[.update,date="2024-11-18"] +=== Observability dashboard +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/observability-dashboard.html + +The observability dashboard helps you to quickly detect, troubleshoot, and remediate issues. There are also new link:eks/latest/userguide/cloudwatch.html[CloudWatch vended metrics,type="documentation"] available in the `AWS/EKS` namespace. + + +[.update,date="2024-11-16"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +EKS updated {aws} managed policy `AmazonEKSServiceRolePolicy`. Added permissions for EKS access policies, load balancer management, and automated cluster resource cleanup. + + +[.update,date="2024-11-15"] +=== New role creation in console for add-ons that support EKS Pod Identities +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/creating-an-add-on.html#_create_add_on_console + +There are new steps when using the console to create or update add-ons that support EKS Pod Identities where you can automatically generate IAM roles with the appropriate name, role policy, and trust policy for the add-on. + + +[.update,date="2024-11-15"] +=== Managed node groups in {aws} Local Zones +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html + +Managed node groups can now be created in {aws} Local Zones. + + +[.update,date="2024-11-11"] +=== New metrics are available +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/view-raw-metrics.html + +There are new metrics available under the API group `metrics.eks.amazonaws.com`. + + +[.update,date="2024-11-07"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +EKS updated {aws} managed policy `AmazonEKSComputePolicy`. Updated resource permissions for the `iam:AddRoleToInstanceProfile` action. + + +[.update,date="2024-11-01"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +EKS added a new {aws} managed policy: `AmazonEKSComputePolicy` + + +[.update,date="2024-11-01"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +Added permissions to `AmazonEKSClusterPolicy`. Added `ec2:DescribeInstanceTopology` permission to allow Amazon EKS to attach topology information to the node as labels. 
+ + +[.update,date="2024-10-30"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +EKS added a new {aws} managed policy: `AmazonEKSBlockStoragePolicy` + + + +[.update,date="2024-10-30"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +EKS added a new {aws} managed policy: `AmazonEKSLoadBalancingPolicy` + + + +[.update,date="2024-10-29"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +Added `cloudwatch:PutMetricData` permissions to `AmazonEKSServiceRolePolicy` to allow Amazon EKS to publish metrics to Amazon CloudWatch. + + + +[.update,date="2024-10-28"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +EKS added a new {aws} managed policy: `AmazonEKSNetworkingPolicy` + + + +[.update,date="2024-10-21"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +Added `autoscaling:ResumeProcesses`, `autoscaling:SuspendProcesses`, and associated permissions to `AWSServiceRoleForAmazonEKSNodegroup` in China regions to integrate with Amazon Application Recovery Controller for EKS. No changes to other regions. + +[.update,date="2024-10-21"] +=== Dual-stack endpoints for new `IPv6` clusters +[.update-ulink] +link:eks/latest/userguide/cluster-endpoint.html[Configure endpoint access,type="documentation"] + +Connect to new `IPv6` clusters with a `eks-cluster.[.replaceable]``region``.api.aws` endpoint that is dual-stack. This endpoint is returned when you describe these clusters. `kubectl` and other Kubernetes API clients in `IPv4`, `IPv6`, or dual-stack environments can resolve and connect to these endpoints for public or private clusters. + + +[.update,date="2024-10-10"] +=== {aws} managed policy updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +Added permissions to `AmazonEKSServicePolicy` and `AmazonEKSServiceRolePolicy`. Added `ec2:GetSecurityGroupsForVpc` and associated tag permissions to allow EKS to read security group information and update related tags. + + +[.update,date="2024-10-11"] +=== AL2023 accelerated AMIs +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html + +You can now use accelerated `NVIDIA` and {aws} Neuron instances for AMIs based on AL2023. + + +[.update,date="2024-10-10"] +=== New source format + +We have switched over to a new source format with some layout changes. There are temporary minor formatting issues that we are addressing. + + +[.update,date="2024-10-03"] +=== {aws} managed policy updates - New policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +EKS added a new {aws} managed policy. + +[.update,date="2024-09-24"] +=== [.noloc]`Kubernetes` version `1.31` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.31 + +Added [.noloc]`Kubernetes` version `1.31` support for new clusters and version upgrades. 
+ + +[.update,date="2024-08-21"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. + + +[.update,date="2024-08-20"] +=== [.noloc]`Kubernetes` version 1.29 is now available for local clusters on {aws} Outposts +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts-platform-versions.html + +You can now create an Amazon EKS local cluster on an {aws} Outposts using [.noloc]`Kubernetes` version 1.29. + + +[.update,date="2024-08-14"] +=== EKS Pod Identity in {aws} GovCloud (US) +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/pod-identites.html + +Amazon EKS Pod Identities associate an IAM role with a [.noloc]`Kubernetes` service account. With this feature, you no longer need to provide extended permissions to the node IAM role. This way, [.noloc]`Pods` on that node can call {aws} APIs. Unlike IAM roles for service accounts, EKS Pod Identities are completely inside EKS; you don't need an [.noloc]`OIDC` identity provider. + + +[.update,date="2024-08-09"] +=== Scenario-driven content updates +We renamed and updated topics to be more scenario-driven throughout the entire guide. + + +[.update,date="2024-08-07"] +=== Dual-stack VPC interface endpoints for Amazon EKS +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/vpc-interface-endpoints.html + +You can now create dual-stack VPC interface endpoints for Amazon EKS with both `IPv4` and `IPv6` IP addresses and DNS names. + + +[.update,date="2024-08-01"] +=== New dual-stack endpoints for the Amazon EKS APIs with `IPv6` addresses +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/network-reqs.html + +The EKS API for creating and managing clusters, and the OIDC issuer URLs for clusters have new dual-stack endpoints. The new DNS name for the Amazon EKS API is `eks.[.replaceable]``region``.api.aws` which resolves to `IPv4` addresses and `IPv6` addresses. New clusters have a new dual-stack OIDC issuer URL (`oidc-eks.[.replaceable]``region``.api.aws`). + + +[.update,date="2024-07-01"] +=== Capacity Blocks for managed node groups +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/capacity-blocks-mng.html + +You can now use Capacity Blocks for managed node groups. + + +[.update,date="2024-06-28"] +=== Auto Scaling Group metrics collection enabled by default +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/enable-asg-metrics.html + +Amazon EKS managed node groups now have Amazon EC2 Auto Scaling group metrics enabled by default with no additional charge. Previously, you had to do several steps to enable this feature. + + +[.update,date="2024-06-27"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. + + +[.update,date="2024-06-12"] +=== Improvements to AMI information references +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-amis.html + +We made improvements to the AMI information references, in particular for [.noloc]`Bottlerocket`. 
+ + +[.update,date="2024-06-12"] +=== [.noloc]`Kubernetes` version `1.26` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.26 + +[.noloc]`Kubernetes` version `1.26` is now in extended support. + + +[.update,date="2024-05-23"] +=== [.noloc]`Kubernetes` version `1.30` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.30 + +Added [.noloc]`Kubernetes` version `1.30` support for new clusters and version upgrades. + + +[.update,date="2024-05-14"] +=== [.noloc]`CoreDNS` Autoscaling +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/coredns-autoscaling.html + +[.noloc]`CoreDNS` autoscaler will dynamically adapt the number of replicas of the [.noloc]`CoreDNS` deployment in an EKS cluster based on the number of nodes and CPU cores. This feature works for [.noloc]`CoreDNS` `v1.9` and the latest platform version of EKS release version `1.25` and later. + + +[.update,date="2024-05-14"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +This is a new platform version with security fixes and enhancements. This includes new patch versions of Kubernetes `1.29.4`, `1.28.9`, and `1.27.13`. + + +[.update,date="2024-04-10"] +=== CloudWatch [.noloc]`Container Insights` support for [.noloc]`Windows` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cloudwatch.html + +The Amazon CloudWatch Observability Operator add-on now also allows [.noloc]`Container Insights` on [.noloc]`Windows` worker nodes in the cluster. + + +[.update,date="2024-04-05"] +=== [.noloc]`Kubernetes` concepts +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-concepts.html + +Added new Kubernetes concepts topic. + + +[.update,date="2024-04-02"] +=== Restructure Access and IAM Content +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cluster-auth.html + +Move existing pages related to access and IAM topics, such as auth config map, access entries, Pod ID, and IRSA into new section. Revise overview content. + + +[.update,date="2024-03-13"] +=== [.noloc]`Bottlerocket` OS support for Amazon S3 CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/s3-csi.html + +The Mountpoint for Amazon S3 CSI driver is now compatible with [.noloc]`Bottlerocket`. + + +[.update,date="2024-03-04"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. + + +[.update,date="2024-02-29"] +=== Amazon Linux 2023 +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/al2023.html + +Amazon Linux 2023 (AL2023) is a new Linux-based operating system designed to provide a secure, stable, and high-performance environment for your cloud applications. + + +[.update,date="2024-02-26"] +=== EKS Pod Identity and IRSA support sidecars in [.noloc]`Kubernetes` `1.29` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.29 + +In [.noloc]`Kubernetes` `1.29`, sidecar containers are available in Amazon EKS clusters. Sidecar containers are supported with IAM roles for service accounts or EKS Pod Identity. 
For more information about sidecars, see https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/[Sidecar Containers] in the [.noloc]`Kubernetes` documentation. + + +[.update,date="2024-01-23"] +=== [.noloc]`Kubernetes` version `1.29` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.29 + +Added [.noloc]`Kubernetes` version `1.29` support for new clusters and version upgrades. + + +[.update,date="2024-01-16"] +=== Full release: Amazon EKS Extended Support for [.noloc]`Kubernetes` versions +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html + +Extended [.noloc]`Kubernetes` version support allows you to stay at a specific [.noloc]`Kubernetes` version for longer than 14 months. + + +[.update,date="2023-12-28"] +=== Amazon EKS cluster health detection in the {aws} Cloud +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html#cluster-health-status + +Amazon EKS detects issues with your Amazon EKS clusters and the infrastructure of the cluster prerequisites in _cluster health_. You can view the issues with your EKS clusters in the {aws-management-console} and in the `health` of the cluster in the EKS API. These issues are in addition to the issues that are detected by and displayed by the console. Previously, cluster health was only available for local clusters on {aws} Outposts. + + +[.update,date="2023-12-20"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Canada West (Calgary) (`ca-west-1`) {aws} Region. + + +[.update,date="2023-12-20"] +=== Cluster insights +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cluster-insights.html + +You can now get recommendations on your cluster based on recurring checks. + + +[.update,date="2023-12-18"] +=== You can now grant IAM roles and users access to your cluster using access entries +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html + +Before the introduction of access entries, you granted IAM roles and users access to your cluster by adding entries to the `aws-auth` `ConfigMap`. Now each cluster has an access mode, and you can switch to using access entries on your schedule. After you switch modes, you can add users by adding access entries in the {aws} CLI, {aws} CloudFormation, and the {aws} SDKs. + + +[.update,date="2023-12-12"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +This is a new platform version with security fixes and enhancements. This includes new patch versions of Kubernetes `1.28.4`, `1.27.8`, `1.26.11`, and `1.25.16`. + + +[.update,date="2023-11-27"] +=== [.noloc]`Mountpoint` for Amazon S3 CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/s3-csi.html + +You can now install the [.noloc]`Mountpoint` for Amazon S3 CSI driver on Amazon EKS clusters. + + +[.update,date="2023-11-26"] +=== Amazon EKS Pod Identities +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/pod-identites.html + +Amazon EKS Pod Identities associate an IAM role with a [.noloc]`Kubernetes` service account. With this feature, you no longer need to provide extended permissions to the node IAM role. This way, [.noloc]`Pods` on that node can call {aws} APIs. Unlike IAM roles for service accounts, EKS Pod Identities are completely inside EKS; you don't need an [.noloc]`OIDC` identity provider. 
+ + +[.update,date="2023-11-26"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. + + +[.update,date="2023-11-26"] +=== Turn on [.noloc]`Prometheus` metrics when creating a cluster +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/prometheus.html + +In the {aws-management-console}, you can now turn on [.noloc]`Prometheus` metrics when creating a cluster. You can also view [.noloc]`Prometheus` scraper details in the *Observability* tab. + + +[.update,date="2023-11-17"] +=== CSI snapshot controller +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/csi-snapshot-controller.html + +You can now install the CSI snapshot controller for use with compatible CSI drivers, such as the Amazon EBS CSI driver. + + +[.update,date="2023-11-14"] +=== ADOT Operator topic rewrite +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/opentelemetry.html + +The Amazon EKS add-on support for ADOT Operator section was redundant with the {aws} Distro for [.noloc]`OpenTelemetry` documentation. We migrated remaining essential information to that resource to reduce outdated and inconsistent information. + + +[.update,date="2023-11-10"] +=== [.noloc]`CoreDNS` EKS add-on support for Prometheus metrics +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html + +The `v1.10.1-eksbuild.5`, `v1.9.3-eksbuild.9`, and `v1.8.7-eksbuild.8` versions of the EKS add-on for [.noloc]`CoreDNS` expose the port that [.noloc]`CoreDNS` published metrics to, in the `kube-dns` service. This makes it easier to include the [.noloc]`CoreDNS` metrics in your monitoring systems. + + +[.update,date="2023-11-06"] +=== Amazon EKS CloudWatch Observability Operator add-on +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cloudwatch.html + +Added Amazon EKS CloudWatch Observability Operator page. + + +[.update,date="2023-10-31"] +=== Capacity Blocks for self-managed P5 instances in US East (Ohio) +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/capacity-blocks.html + +In US East (Ohio), you can now use Capacity Blocks for self-managed P5 instances. + + +[.update,date="2023-10-24"] +=== Clusters support modifying subnets and security groups +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/network-reqs.html + +You can update the cluster to change which subnets and security groups the cluster uses. You can update from the {aws-management-console}, the latest version of the {aws} CLI, {aws} CloudFormation, and `eksctl` version `v0.164.0-rc.0` or later. You might need to do this to provide subnets with more available IP addresses to successfully upgrade a cluster version. + + +[.update,date="2023-10-23"] +=== Cluster role and managed node group role supports customer managed {aws} Identity and Access Management policies +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cluster-iam-role.html + +You can use a custom IAM policy on the cluster role, instead of the link:aws-managed-policy/latest/reference/AmazonEKSClusterPolicy.html[AmazonEKSClusterPolicy,type="documentation"] {aws} managed policy. 
Also, you can use a custom IAM policy on the node role in a managed node group, instead of the link:aws-managed-policy/latest/reference/AmazonEKSWorkerNodePolicy.html[AmazonEKSWorkerNodePolicy,type="documentation"] {aws} managed policy. Do this to create a policy with the least privilege to meet strict compliance requirements.
+
+
+[.update,date="2023-10-06"]
+=== Fix link to eksctl installation
+Fix install link for eksctl after the page was moved.
+
+
+[.update,date="2023-10-04"]
+=== Preview release: Amazon EKS Extended Support for [.noloc]`Kubernetes` versions
+[.update-ulink]
+https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html
+
+Extended [.noloc]`Kubernetes` version support allows you to stay at a specific [.noloc]`Kubernetes` version for longer than 14 months.
+
+
+[.update,date="2023-09-29"]
+=== Remove references to {aws} App Mesh integration
+Amazon EKS integrations with {aws} App Mesh remain for existing customers of App Mesh only.
+
+
+[.update,date="2023-09-26"]
+=== [.noloc]`Kubernetes` version `1.28`
+[.update-ulink]
+https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.28
+
+Added [.noloc]`Kubernetes` version `1.28` support for new clusters and version upgrades.
+
+
+[.update,date="2023-09-15"]
+=== [.noloc]`CoreDNS` Amazon EKS add-on supports modifying PDB
+[.update-ulink]
+https://docs.aws.amazon.com/eks/latest/userguide/managing-coredns.html
+
+You can modify the `PodDisruptionBudget` of the EKS add-on for [.noloc]`CoreDNS` in versions `v1.9.3-eksbuild.7` and later and `v1.10.1-eksbuild.4` and later.
+
+
+[.update,date="2023-09-15"]
+=== Existing clusters support [.noloc]`Kubernetes` network policy enforcement in the [.noloc]`Amazon VPC CNI plugin for Kubernetes`
+[.update-ulink]
+https://docs.aws.amazon.com/eks/latest/userguide/cni-network-policy.html
+
+You can use [.noloc]`Kubernetes` _network policy_ in existing clusters with the [.noloc]`Amazon VPC CNI plugin for Kubernetes`, instead of requiring a third party solution.
+
+
+[.update,date="2023-09-07"]
+=== Amazon EKS support for shared subnets
+[.update-ulink]
+https://docs.aws.amazon.com/eks/latest/userguide/network-reqs.html#network-requirements-shared
+
+New link:eks/latest/userguide/network-reqs.html#network-requirements-shared[Shared subnet requirements and considerations,type="documentation"] for making Amazon EKS clusters in shared subnets.
+
+
+[.update,date="2023-09-06"]
+=== Updates to What is Amazon EKS?
+[.update-ulink]
+https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html
+
+Added new link:eks/latest/userguide/common-use-cases.html[Common use cases,type="documentation"] and link:eks/latest/userguide/eks-architecture.html[Architecture,type="documentation"] topics. Refreshed other topics.
+
+
+[.update,date="2023-08-29"]
+=== [.noloc]`Kubernetes` network policy enforcement in the [.noloc]`Amazon VPC CNI plugin for Kubernetes`
+[.update-ulink]
+https://docs.aws.amazon.com/eks/latest/userguide/cni-network-policy.html
+
+You can use [.noloc]`Kubernetes` _network policy_ with the [.noloc]`Amazon VPC CNI plugin for Kubernetes`, instead of requiring a third party solution.
+ + +[.update,date="2023-08-01"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Israel (Tel Aviv) (`il-central-1`) {aws} Region. + + +[.update,date="2023-07-31"] +=== Configurable ephemeral storage for Fargate +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/fargate-pod-configuration.html#fargate-storage + +You can increase the total amount of ephemeral storage for each [.noloc]`Pod` running on Amazon EKS Fargate. + + +[.update,date="2023-07-26"] +=== Add-on support for Amazon EFS CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html#add-ons-aws-efs-csi-driver + +You can now use the {aws-management-console}, {aws} CLI, and API to manage the Amazon EFS CSI driver. + + +[.update,date="2023-07-26"] +=== {aws} managed policy updates - New policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS added a new {aws} managed policy. + + +[.update,date="2023-07-20"] +=== [.noloc]`Kubernetes` version updates for 1.27, 1.26, 1.25, and 1.24 are now available for local clusters on {aws} Outposts +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts-platform-versions.html + +[.noloc]`Kubernetes` version updates to 1.27.3, 1.26.6, 1.25.11, and 1.24.15 are now available for local clusters on {aws} Outposts + + +[.update,date="2023-07-06"] +=== IP prefixes support for [.noloc]`Windows` nodes +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cni-increase-ip-addresses.html + +Assigning IP prefixes to your nodes can enable you to host a significantly higher number of [.noloc]`Pods` on your nodes than you can when assigning individual secondary IP addresses to your nodes. + + +[.update,date="2023-06-30"] +=== Amazon FSx for OpenZFS CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/fsx-openzfs-csi.html + +You can now install the Amazon FSx for OpenZFS CSI driver on Amazon EKS clusters. + + +[.update,date="2023-06-19"] +=== [.noloc]`Pods` on Linux nodes in `IPv4` clusters can now communicate with `IPv6` endpoints. +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6-egress.html + +After assigning an IPv6 address to your node, your [.noloc]`Pods`' `IPv4` address is network address translated to the `IPv6` address of the node that it's running on. + + +[.update,date="2023-05-30"] +=== [.noloc]`Windows` managed node groups in {aws} GovCloud (US) Regions +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/create-managed-node-group.html + +In the {aws} GovCloud (US) Regions, Amazon EKS managed node groups can now run [.noloc]`Windows` containers. + + +[.update,date="2023-05-24"] +=== [.noloc]`Kubernetes` version `1.27` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.27 + +Added [.noloc]`Kubernetes` version `1.27` support for new clusters and version upgrades. + + +[.update,date="2023-04-11"] +=== [.noloc]`Kubernetes` version `1.26` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.26 + +Added [.noloc]`Kubernetes` version `1.26` support for new clusters and version upgrades. 
+ + +[.update,date="2023-03-27"] +=== Domainless [.noloc]`gMSA` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-windows-ami.html#ad-and-gmsa-support + +You can now use domainless [.noloc]`gMSA` with [.noloc]`Windows` [.noloc]`Pods`. + + +[.update,date="2023-03-10"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Asia Pacific (Melbourne) (`ap-southeast-4`) {aws} Region. + + +[.update,date="2023-03-03"] +=== Amazon File Cache CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/file-cache-csi.html + +You can now install the Amazon File Cache CSI driver on Amazon EKS clusters. + + +[.update,date="2023-03-01"] +=== [.noloc]`Kubernetes` version 1.25 is now available for local clusters on {aws} Outposts +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts-local-cluster-create.html + +You can now create an Amazon EKS local cluster on an Outpost using [.noloc]`Kubernetes` versions `1.22` – `1.25`. + + +[.update,date="2023-02-22"] +=== [.noloc]`Kubernetes` version `1.25` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.25 + +Added [.noloc]`Kubernetes` version `1.25` support for new clusters and version upgrades. + + +[.update,date="2023-02-07"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. + + +[.update,date="2023-02-06"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Asia Pacific (Hyderabad) (`ap-south-2`), Europe (Zurich) (`eu-central-2`), and Europe (Spain) (`eu-south-2`) {aws} Regions. + + +[.update,date="2023-01-17"] +=== [.noloc]`Kubernetes` versions `1.21` – `1.24` are now available for local clusters on {aws} Outposts. +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts-local-cluster-create.html + +You can now create an Amazon EKS local cluster on an Outpost using [.noloc]`Kubernetes` versions `1.21` – `1.24`. Previously, only version `1.21` was available. + + +[.update,date="2022-12-16"] +=== Amazon EKS now supports {aws} PrivateLink +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/vpc-interface-endpoints.html + +You can use an {aws} PrivateLink to create a private connection between your VPC and Amazon EKS. + + +[.update,date="2022-12-15"] +=== Managed node group Windows support +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html + +You can now use Windows for Amazon EKS managed node groups. + + +[.update,date="2022-11-28"] +=== Amazon EKS add-ons from independent software vendors are now available in the {aws} Marketplace +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html + +You can now browse and subscribe to Amazon EKS add-ons from independent software vendors through the {aws} Marketplace. + + +[.update,date="2022-11-17"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. 
+ + +[.update,date="2022-11-15"] +=== [.noloc]`Kubernetes` version `1.24` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.24 + +Added [.noloc]`Kubernetes` version `1.24` support for new clusters and version upgrades. + + +[.update,date="2022-11-03"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Middle East (UAE) (`me-central-1`) {aws} Region. + + +[.update,date="2022-10-24"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. + + +[.update,date="2022-10-20"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. + + +[.update,date="2022-09-19"] +=== Local clusters on {aws} Outposts are now available +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts-local-cluster-create.html + +You can now create an Amazon EKS local cluster on an Outpost. + + +[.update,date="2022-09-08"] +=== Fargate vCPU based quotas +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/service-quotas.html#service-quotas-eks-fargate + +Fargate is transitioning from [.noloc]`Pod` based quotas to vCPU based quotas. + + +[.update,date="2022-08-31"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. + + +[.update,date="2022-08-24"] +=== {aws} managed policy updates - New policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS added a new {aws} managed policy. + + +[.update,date="2022-08-24"] +=== Cost monitoring +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cost-monitoring + +Amazon EKS now supports [.noloc]`Kubecost`, which enables you to monitor costs broken down by [.noloc]`Kubernetes` resources including [.noloc]`Pods`, nodes, namespaces, and labels. + + +[.update,date="2022-08-23"] +=== {aws} managed policy updates - New policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS added a new {aws} managed policy. + + +[.update,date="2022-08-16"] +=== Tag resources for billing +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-using-tags.html#tag-resources-for-billing + +Added `aws:eks:cluster-name` generated cost allocation tag support for all clusters. + + +[.update,date="2022-08-16"] +=== Fargate profile wildcards +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/fargate-profile.html#fargate-profile-wildcards + +Added support for Fargate profile wildcards in the selector criteria for namespaces, label keys, and label values. + + +[.update,date="2022-08-11"] +=== [.noloc]`Kubernetes` version `1.23` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#kubernetes-1.23 + +Added [.noloc]`Kubernetes` version `1.23` support for new clusters and version upgrades. 
+ + +[.update,date="2022-05-03"] +=== View [.noloc]`Kubernetes` resources in the {aws-management-console} +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/view-kubernetes-resources.html + +You can now view information about the [.noloc]`Kubernetes` resources deployed to your cluster using the {aws-management-console}. + + +[.update,date="2022-05-02"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Asia Pacific (Jakarta) (`ap-southeast-3`) {aws} Region. + + +[.update,date="2022-04-21"] +=== Observability page and ADOT add-on support +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-observe.html + +Added Observability page and {aws} Distro for [.noloc]`OpenTelemetry` (ADOT). + + +[.update,date="2022-04-04"] +=== {aws} managed policy updates - New policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS added a new {aws} managed policy. + + +[.update,date="2022-04-04"] +=== [.noloc]`Kubernetes` version `1.22` +Added [.noloc]`Kubernetes` version `1.22` support for new clusters and version upgrades. + + +[.update,date="2022-04-01"] +=== Added Fargate [.noloc]`Pod` patching details +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/fargate-pod-patching.html + +When upgrading Fargate [.noloc]`Pods`, Amazon EKS first tries to evict [.noloc]`Pods` based on your [.noloc]`Pod` disruption budgets. You can create event rules to react to failed evictions before the [.noloc]`Pods` are deleted. + + +[.update,date="2022-03-31"] +=== Full release: Add-on support for Amazon EBS CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html + +You can now use the {aws-management-console}, {aws} CLI, and API to manage the Amazon EBS CSI driver. + + +[.update,date="2022-03-22"] +=== {aws} Outposts content update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/outposts.html + +Instructions to deploy an Amazon EKS cluster on {aws} Outposts. + + +[.update,date="2022-03-21"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. + + +[.update,date="2022-03-14"] +=== Windows `containerd` support +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-windows-ami.html.html + +You can now select the `containerd` runtime for [.noloc]`Windows` nodes. + + +[.update,date="2022-02-25"] +=== Added Amazon EKS Connector considerations to security documentation +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/connector-considerations.html + +Describes the shared responsibility model as it relates to connected clusters. + + +[.update,date="2022-01-06"] +=== Assign `IPv6` addresses to your [.noloc]`Pods` and services +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html + +You can now create a `1.21` or later cluster that assigns `IPv6` addresses to your [.noloc]`Pods` and services. + + +[.update,date="2021-12-13"] +=== {aws} managed policy updates - Update to an existing policy +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html#security-iam-awsmanpol-updates + +Amazon EKS updated an existing {aws} managed policy. 
+ + +[.update,date="2021-12-09"] +=== Preview release: Add-on support for Amazon EBS CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html + +You can now preview using the {aws-management-console}, {aws} CLI, and API to manage the Amazon EBS CSI driver. + + +[.update,date="2021-11-29"] +=== Karpenter autoscaler support +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html#karpenter + +You can now use the Karpenter open-source project to autoscale your nodes. + + +[.update,date="2021-11-10"] +=== Fluent Bit [.noloc]`Kubernetes` filter support in Fargate logging +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/fargate-logging.html#fargate-logging-kubernetes-filter + +You can now use the Fluent Bit [.noloc]`Kubernetes` filter with Fargate logging. + + +[.update,date="2021-11-09"] +=== [.noloc]`Windows` support available in the control plane +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html + +[.noloc]`Windows` support is now available in your control plane. You no longer need to enable it in your data plane. + + +[.update,date="2021-10-28"] +=== [.noloc]`Bottlerocket` added as an AMI type for managed node groups +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html + +Previously, [.noloc]`Bottlerocket` was only available as a self-managed node option. Now it can be configured as a managed node group, reducing the effort that's required to meet node compliance requirements. + + +[.update,date="2021-10-25"] +=== DL1 driver support +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-ami-build-scripts.html + +Custom Amazon Linux AMIs now support deep learning workloads for Amazon Linux 2. This enablement allows a generic on-premises or cloud baseline configuration. + + +[.update,date="2021-09-13"] +=== VT1 video support +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-ami-build-scripts.html + +Custom Amazon Linux AMIs now support VT1 for some distributions. This enablement advertises Xilinx U30 devices on your Amazon EKS cluster. + + +[.update,date="2021-09-08"] +=== Amazon EKS Anywhere is now available +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-deployment-options.html + +Amazon EKS Anywhere is a new deployment option for Amazon EKS that you can use to create and operate [.noloc]`Kubernetes` clusters on-premises. + + +[.update,date="2021-09-08"] +=== Amazon EKS Connector is now available +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-connector.html + +You can use Amazon EKS Connector to register and connect any conformant [.noloc]`Kubernetes` cluster to {aws} and visualize it in the Amazon EKS console. + + +[.update,date="2021-09-02"] +=== Amazon FSx for NetApp ONTAP CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/fsx-ontap.html + +Added topic that summarizes the Amazon FSx for NetApp ONTAP CSI driver and gives links to other references. + + +[.update,date="2021-08-30"] +=== Managed node groups now auto-calculates the Amazon EKS recommended maximum [.noloc]`Pods` for nodes +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cni-increase-ip-addresses.html + +Managed node groups now auto-calculate the Amazon EKS maximum [.noloc]`Pods` for nodes that you deploy without a launch template, or with a launch template that you haven't specified an AMI ID in. 
+ + +[.update,date="2021-08-20"] +=== Remove Amazon EKS management of add-on settings without removing the Amazon EKS add-on software +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html#removing-vpc-cni-eks-add-on + +You can now remove an Amazon EKS add-on without removing the add-on software from your cluster. + +
+[.update,date="2021-08-02"] +=== Create multi-homed [.noloc]`Pods` using Multus +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/pod-multiple-network-interfaces.html + +You can now add multiple network interfaces to a [.noloc]`Pod` using Multus. + +
+[.update,date="2021-07-27"] +=== Add more IP addresses to your [.noloc]`Linux` Amazon EC2 nodes +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cni-increase-ip-addresses.html + +You can now add significantly more IP addresses to your [.noloc]`Linux` Amazon EC2 nodes. This means that you can run a higher density of [.noloc]`Pods` on each node. + +
+[.update,date="2021-07-19"] +=== [.noloc]`Kubernetes` version `1.21` +Added [.noloc]`Kubernetes` version `1.21` support. + +
+[.update,date="2021-07-19"] +=== `containerd` runtime bootstrap +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#containerd-bootstrap + +The Amazon EKS optimized accelerated Amazon Linux Amazon Machine Image (AMI) now contains a bootstrap flag that you can use to enable the `containerd` runtime in Amazon EKS optimized and [.noloc]`Bottlerocket` AMIs. This flag is available in all supported [.noloc]`Kubernetes` versions of the AMI. + +
+[.update,date="2021-06-17"] +=== Added managed policies topic +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-iam-awsmanpol.html + +Added a list of all Amazon EKS IAM managed policies and the changes that have been made to them since June 17, 2021. + +
+[.update,date="2021-06-01"] +=== Use security groups for [.noloc]`Pods` with Fargate +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html + +You can now use security groups for [.noloc]`Pods` with Fargate, in addition to using them with Amazon EC2 nodes. + +
+[.update,date="2021-05-19"] +=== Added [.noloc]`CoreDNS` and `kube-proxy` Amazon EKS add-ons +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html + +Amazon EKS can now help you manage the [.noloc]`CoreDNS` and `kube-proxy` Amazon EKS add-ons for your cluster. + +
+[.update,date="2021-05-18"] +=== [.noloc]`Kubernetes` version `1.20` +Added [.noloc]`Kubernetes` version `1.20` support for new clusters and version upgrades. + +
+[.update,date="2021-05-14"] +=== [.noloc]`{aws} Load Balancer Controller` `2.2.0` released +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html + +You can now use the [.noloc]`{aws} Load Balancer Controller` to create Elastic Load Balancers using instance or IP targets. + +
+[.update,date="2021-05-11"] +=== Node taints for managed node groups +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/node-taints-managed-node-groups.html + +Amazon EKS now supports adding node taints to managed node groups.
+ + +[.update,date="2021-02-26"] +=== Secrets encryption for existing clusters +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html#enable-kms + +Amazon EKS now supports adding https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/[secrets encryption] to existing clusters. + +
+[.update,date="2021-02-16"] +=== [.noloc]`Kubernetes` version `1.19` +Added [.noloc]`Kubernetes` version `1.19` support for new clusters and version upgrades. + +
+[.update,date="2021-02-12"] +=== Amazon EKS now supports [.noloc]`OpenID Connect` (OIDC) identity providers as a method to authenticate users to a version `1.16` or later cluster. +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/authenticate-oidc-identity-provider.html + +OIDC identity providers can be used with, or as an alternative to, {aws} Identity and Access Management (IAM). + +
+[.update,date="2020-12-01"] +=== Amazon EKS can now manage specific add-ons for your cluster +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html + +You can manage add-ons yourself, or allow Amazon EKS to control the launch and version of an add-on through the Amazon EKS API. + +
+[.update,date="2020-12-01"] +=== Deploy Spot Instance types in a managed node group +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types + +You can now deploy multiple Spot or On-Demand Instance types to a managed node group. + +
+[.update,date="2020-12-01"] +=== View node and workload resources in the {aws-management-console} +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/view-kubernetes-resources.html + +You can now view details about your managed, self-managed, and Fargate nodes and your deployed [.noloc]`Kubernetes` workloads in the {aws-management-console}. + +
+[.update,date="2020-10-23"] +=== NLB IP target support +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html#network-load-balancer + +You can now deploy a Network Load Balancer with IP targets. This means that you can use an NLB to load balance network traffic to Fargate [.noloc]`Pods` and directly to [.noloc]`Pods` that are running on Amazon EC2 nodes. + +
+[.update,date="2020-10-23"] +=== Share an ALB across multiple Ingresses +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html + +You can now share an {aws} Application Load Balancer (ALB) across multiple [.noloc]`Kubernetes` Ingresses. In the past, you had to deploy a separate ALB for each Ingress. + +
+[.update,date="2020-10-13"] +=== [.noloc]`Kubernetes` version `1.18` +Added [.noloc]`Kubernetes` version `1.18` support for new clusters and version upgrades. + +
+[.update,date="2020-09-29"] +=== Specify a custom CIDR block for [.noloc]`Kubernetes` service IP address assignment +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html + +You can now specify a custom CIDR block that [.noloc]`Kubernetes` assigns service IP addresses from. + +
+[.update,date="2020-09-09"] +=== Assign security groups to individual [.noloc]`Pods` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html + +You can now associate different security groups with some of the individual [.noloc]`Pods` that are running on many Amazon EC2 instance types.
+ + +[.update,date="2020-08-31"] +=== Deploy [.noloc]`Bottlerocket` on your nodes +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/launch-node-bottlerocket.html + +You can now deploy nodes that are running link:bottlerocket/[Bottlerocket,type="marketing"]. + + +[.update,date="2020-08-17"] +=== Managed node group launch templates and custom AMI +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html + +You can now deploy a managed node group that uses an Amazon EC2 launch template. The launch template can specify a custom AMI, if you choose. + + +[.update,date="2020-08-17"] +=== The ability to launch Arm nodes is generally available +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#arm-ami + +You can now launch Arm nodes in managed and self-managed node groups. + + +[.update,date="2020-08-17"] +=== EFS support for {aws} Fargate +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html + +You can now use Amazon EFS with {aws} Fargate. + + +[.update,date="2020-08-12"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +This is a new platform version with security fixes and enhancements. This includes UDP support for services of type `LoadBalancer` when using Network Load Balancers with [.noloc]`Kubernetes` version `1.15` or later. For more information, see the https://github.com/kubernetes/kubernetes/pull/92109[Allow UDP for {aws} Network Load Balancer] issue on [.noloc]`GitHub`. + + +[.update,date="2020-08-06"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Africa (Cape Town) (`af-south-1`) and Europe (Milan) (`eu-south-1`) {aws} Regions. + + +[.update,date="2020-08-03"] +=== Fargate usage metrics +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/monitoring-fargate-usage.html + +{aws} Fargate provides CloudWatch usage metrics that provide visibility into your account's usage of Fargate On-Demand resources. + + +[.update,date="2020-07-10"] +=== [.noloc]`Kubernetes` version `1.17` +Added [.noloc]`Kubernetes` version `1.17` support for new clusters and version upgrades. + + +[.update,date="2020-06-18"] +=== Create and manage App Mesh resources from within [.noloc]`Kubernetes` with the App Mesh controller for [.noloc]`Kubernetes` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/mesh-k8s-integration.html + +You can create and manage App Mesh resources from within [.noloc]`Kubernetes`. The controller also automatically injects the Envoy proxy and init containers into [.noloc]`Pods` that you deploy. + + +[.update,date="2020-06-04"] +=== Amazon EKS now supports Amazon EC2 Inf1 nodes +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/inferentia-support.html + +You can add Amazon EC2 Inf1 nodes to your cluster. + + +[.update,date="2020-05-13"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the {aws} GovCloud (US-East) (`us-gov-east-1`) and {aws} GovCloud (US-West) (`us-gov-west-1`) {aws} Regions. + + +[.update,date="2020-05-12"] +=== [.noloc]`Kubernetes` `1.12` is no longer supported on Amazon EKS +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html + +[.noloc]`Kubernetes` version `1.12` is no longer supported on Amazon EKS. Update any `1.12` clusters to version `1.13` or later to avoid service interruption. 
+ + +[.update,date="2020-04-30"] +=== [.noloc]`Kubernetes` version `1.16` +Added [.noloc]`Kubernetes` version `1.16` support for new clusters and version upgrades. + +
+[.update,date="2020-04-16"] +=== Added the *AWSServiceRoleForAmazonEKS* service-linked role +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/using-service-linked-roles-eks.html + +Added the *AWSServiceRoleForAmazonEKS* service-linked role. + +
+[.update,date="2020-03-10"] +=== [.noloc]`Kubernetes` version `1.15` +Added [.noloc]`Kubernetes` version `1.15` support for new clusters and version upgrades. + +
+[.update,date="2020-02-26"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Beijing (`cn-north-1`) and Ningxia (`cn-northwest-1`) {aws} Regions. + +
+[.update,date="2019-12-23"] +=== FSx for Lustre CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/fsx-csi.html + +Added topic for installing the FSx for Lustre CSI driver on [.noloc]`Kubernetes` `1.14` Amazon EKS clusters. + +
+[.update,date="2019-12-20"] +=== Restrict network access to the public access endpoint of a cluster +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html + +With this update, you can use Amazon EKS to restrict the CIDR ranges that can communicate with the public access endpoint of the [.noloc]`Kubernetes` API server. + +
+[.update,date="2019-12-13"] +=== Resolve the private access endpoint address for a cluster from outside of a VPC +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html + +With this update, you can use Amazon EKS to resolve the private access endpoint of the [.noloc]`Kubernetes` API server from outside of a VPC. + +
+[.update,date="2019-12-04"] +=== (Beta) Amazon EC2 A1 instance nodes +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/arm-support.html + +Launch link:ec2/instance-types/a1/[Amazon EC2 A1,type="marketing"] instance nodes that register with your Amazon EKS cluster. + +
+[.update,date="2019-12-03"] +=== Creating a cluster on {aws} Outposts +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-on-outposts.html + +Amazon EKS now supports creating clusters on {aws} Outposts. + +
+[.update,date="2019-12-03"] +=== {aws} Fargate on Amazon EKS +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/fargate.html + +Amazon EKS [.noloc]`Kubernetes` clusters now support running [.noloc]`Pods` on Fargate. + +
+[.update,date="2019-11-21"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Canada (Central) (`ca-central-1`) {aws} Region. + +
+[.update,date="2019-11-18"] +=== Managed node groups +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html + +Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS [.noloc]`Kubernetes` clusters. + +
+[.update,date="2019-11-06"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +New platform versions to address https://groups.google.com/forum/#!msg/kubernetes-security-announce/jk8polzSUxs/dfq6a-MnCQAJ[CVE-2019-11253]. + +
+[.update,date="2019-11-04"] +=== [.noloc]`Kubernetes` `1.11` is no longer supported on Amazon EKS +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html + +[.noloc]`Kubernetes` version `1.11` is no longer supported on Amazon EKS.
Update any `1.11` clusters to version `1.12` or later to avoid service interruption. + +
+[.update,date="2019-10-16"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the South America (São Paulo) (`sa-east-1`) {aws} Region. + +
+[.update,date="2019-10-07"] +=== [.noloc]`Windows` support +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html + +Amazon EKS clusters running [.noloc]`Kubernetes` version `1.14` now support [.noloc]`Windows` workloads. + +
+[.update,date="2019-09-30"] +=== Autoscaling +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html + +Added a chapter to cover some of the different types of [.noloc]`Kubernetes` autoscaling that are supported on Amazon EKS clusters. + +
+[.update,date="2019-09-28"] +=== [.noloc]`Kubernetes` Dashboard update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html + +Updated topic for installing the [.noloc]`Kubernetes` Dashboard on Amazon EKS clusters to use the beta `2.0` version. + +
+[.update,date="2019-09-19"] +=== Amazon EFS CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html + +Added topic for installing the Amazon EFS CSI driver on [.noloc]`Kubernetes` `1.14` Amazon EKS clusters. + +
+[.update,date="2019-09-18"] +=== Amazon EC2 Systems Manager parameter for Amazon EKS optimized AMI ID +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html + +Added topic for retrieving the Amazon EKS optimized AMI ID using an Amazon EC2 Systems Manager parameter. The parameter eliminates the need for you to look up AMI IDs. + +
+[.update,date="2019-09-16"] +=== Amazon EKS resource tagging +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-using-tags.html + +You can manage the tagging of your Amazon EKS clusters. + +
+[.update,date="2019-09-09"] +=== Amazon EBS CSI driver +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html + +Added topic for installing the Amazon EBS CSI driver on [.noloc]`Kubernetes` `1.14` Amazon EKS clusters. + +
+[.update,date="2019-09-06"] +=== New Amazon EKS optimized AMI patched for `CVE-2019-9512` and `CVE-2019-9514` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html + +Amazon EKS has updated the Amazon EKS optimized AMI to address https://groups.google.com/forum/#!topic/kubernetes-security-announce/wlHLHit1BqA[CVE-2019-9512 and CVE-2019-9514]. + +
+[.update,date="2019-09-04"] +=== Announcing deprecation of [.noloc]`Kubernetes` `1.11` in Amazon EKS +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html + +Amazon EKS discontinued support for [.noloc]`Kubernetes` version `1.11` on November 4, 2019. + +
+[.update,date="2019-09-03"] +=== IAM roles for service accounts +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html + +With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a [.noloc]`Kubernetes` service account. With this feature, you no longer need to provide extended permissions to the node IAM role so that [.noloc]`Pods` on that node can call {aws} APIs. + +
+[.update,date="2019-09-03"] +=== [.noloc]`Kubernetes` version `1.14` +Added [.noloc]`Kubernetes` version `1.14` support for new clusters and version upgrades.
+ + +[.update,date="2019-08-29"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Middle East (Bahrain) (`me-south-1`) {aws} Region. + +
+[.update,date="2019-08-28"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +New platform versions to address https://groups.google.com/forum/#!topic/kubernetes-security-announce/wlHLHit1BqA[CVE-2019-9512 and CVE-2019-9514]. + +
+[.update,date="2019-08-05"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +New platform versions to address https://groups.google.com/forum/#!topic/kubernetes-security-announce/vUtEcSEY6SM[CVE-2019-11247 and CVE-2019-11249]. + +
+[.update,date="2019-07-31"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Asia Pacific (Hong Kong) (`ap-east-1`) {aws} Region. + +
+[.update,date="2019-07-30"] +=== [.noloc]`Kubernetes` `1.10` is no longer supported on Amazon EKS +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html + +[.noloc]`Kubernetes` version `1.10` is no longer supported on Amazon EKS. Update any `1.10` clusters to version `1.11` or higher to avoid service interruption. + +
+[.update,date="2019-07-11"] +=== Added topic on ALB ingress controller +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html + +The {aws} ALB Ingress Controller for [.noloc]`Kubernetes` is a controller that creates an ALB when Ingress resources are created. + +
+[.update,date="2019-07-03"] +=== New Amazon EKS optimized AMI +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html + +Removed the unnecessary `kubectl` binary from AMIs. + +
+[.update,date="2019-06-18"] +=== [.noloc]`Kubernetes` version `1.13` +Added [.noloc]`Kubernetes` version `1.13` support for new clusters and version upgrades. + +
+[.update,date="2019-06-17"] +=== New Amazon EKS optimized AMI patched for `{aws}-2019-005` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html + +Amazon EKS has updated the Amazon EKS optimized AMI to address the vulnerabilities that are described in link:security/security-bulletins/{aws}-2019-005/[{aws}-2019-005,type="marketing"]. + +
+[.update,date="2019-05-21"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +New platform version for [.noloc]`Kubernetes` `1.11` and `1.10` clusters to support custom DNS names in the `kubelet` certificate and improve `etcd` performance. + +
+[.update,date="2019-05-21"] +=== Announcing discontinuation of support for [.noloc]`Kubernetes` `1.10` in Amazon EKS +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html + +Amazon EKS stopped supporting [.noloc]`Kubernetes` version `1.10` on July 22, 2019. + +
+[.update,date="2019-05-10"] +=== Getting started with `eksctl` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html + +This getting started guide describes how to create all of the required resources to get started with Amazon EKS using `eksctl`, a simple command line utility for creating and managing [.noloc]`Kubernetes` clusters on Amazon EKS. + +
+[.update,date="2019-05-10"] +=== {aws} CLI `get-token` command +The `aws eks get-token` command was added to the {aws} CLI.
You no longer need to install the {aws} IAM Authenticator for [.noloc]`Kubernetes` to create client security tokens for cluster API server communication. Upgrade your {aws} CLI installation to the latest version to use this new functionality. For more information, see link:cli/latest/userguide/installing.html[Installing the {aws} Command Line Interface,type="documentation"] in the _{aws} Command Line Interface User Guide_. + +
+[.update,date="2019-05-08"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +New platform version for [.noloc]`Kubernetes` `1.12` clusters to support custom DNS names in the `kubelet` certificate and improve `etcd` performance. This fixes a bug that caused node `kubelet` daemons to request a new certificate every few seconds. + +
+[.update,date="2019-04-05"] +=== [.noloc]`Prometheus` tutorial +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/prometheus.html + +Added topic for deploying [.noloc]`Prometheus` to your Amazon EKS cluster. + +
+[.update,date="2019-04-04"] +=== Amazon EKS control plane logging +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + +With this update, you can get audit and diagnostic logs directly from the Amazon EKS control plane. You can use these CloudWatch logs in your account as a reference for securing and running clusters. + +
+[.update,date="2019-03-28"] +=== [.noloc]`Kubernetes` version `1.12` +Added [.noloc]`Kubernetes` version `1.12` support for new clusters and version upgrades. + +
+[.update,date="2019-03-27"] +=== Added App Mesh getting started guide +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/mesh-gs-k8s.html + +Added documentation for getting started with App Mesh and [.noloc]`Kubernetes`. + +
+[.update,date="2019-03-19"] +=== Amazon EKS API server endpoint private access +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html + +Added documentation for disabling public access for your Amazon EKS cluster's [.noloc]`Kubernetes` API server endpoint. + +
+[.update,date="2019-03-18"] +=== Added topic for installing the [.noloc]`Kubernetes` Metrics Server +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/metrics-server.html + +The [.noloc]`Kubernetes` Metrics Server is an aggregator of resource usage data in your cluster. + +
+[.update,date="2019-03-15"] +=== Added list of related open source projects +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/metrics-server.html + +These open source projects extend the functionality of [.noloc]`Kubernetes` clusters running on {aws}, including clusters that are managed by Amazon EKS. + +
+[.update,date="2019-03-11"] +=== Added topic for installing Helm locally +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/helm.html + +The `helm` package manager for [.noloc]`Kubernetes` helps you install and manage applications on your [.noloc]`Kubernetes` cluster. This topic shows how to install and run the `helm` and `tiller` binaries locally. That way, you can install and manage charts using the Helm CLI on your local system.
+ + +[.update,date="2019-03-08"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +New platform version that updates Amazon EKS [.noloc]`Kubernetes` `1.11` clusters to patch level `1.11.8` to address https://discuss.kubernetes.io/t/kubernetes-security-announcement-v1-11-8-1-12-6-1-13-4-released-to-address-medium-severity-cve-2019-1002100/5147[CVE-2019-1002100]. + +
+[.update,date="2019-02-13"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Europe (London) (`eu-west-2`), Europe (Paris) (`eu-west-3`), and Asia Pacific (Mumbai) (`ap-south-1`) {aws} Regions. + +
+[.update,date="2019-02-13"] +=== Increased cluster limit +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/service_limits.html + +Amazon EKS has increased the number of clusters that you can create in an {aws} Region from 3 to 50. + +
+[.update,date="2019-02-11"] +=== New Amazon EKS optimized AMI patched for `ALAS-2019-1156` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html + +Amazon EKS has updated the Amazon EKS optimized AMI to address the vulnerability that's described in https://alas.aws.amazon.com/ALAS-2019-1156.html[ALAS-2019-1156]. + +
+[.update,date="2019-01-09"] +=== New Amazon EKS optimized AMI patched for `ALAS2-2019-1141` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html + +Amazon EKS has updated the Amazon EKS optimized AMI to address the CVEs that are referenced in https://alas.aws.amazon.com/AL2/ALAS-2019-1141.html[ALAS2-2019-1141]. + +
+[.update,date="2019-01-09"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Asia Pacific (Seoul) (`ap-northeast-2`) {aws} Region. + +
+[.update,date="2018-12-19"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the following additional {aws} Regions: Europe (Frankfurt) (`eu-central-1`), Asia Pacific (Tokyo) (`ap-northeast-1`), Asia Pacific (Singapore) (`ap-southeast-1`), and Asia Pacific (Sydney) (`ap-southeast-2`). + +
+[.update,date="2018-12-12"] +=== Amazon EKS cluster updates +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html + +Added documentation for Amazon EKS link:eks/latest/userguide/update-cluster.html[cluster Kubernetes version updates,type="documentation"] and link:eks/latest/userguide/update-workers.html[node replacement,type="documentation"]. + +
+[.update,date="2018-12-11"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Europe (Stockholm) (`eu-north-1`) {aws} Region. + +
+[.update,date="2018-12-04"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +New platform version updating [.noloc]`Kubernetes` to patch level `1.10.11` to address link:security/security-bulletins/{aws}-2018-020/[CVE-2018-1002105,type="marketing"]. + +
+[.update,date="2018-11-20"] +=== Added version `1.0.0` support for the ALB ingress controller +[.update-ulink] +https://github.com/kubernetes-sigs/aws-alb-ingress-controller + +The ALB ingress controller released version `1.0.0` with formal support from {aws}.
+ + +[.update,date="2018-10-16"] +=== Added support for CNI network configuration +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/cni-custom-network.html + +The [.noloc]`Amazon VPC CNI plugin for Kubernetes` version `1.2.1` now supports custom network configuration for secondary [.noloc]`Pod` network interfaces. + +
+[.update,date="2018-10-10"] +=== Added support for `MutatingAdmissionWebhook` and `ValidatingAdmissionWebhook` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +Amazon EKS platform version `1.10-eks.2` now supports `MutatingAdmissionWebhook` and `ValidatingAdmissionWebhook` admission controllers. + +
+[.update,date="2018-10-03"] +=== Added partner AMI information +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-partner-amis.html + +Canonical has partnered with Amazon EKS to create node AMIs that you can use in your clusters. + +
+[.update,date="2018-09-21"] +=== Added instructions for {aws} CLI `update-kubeconfig` command +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html + +Amazon EKS has added the `update-kubeconfig` command to the {aws} CLI to simplify the process of creating a `kubeconfig` file for accessing your cluster. + +
+[.update,date="2018-09-13"] +=== New Amazon EKS optimized AMIs +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html + +Amazon EKS has updated the Amazon EKS optimized AMIs (with and without GPU support) to provide various security fixes and AMI optimizations. + +
+[.update,date="2018-09-05"] +=== Amazon EKS {aws} Region expansion +Amazon EKS is now available in the Europe (Ireland) (`eu-west-1`) {aws} Region. + +
+[.update,date="2018-08-31"] +=== Amazon EKS platform version update +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + +New platform version with support for the [.noloc]`Kubernetes` https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/[aggregation layer] and the https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/[Horizontal Pod Autoscaler] (HPA). + +
+[.update,date="2018-08-22"] +=== New Amazon EKS optimized AMIs and GPU support +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html + +Amazon EKS has updated the Amazon EKS optimized AMI to use a new {aws} CloudFormation node template and https://github.com/awslabs/amazon-eks-ami/blob/main/templates/al2/runtime/bootstrap.sh[bootstrap script]. In addition, a new link:eks/latest/userguide/eks-optimized-ami.html#gpu-ami[Amazon EKS optimized AMI with GPU support,type="documentation"] is available. + +
+[.update,date="2018-08-14"] +=== New Amazon EKS optimized AMI patched for `ALAS2-2018-1058` +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html + +Amazon EKS has updated the Amazon EKS optimized AMI to address the CVEs that are referenced in https://alas.aws.amazon.com/AL2/ALAS-2018-1058.html[ALAS2-2018-1058]. + +
+[.update,date="2018-07-10"] +=== Amazon EKS optimized AMI build scripts +[.update-ulink] +https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html + +Amazon EKS has open-sourced the build scripts that are used to build the Amazon EKS optimized AMI. These build scripts are now available on [.noloc]`GitHub`.
+ + +[.update,date="2018-06-05"] +=== Amazon EKS initial release +Initial documentation for service launch + +[.level] +== {blank} + +[.update-history] +|=== +|=== diff --git a/latest/ug/getting-started/getting-started-automode.adoc b/latest/ug/getting-started/getting-started-automode.adoc new file mode 100644 index 00000000..734e148b --- /dev/null +++ b/latest/ug/getting-started/getting-started-automode.adoc @@ -0,0 +1,31 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[getting-started-automode,getting-started-automode.title]] += Get started with Amazon EKS – EKS Auto Mode +:info_doctype: section +:info_title: Get started with Amazon EKS – \ + EKS Auto Mode +:info_titleabbrev: Create your first cluster – EKS Auto Mode +:keywords: using, Auto, getting, started, tutorial +:info_abstract: Learn how to create your first Amazon EKS cluster with nodes using EKS Auto Mode + +
+Like other EKS getting started experiences, creating your first cluster with EKS Auto Mode delegates the management of the cluster itself to {aws}. +However, EKS Auto Mode extends EKS automation by taking over responsibility for many of the essential services needed to set up workload infrastructure (nodes, networking, and related services), making it easier to manage nodes and scale up to meet workload demands. + +
+Choose from one of the following ways to create a cluster with EKS Auto Mode: + +* <>: Use the `aws` command line interface to create a cluster. +* <>: Use the {aws-management-console} to create a cluster. +* <>: Use the `eksctl` command line interface to create a cluster. + +
+If you are comparing different approaches to creating your first EKS cluster, be aware that EKS Auto Mode has {aws} take over additional cluster management responsibilities +that include setting up components to: + +* Start up and scale nodes as workload demand increases and decreases. +* Regularly upgrade the cluster itself (control plane), node operating systems, and services running on nodes. +* Choose default settings that determine things like the size and speed of node storage and Pod network configuration. + +For details on what you get with EKS Auto Mode clusters, see <>. diff --git a/latest/ug/getting-started/getting-started-console.adoc b/latest/ug/getting-started/getting-started-console.adoc new file mode 100644 index 00000000..f593f89c --- /dev/null +++ b/latest/ug/getting-started/getting-started-console.adoc @@ -0,0 +1,399 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[getting-started-console,getting-started-console.title]] += Get started with Amazon EKS – {aws-management-console} and {aws} CLI +:info_doctype: section +:info_title: Get started with Amazon EKS – {aws-management-console} and \ + {aws} CLI +:info_titleabbrev: Create your first cluster – {aws-management-console} +:keywords: using, {aws-management-console}, {aws} CLI, getting, started, tutorial +:info_abstract: Learn how to create your first Amazon EKS cluster with nodes using the {aws-management-console} and \ + {aws} CLI. + +
+[abstract] +-- +Learn how to create your first Amazon EKS cluster with nodes using the {aws-management-console} and {aws} CLI. +-- + +
+[NOTE] +==== +This topic covers getting started *without* EKS Auto Mode. + +EKS Auto Mode automates routine tasks for cluster compute, storage, and networking. xref:getting-started-automode[Learn how to get started with Amazon EKS Auto Mode. ] +==== + +
+This guide helps you to create all of the required resources to get started with Amazon Elastic Kubernetes Service (Amazon EKS) using the {aws-management-console} and the {aws} CLI. In this guide, you manually create each resource. At the end of this tutorial, you will have a running Amazon EKS cluster that you can deploy applications to. + +
+The procedures in this guide give you complete visibility into how each resource is created and how the resources interact with each other. If you'd rather have most of the resources created for you automatically, use the `eksctl` CLI to create your cluster and nodes. For more information, see <>. + +
+[[eks-prereqs,eks-prereqs.title]] +== Prerequisites + +Before starting this tutorial, you must install and configure the following tools and resources that you need to create and manage an Amazon EKS cluster. + + +
+* *{aws} CLI* + – A command line tool for working with {aws} services, including Amazon EKS. For more information, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] in the {aws} Command Line Interface User Guide. After installing the {aws} CLI, we recommend that you also configure it. For more information, see link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the {aws} Command Line Interface User Guide. Note that {aws} CLI v2 is required to use the *update-kubeconfig* option shown on this page. +* *`kubectl`* + – A command line tool for working with [.noloc]`Kubernetes` clusters. For more information, see <>. +* *Required IAM permissions* + – The IAM security principal that you're using must have permissions to work with Amazon EKS IAM roles, service-linked roles, {aws} CloudFormation, a VPC, and related resources. For more information, see link:service-authorization/latest/reference/list_amazonelastickubernetesservice.html[Actions, resources, and condition keys for Amazon Elastic Kubernetes Service,type="documentation"] and link:IAM/latest/UserGuide/using-service-linked-roles.html[Using service-linked roles,type="documentation"] in the IAM User Guide. You must complete all steps in this guide as the same user. To check the current user, run the following command: ++ +[source,bash,subs="verbatim,attributes"] +---- +aws sts get-caller-identity +---- + +We recommend that you complete the steps in this topic in a Bash shell.
If you aren't using a Bash shell, some script commands such as line continuation characters and the way variables are set and used require adjustment for your shell. Additionally, the quoting and escaping rules for your shell might be different. For more information, see link:cli/latest/userguide/cli-usage-parameters-quoting-strings.html[Using quotation marks with strings in the {aws} CLI,type="documentation"] in the {aws} Command Line Interface User Guide. + + +[[eks-create-cluster,eks-create-cluster.title]] +== Step 1: Create your Amazon EKS cluster + +[IMPORTANT] +==== + +To get started as simply and quickly as possible, this topic includes steps to create a cluster with default settings. Before creating a cluster for production use, we recommend that you familiarize yourself with all settings and deploy a cluster with the settings that meet your requirements. For more information, see <>. Some settings can only be enabled when creating your cluster. + +==== +. Create an Amazon VPC with public and private subnets that meets Amazon EKS requirements. Replace [.replaceable]`region-code` with any {aws} Region that is supported by Amazon EKS. For a list of {aws} Regions, see link:general/latest/gr/eks.html[Amazon EKS endpoints and quotas,type="documentation"] in the {aws} General Reference guide. You can replace [.replaceable]`my-eks-vpc-stack` with any name you choose. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation create-stack \ + --region region-code \ + --stack-name my-eks-vpc-stack \ + --template-url https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-vpc-private-subnets.yaml +---- ++ +TIP: For a list of all the resources the previous command creates, open the {aws} CloudFormation console at link:cloudformation/[cloudformation,type="console"]. Choose the [.replaceable]`my-eks-vpc-stack` stack and then choose the *Resources* tab. +. Create a cluster IAM role and attach the required Amazon EKS IAM managed policy to it. [.noloc]`Kubernetes` clusters managed by Amazon EKS make calls to other {aws} services on your behalf to manage the resources that you use with the service. ++ +.. Copy the following contents to a file named [.replaceable]`eks-cluster-role-trust-policy.json`. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "eks.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} +---- +.. Create the role. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam create-role \ + --role-name myAmazonEKSClusterRole \ + --assume-role-policy-document file://"eks-cluster-role-trust-policy.json" +---- +.. Attach the required Amazon EKS managed IAM policy to the role. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam attach-role-policy \ + --policy-arn {arn-aws}iam::aws:policy/AmazonEKSClusterPolicy \ + --role-name myAmazonEKSClusterRole +---- +. Open the Amazon EKS console at https://console.aws.amazon.com/eks/home#/clusters[https://console.aws.amazon.com/eks/home#/clusters]. ++ +Make sure that the {aws} Region shown in the upper right of your console is the {aws} Region that you want to create your cluster in. If it's not, choose the dropdown next to the {aws} Region name and choose the {aws} Region that you want to use. +. Choose *Add cluster*, and then choose *Create*. If you don't see this option, then choose *Clusters* in the left navigation pane first. +. On the *Configure cluster* page, do the following: ++ +.. 
Enter a *Name* for your cluster, such as `my-cluster`. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. +.. For *Cluster Service Role*, choose [.replaceable]`myAmazonEKSClusterRole`. +.. Leave the remaining settings at their default values and choose *Next*. +. On the *Specify networking* page, do the following: ++ +.. Choose the ID of the VPC that you created in a previous step from the *VPC* dropdown list. It is something like [.replaceable]`vpc-00x0000x000x0x000` | [.replaceable]`my-eks-vpc-stack-VPC`. +.. Leave the remaining settings at their default values and choose *Next*. +. On the *Configure observability* page, choose *Next*. +. On the *Select add-ons* page, choose *Next*. ++ +For more information on add-ons, see <>. +. On the *Configure selected add-ons settings* page, choose *Next*. +. On the *Review and create* page, choose *Create*. ++ +To the right of the cluster's name, the cluster status is *Creating* for several minutes until the cluster provisioning process completes. Don't continue to the next step until the status is *Active*. ++ +NOTE: You might receive an error that one of the Availability Zones in your request doesn't have sufficient capacity to create an Amazon EKS cluster. If this happens, the error output contains the Availability Zones that can support a new cluster. Retry creating your cluster with at least two subnets that are located in the supported Availability Zones for your account. For more information, see <>. + + +[[eks-configure-kubectl,eks-configure-kubectl.title]] +== Step 2: Configure your computer to communicate with your cluster + +In this section, you create a `kubeconfig` file for your cluster. The settings in this file enable the `kubectl` CLI to communicate with your cluster. + +Before proceeding, be sure that your cluster creation completed successfully in Step 1. + +. Create or update a `kubeconfig` file for your cluster. Replace [.replaceable]`region-code` with the {aws} Region that you created your cluster in. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-kubeconfig --region region-code --name my-cluster +---- ++ +By default, the `config` file is created in `~/.kube` or the new cluster's configuration is added to an existing `config` file in `~/.kube`. +. Test your configuration. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get svc +---- ++ +NOTE: If you receive any authorization or resource type errors, see <> in the troubleshooting topic. ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +svc/kubernetes ClusterIP 10.100.0.1 443/TCP 1m +---- + + +[[eks-launch-workers,eks-launch-workers.title]] +== Step 3: Create nodes + +[IMPORTANT] +==== + +To get started as simply and quickly as possible, this topic includes steps to create nodes with default settings. Before creating nodes for production use, we recommend that you familiarize yourself with all settings and deploy nodes with the settings that meet your requirements. For more information, see <>. Some settings can only be enabled when creating your nodes. + +==== + +You can create a cluster with one of the following node types. To learn more about each type, see <>. 
After your cluster is deployed, you can add other node types. + +
+* *Fargate – [.noloc]``Linux``* – Choose this type of node if you want to run [.noloc]``Linux`` applications on <>. Fargate is a serverless compute engine that lets you deploy [.noloc]``Kubernetes`` [.noloc]``Pods`` without managing Amazon EC2 instances. +* *Managed nodes – [.noloc]``Linux``* – Choose this type of node if you want to run Amazon Linux applications on Amazon EC2 instances. Though not covered in this guide, you can also add <> and <> nodes to your cluster. + +
+==== +[role="tablist"] +Fargate - [.noloc]`Linux`:: + +Create a Fargate profile. When [.noloc]``Kubernetes`` [.noloc]``Pods`` are deployed with criteria that match the criteria defined in the profile, the [.noloc]``Pods`` are deployed to Fargate. ++ +*To create a Fargate profile* ++ +. Create an IAM role and attach the required Amazon EKS IAM managed policy to it. When your cluster creates [.noloc]``Pods`` on Fargate infrastructure, the components running on the Fargate infrastructure must make calls to {aws} APIs on your behalf. This is so that they can do actions such as pull container images from Amazon ECR or route logs to other {aws} services. The Amazon EKS [.noloc]``Pod`` execution role provides the IAM permissions to do this. + +
+.. Copy the following contents to a file named `pod-execution-role-trust-policy.json`. Replace [.replaceable]`region-code` with the {aws} Region that your cluster is in. If you want to use the same role in all {aws} Regions in your account, replace [.replaceable]`region-code` with `{asterisk}`. Replace [.replaceable]`111122223333` with your account ID and [.replaceable]`my-cluster` with the name of your cluster. If you want to use the same role for all clusters in your account, replace [.replaceable]`my-cluster` with `{asterisk}`. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Condition": { + "ArnLike": { + "aws:SourceArn": "{arn-aws}eks:region-code:111122223333:fargateprofile/my-cluster/*" + } + }, + "Principal": { + "Service": "eks-fargate-pods.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} +---- + +
+.. Create a [.noloc]``Pod`` execution IAM role. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam create-role \ + --role-name AmazonEKSFargatePodExecutionRole \ + --assume-role-policy-document file://"pod-execution-role-trust-policy.json" +---- + +
+.. Attach the required Amazon EKS managed IAM policy to the role. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam attach-role-policy \ + --policy-arn {arn-aws}iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy \ + --role-name AmazonEKSFargatePodExecutionRole +---- +.. Open the Amazon EKS console at https://console.aws.amazon.com/eks/home#/clusters[https://console.aws.amazon.com/eks/home#/clusters]. +.. On the *Clusters* page, choose the [.replaceable]`my-cluster` cluster. +.. On the *[.replaceable]`my-cluster`* page, do the following: ++ +.. Choose the *Compute* tab. +.. Under *Fargate Profiles*, choose *Add Fargate Profile*. +. On the *Configure Fargate Profile* page, do the following: ++ +.. For *Name*, enter a unique name for your Fargate profile, such as [.replaceable]`my-profile`. +.. For *Pod execution role*, choose the *AmazonEKSFargatePodExecutionRole* that you created in a previous step. +.. Choose the *Subnets* dropdown and deselect any subnet with `Public` in its name. Only private subnets are supported for [.noloc]``Pods`` that are running on Fargate. +..
Choose *Next*. +. On the *Configure [.noloc]``Pod`` selection* page, do the following: ++ +.. For *Namespace*, enter `default`. +.. Choose *Next*. +. On the *Review and create* page, review the information for your Fargate profile and choose *Create*. +. After a few minutes, the *Status* in the *Fargate Profile configuration* section will change from *Creating* to *Active*. Don't continue to the next step until the status is *Active*. +. If you plan to deploy all [.noloc]`Pods` to Fargate (none to Amazon EC2 nodes), do the following to create another Fargate profile and run the default name resolver ([.noloc]`CoreDNS`) on Fargate. ++ +NOTE: If you don't do this, you won't have any nodes at this time. ++ +.. On the *Fargate Profile* page, choose [.replaceable]`my-profile`. +.. Under *Fargate profiles*, choose *Add Fargate Profile*. +.. For *Name*, enter [.noloc]`CoreDNS`. +.. For *Pod execution role*, choose the *AmazonEKSFargatePodExecutionRole* that you created in a previous step. +.. Choose the *Subnets* dropdown and deselect any subnet with `Public` in its name. Only private subnets are supported for [.noloc]`Pods` running on Fargate. +.. Choose *Next*. +.. For *Namespace*, enter `kube-system`. +.. Choose *Match labels*, and then choose *Add label*. +.. Enter `k8s-app` for *Key* and `kube-dns` for value. This is necessary for the default name resolver ([.noloc]`CoreDNS`) to deploy to Fargate. +.. Choose *Next*. +.. On the *Review and create* page, review the information for your Fargate profile and choose *Create*. +.. Run the following command to remove the default `eks.amazonaws.com/compute-type : ec2` annotation from the [.noloc]`CoreDNS` [.noloc]`Pods`. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl patch deployment coredns \ + -n kube-system \ + --type json \ + -p='[{"op": "remove", "path": "/spec/template/metadata/annotations/eks.amazonaws.com~1compute-type"}]' +---- ++ +NOTE: The system creates and deploys two nodes based on the Fargate profile label you added. You won't see anything listed in *Node groups* because they aren't applicable for Fargate nodes, but you will see the new nodes listed in the *Overview* tab. + + +Managed nodes - [.noloc]`Linux`:: + +Create a managed node group, specifying the subnets and node IAM role that you created in previous steps. ++ +*To create your {ec2} [.noloc]`Linux` managed node group* ++ +. Create a node IAM role and attach the required Amazon EKS IAM managed policy to it. The Amazon EKS node `kubelet` daemon makes calls to {aws} APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. ++ +.. Copy the following contents to a file named `node-role-trust-policy.json`. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} +---- +.. Create the node IAM role. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam create-role \ + --role-name myAmazonEKSNodeRole \ + --assume-role-policy-document file://"node-role-trust-policy.json" +---- +.. Attach the required managed IAM policies to the role. 
++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam attach-role-policy \ + --policy-arn {arn-aws}iam::aws:policy/AmazonEKSWorkerNodePolicy \ + --role-name myAmazonEKSNodeRole +aws iam attach-role-policy \ + --policy-arn {arn-aws}iam::aws:policy/AmazonEC2ContainerRegistryReadOnly \ + --role-name myAmazonEKSNodeRole +aws iam attach-role-policy \ + --policy-arn {arn-aws}iam::aws:policy/AmazonEKS_CNI_Policy \ + --role-name myAmazonEKSNodeRole +---- +.. Open the Amazon EKS console at https://console.aws.amazon.com/eks/home#/clusters[https://console.aws.amazon.com/eks/home#/clusters]. +.. Choose the name of the cluster that you created in <>, such as [.replaceable]`my-cluster`. +.. On the *[.replaceable]`my-cluster`* page, do the following: ++ +.. Choose the *Compute* tab. +.. Choose *Add Node Group*. +. On the *Configure Node Group* page, do the following: ++ +.. For *Name*, enter a unique name for your managed node group, such as [.replaceable]`my-nodegroup`. The node group name can't be longer than 63 characters. It must start with letter or digit, but can also include hyphens and underscores for the remaining characters. +.. For *Node IAM role name*, choose [.replaceable]`myAmazonEKSNodeRole` role that you created in a previous step. We recommend that each node group use its own unique IAM role. +.. Choose *Next*. +. On the *Set compute and scaling configuration* page, accept the default values and choose *Next*. +. On the *Specify networking* page, accept the default values and choose *Next*. +. On the *Review and create* page, review your managed node group configuration and choose *Create*. +. After several minutes, the *Status* in the *Node Group configuration* section will change from *Creating* to *Active*. Don't continue to the next step until the status is *Active*. +==== + +[[gs-view-resources,gs-view-resources.title]] + +== Step 4: View resources + +You can view your nodes and [.noloc]`Kubernetes` workloads. + +. In the left navigation pane, choose *Clusters*. In the list of *Clusters*, choose the name of the cluster that you created, such as [.replaceable]`my-cluster`. +. On the *[.replaceable]`my-cluster`* page, choose the following: ++ +.. *Compute* + tab – You see the list of *Nodes* that were deployed for the cluster. You can choose the name of a node to see more information about it. +.. *Resources* tab + – You see all of the [.noloc]`Kubernetes` resources that are deployed by default to an Amazon EKS cluster. Select any resource type in the console to learn more about it. + + +[[gs-console-clean-up,gs-console-clean-up.title]] +== Step 5: Delete resources + +After you've finished with the cluster and nodes that you created for this tutorial, you should delete the resources that you created. If you want to do more with this cluster before you delete the resources, see <>. + +. Delete any node groups or Fargate profiles that you created. ++ +.. Open the Amazon EKS console at https://console.aws.amazon.com/eks/home#/clusters[https://console.aws.amazon.com/eks/home#/clusters]. +.. In the left navigation pane, choose *Clusters*. In the list of clusters, choose [.replaceable]`my-cluster`. +.. Choose the *Compute* tab. +.. If you created a node group, choose the [.replaceable]`my-nodegroup` node group and then choose *Delete*. Enter [.replaceable]`my-nodegroup`, and then choose *Delete*. +.. For each Fargate profile that you created, choose it and then choose *Delete*. Enter the name of the profile, and then choose *Delete*. 
++ +NOTE: When deleting a second Fargate profile, you may need to wait for the first one to finish deleting. +.. Don't continue until the node group or Fargate profiles are deleted. +. Delete the cluster. ++ +.. In the left navigation pane, choose *Clusters*. In the list of clusters, choose [.replaceable]`my-cluster`. +.. Choose *Delete cluster*. +.. Enter [.replaceable]`my-cluster` and then choose *Delete*. Don't continue until the cluster is deleted. +. Delete the VPC {aws} CloudFormation stack that you created. ++ +.. Open the link:cloudformation/[{aws} CloudFormation console,type="console"]. +.. Choose the [.replaceable]`my-eks-vpc-stack` stack, and then choose *Delete*. +.. In the *Delete [.replaceable]`my-eks-vpc-stack`* confirmation dialog box, choose *Delete stack*. +. Delete the IAM roles that you created. ++ +.. Open the IAM console at https://console.aws.amazon.com/iam/. +.. In the left navigation pane, choose *Roles*. +.. Select each role you created from the list (*[.replaceable]`myAmazonEKSClusterRole`*, as well as *AmazonEKSFargatePodExecutionRole* or [.replaceable]`myAmazonEKSNodeRole`). Choose *Delete*, enter the requested confirmation text, then choose *Delete*. + + +[[gs-console-next-steps,gs-console-next-steps.title]] +== Next steps + +The following documentation topics help you to extend the functionality of your cluster. + + + +* The link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] that created the cluster is the only principal that can make calls to the [.noloc]`Kubernetes` API server with `kubectl` or the {aws-management-console}. If you want other IAM principals to have access to your cluster, then you need to add them. For more information, see <> and <>. +* Deploy a <> to your cluster. +* Before deploying a cluster for production use, we recommend familiarizing yourself with all of the settings for <> and <>. Some settings (such as enabling SSH access to Amazon EC2 nodes) must be made when the cluster is created. +* To increase security for your cluster, <>. diff --git a/latest/ug/getting-started/getting-started-eksctl.adoc b/latest/ug/getting-started/getting-started-eksctl.adoc new file mode 100644 index 00000000..4b05e974 --- /dev/null +++ b/latest/ug/getting-started/getting-started-eksctl.adoc @@ -0,0 +1,179 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[getting-started-eksctl,getting-started-eksctl.title]] += Get started with Amazon EKS – `eksctl` +:info_doctype: section +:info_title: Get started with Amazon EKS – \ + eksctl +:info_titleabbrev: Create your first cluster – eksctl +:keywords: using, eksctl, getting, started, tutorial +:info_abstract: Learn how to create your first Amazon EKS cluster with nodes using the eksctl command \ + line tool. + +[abstract] +-- +Learn how to create your first Amazon EKS cluster with nodes using the `eksctl` command line tool. +-- + +[NOTE] +==== +This topic covers getting started *without* EKS Auto Mode. + +EKS Auto Mode automates routine tasks for cluster compute, storage, and networking. xref:getting-started-automode[Learn how to get started with Amazon EKS Auto Mode. ] +==== + + +This guide helps you to create all of the required resources to get started with Amazon Elastic Kubernetes Service (Amazon EKS) using `eksctl`, a simple command line utility for creating and managing [.noloc]`Kubernetes` clusters on Amazon EKS. At the end of this tutorial, you will have a running Amazon EKS cluster that you can deploy applications to. + +The procedures in this guide create several resources for you automatically that you have to create manually when you create your cluster using the {aws-management-console}. If you'd rather manually create most of the resources to better understand how they interact with each other, then use the {aws-management-console} to create your cluster and compute. For more information, see <>. + +[[eksctl-prereqs,eksctl-prereqs.title]] +== Prerequisites + +Before starting this tutorial, you must install and configure the {aws} CLI, kubectl, and eksctl tools as described in <>. + +[[create-cluster-gs-eksctl,create-cluster-gs-eksctl.title]] +== Step 1: Create your Amazon EKS cluster and nodes + +[IMPORTANT] +==== + +To get started as simply and quickly as possible, this topic includes steps to create a cluster and nodes with default settings. Before creating a cluster and nodes for production use, we recommend that you familiarize yourself with all settings and deploy a cluster and nodes with the settings that meet your requirements. For more information, see <> and <>. Some settings can only be enabled when creating your cluster and nodes. + +==== + +You can create a cluster with one of the following node types. To learn more about each type, see <>. After your cluster is deployed, you can add other node types. + + +* *Fargate – [.noloc]``Linux``* – Select this type of node if you want to run [.noloc]``Linux`` applications on <>. Fargate is a serverless compute engine that lets you deploy [.noloc]``Kubernetes`` [.noloc]``Pods`` without managing Amazon EC2 instances. +* *Managed nodes – [.noloc]``Linux``* – Select this type of node if you want to run Amazon Linux applications on Amazon EC2 instances. Though not covered in this guide, you can also add <> and <> nodes to your cluster. + +Create your Amazon EKS cluster with the following command. You can replace [.replaceable]`my-cluster` with your own value. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. Replace [.replaceable]`region-code` with any {aws} Region that is supported by Amazon EKS. 
For a list of {aws} Regions, see link:general/latest/gr/eks.html[Amazon EKS endpoints and quotas,type="documentation"] in the {aws} General Reference guide.
+
+====
+[role="tablist"]
+Fargate - [.noloc]`Linux`::
++
+[source,bash,subs="verbatim,attributes"]
+----
+eksctl create cluster --name my-cluster --region region-code --fargate
+----
+
+Managed nodes - [.noloc]`Linux`::
++
+[source,bash,subs="verbatim,attributes"]
+----
+eksctl create cluster --name my-cluster --region region-code
+----
+====
+
+
+Cluster creation takes several minutes. During creation, you'll see several lines of output. The last line of output is similar to the following example line.
+
+----
+[...]
+[✓] EKS cluster "my-cluster" in "region-code" region is ready
+----
+
+`eksctl` created a `kubectl` config file in `~/.kube/config`, or added the new cluster's configuration to that file if it already existed on your computer.
+
+After cluster creation is complete, view the {aws} CloudFormation stack named `eksctl-[.replaceable]``my-cluster``-cluster` in the {aws} CloudFormation link:cloudformation/[console,type="console"] to see all of the resources that were created.
+
+[[gs-eksctl-view-resources,gs-eksctl-view-resources.title]]
+
+
+== Step 2: View [.noloc]`Kubernetes` resources
+
+. View your cluster nodes.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get nodes -o wide
+----
++
+An example output is as follows.
++
+====
+[role="tablist"]
+Fargate - [.noloc]`Linux`::
++
+[source,none,subs="verbatim,attributes"]
+----
+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+fargate-ip-192-0-2-0.region-code.compute.internal Ready 8m3s v1.2.3-eks-1234567 192.0.2.0 Amazon Linux 2 1.23.456-789.012.amzn2.x86_64 containerd://1.2.3
+fargate-ip-192-0-2-1.region-code.compute.internal Ready 7m30s v1.2.3-eks-1234567 192.0.2.1 Amazon Linux 2 1.23.456-789.012.amzn2.x86_64 containerd://1.2.3
+----
+
+Managed nodes - [.noloc]`Linux`::
++
+[source,none,subs="verbatim,attributes"]
+----
+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+ip-192-0-2-0.region-code.compute.internal Ready 6m7s v1.2.3-eks-1234567 192.0.2.0 192.0.2.2 Amazon Linux 2 1.23.456-789.012.amzn2.x86_64 containerd://1.2.3
+ip-192-0-2-1.region-code.compute.internal Ready 6m4s v1.2.3-eks-1234567 192.0.2.1 192.0.2.3 Amazon Linux 2 1.23.456-789.012.amzn2.x86_64 containerd://1.2.3
+----
+====
++
+For more information about what you see in the output, see <>.
+
+. View the workloads running on your cluster.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get pods -A -o wide
+----
++
+An example output is as follows.
++ +==== +[role="tablist"] +Fargate - [.noloc]`Linux`:: ++ +[source,none,subs="verbatim,attributes"] +---- +NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +kube-system coredns-1234567890-abcde 1/1 Running 0 18m 192.0.2.0 fargate-ip-192-0-2-0.region-code.compute.internal +kube-system coredns-1234567890-12345 1/1 Running 0 18m 192.0.2.1 fargate-ip-192-0-2-1.region-code.compute.internal +---- + +Managed nodes - [.noloc]`Linux`:: ++ +[source,none,subs="verbatim,attributes"] +---- +NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +kube-system aws-node-12345 1/1 Running 0 7m43s 192.0.2.1 ip-192-0-2-1.region-code.compute.internal +kube-system aws-node-67890 1/1 Running 0 7m46s 192.0.2.0 ip-192-0-2-0.region-code.compute.internal +kube-system coredns-1234567890-abcde 1/1 Running 0 14m 192.0.2.3 ip-192-0-2-3.region-code.compute.internal +kube-system coredns-1234567890-12345 1/1 Running 0 14m 192.0.2.4 ip-192-0-2-4.region-code.compute.internal +kube-system kube-proxy-12345 1/1 Running 0 7m46s 192.0.2.0 ip-192-0-2-0.region-code.compute.internal +kube-system kube-proxy-67890 1/1 Running 0 7m43s 192.0.2.1 ip-192-0-2-1.region-code.compute.internal +---- +==== ++ +For more information about what you see in the output, see <>. + + +[[gs-eksctl-clean-up,gs-eksctl-clean-up.title]] + +== Step 3: Delete your cluster and nodes + +After you've finished with the cluster and nodes that you created for this tutorial, you should clean up by deleting the cluster and nodes with the following command. If you want to do more with this cluster before you clean up, see <>. + +[source,bash,subs="verbatim,attributes"] +---- +eksctl delete cluster --name my-cluster --region region-code +---- + + +[[gs-eksctl-next-steps,gs-eksctl-next-steps.title]] +== Next steps + +The following documentation topics help you to extend the functionality of your cluster. + + + +* Deploy a <> to your cluster. +* The link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] that created the cluster is the only principal that can make calls to the [.noloc]`Kubernetes` API server with `kubectl` or the {aws-management-console}. If you want other IAM principals to have access to your cluster, then you need to add them. For more information, see <> and <>. +* Before deploying a cluster for production use, we recommend familiarizing yourself with all of the settings for <> and <>. Some settings (such as enabling SSH access to Amazon EC2 nodes) must be made when the cluster is created. +* To increase security for your cluster, <>. diff --git a/latest/ug/getting-started/getting-started.adoc b/latest/ug/getting-started/getting-started.adoc new file mode 100644 index 00000000..ae1fc70f --- /dev/null +++ b/latest/ug/getting-started/getting-started.adoc @@ -0,0 +1,45 @@ +//!!NODE_ROOT +include::../attributes.txt[] +[[getting-started,getting-started.title]] += Get started with Amazon EKS +:doctype: book +:sectnums: +:toc: left +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . +:info_doctype: chapter +:info_title: Get started with Amazon EKS +:info_titleabbrev: Get started +:info_abstract: Learn about the tools needed for creating and working with an Amazon EKS cluster. +:keywords: getting, started, tutorials, quick, start + +[abstract] +-- +Learn about the tools needed for creating and working with an Amazon EKS cluster. +-- + +Make sure that you are set up to use Amazon EKS before going through the getting started guides. 
For more information, see <>.
+
+There are two getting started guides available for creating a new [.noloc]`Kubernetes` cluster with nodes in Amazon EKS:
+
+
+
+* <> – This getting started guide helps you to create all of the required resources to get started with Amazon EKS using `eksctl`, a simple command line utility for creating and managing [.noloc]`Kubernetes` clusters on Amazon EKS. At the end of the tutorial, you will have a running Amazon EKS cluster that you can deploy applications to. This is the fastest and simplest way to get started with Amazon EKS.
+* <> – This getting started guide helps you to create all of the required resources to get started with Amazon EKS using the {aws-management-console} and {aws} CLI. At the end of the tutorial, you will have a running Amazon EKS cluster that you can deploy applications to. In this guide, you manually create each resource required for an Amazon EKS cluster. The procedures give you visibility into how each resource is created and how they interact with each other.
+
+We also offer the following references:
+
+
+
+* For a collection of hands-on tutorials, see https://community.aws/tags/eks-cluster-setup[EKS Cluster Setup] on _{aws} Community_.
+* For code examples, see link:code-library/latest/ug/eks_code_examples.html[Code examples for Amazon EKS using {aws} SDKs,type="documentation"].
+
+
+include::getting-started-automode.adoc[leveloffset=+1]
+
+include::getting-started-eksctl.adoc[leveloffset=+1]
+
+include::getting-started-console.adoc[leveloffset=+1]
diff --git a/latest/ug/getting-started/install-awscli.adoc b/latest/ug/getting-started/install-awscli.adoc
new file mode 100644
index 00000000..08e3a9cf
--- /dev/null
+++ b/latest/ug/getting-started/install-awscli.adoc
@@ -0,0 +1,114 @@
+//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[install-awscli,install-awscli.title]] += Set up {aws} CLI +:info_doctype: section +:info_title: Set up {aws} CLI +:info_titleabbrev: Set up {aws} CLI +:keywords: setting up, setup +:info_abstract: Set up the {aws} CLI for managing {aws} resources needed to use Amazon EKS. Follow these \ + instructions to set up the credentials with {aws} CLI. + +[abstract] +-- +Set up the {aws} CLI for managing {aws} resources needed to use Amazon EKS. Follow these instructions to set up the credentials with {aws} CLI. +-- + +The link:cli/[{aws} CLI,type="marketing"] is a command line tool for working with {aws} services, including Amazon EKS. It is also used to authenticate IAM users or roles for access to the Amazon EKS cluster and other {aws} resources from your local machine. To provision resources in {aws} from the command line, you need to obtain an {aws} access key ID and secret key to use in the command line. Then you need to configure these credentials in the {aws} CLI. If you haven't already installed the {aws} CLI, see link:cli/latest/userguide/cli-chap-install.html[Install or update the latest version of the {aws} CLI,type="documentation"] in the _{aws} Command Line Interface User Guide_. + +[[create-access-key,create-access-key.title]] +== To create an access key +. Sign into the https://console.aws.amazon.com/[{aws-management-console}]. +. For single-user or multiple-user accounts: ++ +** *Single-user account –*:: + In the top right, choose your {aws} user name to open the navigation menu. For example, choose *`webadmin`*. +** *Multiple-user account –*:: + Choose IAM from the list of services. From the IAM Dashboard, select *Users*, and choose the name of the user. +. Choose *Security credentials*. +. Under *Access keys*, choose *Create access key*. +. Choose *Command Line Interface (CLI)*, then choose *Next*. +. Choose *Create access key*. +. Choose *Download .csv file*. + + +[[configure-cli,configure-cli.title]] +== To configure the {aws} CLI + +After installing the {aws} CLI, do the following steps to configure it. For more information, see link:cli/latest/userguide/cli-chap-configure.html[Configure the {aws} CLI,type="documentation"] in the _{aws} Command Line Interface User Guide_. + +. In a terminal window, enter the following command: ++ +[source,bash,subs="verbatim,attributes"] +---- +aws configure +---- ++ +Optionally, you can configure a named profile, such as `--profile cluster-admin`. If you configure a named profile in the {aws} CLI, you must *always* pass this flag in subsequent commands. +. Enter your {aws} credentials. For example: ++ +[source,bash,subs="verbatim,attributes"] +---- +Access Key ID [None]: AKIAIOSFODNN7EXAMPLE +Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +Default region name [None]: region-code +Default output format [None]: json +---- + + +[[security-token,security-token.title]] +== To get a security token + +If needed, run the following command to get a new security token for the {aws} CLI. For more information, see link:cli/latest/reference/sts/get-session-token.html[get-session-token,type="documentation"] in the _{aws} CLI Command Reference_. + +By default, the token is valid for 15 minutes. To change the default session timeout, pass the `--duration-seconds` flag. For example: + +[source,bash,subs="verbatim,attributes"] +---- +aws sts get-session-token --duration-seconds 3600 +---- + +This command returns the temporary security credentials for an {aws} CLI session. 
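+
+The response includes an access key ID, a secret access key, and a session token (you can see all three in the example output that follows). If you want to use these temporary credentials in your current shell, one common option is to export them as environment variables. This is a sketch only; the values shown are the placeholder values from the example output, and you replace them with the values from your own response.
+
+[source,bash,subs="verbatim,attributes"]
+----
+export AWS_ACCESS_KEY_ID=ASIA5FTRU3LOEXAMPLE
+export AWS_SECRET_ACCESS_KEY=JnKgvwfqUD9mNsPoi9IbxAYEXAMPLE
+export AWS_SESSION_TOKEN=VERYLONGSESSIONTOKENSTRING
+----
+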
You should see the following response output: + +[source,bash,subs="verbatim,attributes"] +---- +{ + "Credentials": { + "AccessKeyId": "ASIA5FTRU3LOEXAMPLE", + "SecretAccessKey": "JnKgvwfqUD9mNsPoi9IbxAYEXAMPLE", + "SessionToken": "VERYLONGSESSIONTOKENSTRING", + "Expiration": "2023-02-17T03:14:24+00:00" + } +} +---- + + +[[verify-identity,verify-identity.title]] +== To verify the user identity + +If needed, run the following command to verify the {aws} credentials for your IAM user identity (such as [.replaceable]`ClusterAdmin`) for the terminal session. + +[source,bash,subs="verbatim,attributes"] +---- +aws sts get-caller-identity +---- + +This command returns the Amazon Resource Name (ARN) of the IAM entity that's configured for the {aws} CLI. You should see the following example response output: + +[source,bash,subs="verbatim,attributes"] +---- +{ + "UserId": "AKIAIOSFODNN7EXAMPLE", + "Account": "01234567890", + "Arn": "{arn-aws}iam::01234567890:user/ClusterAdmin" +} +---- + + +[[install-awscli-next-steps,install-awscli-next-steps.title]] +== Next steps + +* <> +* <> diff --git a/latest/ug/getting-started/install-kubectl.adoc b/latest/ug/getting-started/install-kubectl.adoc new file mode 100644 index 00000000..384eb84c --- /dev/null +++ b/latest/ug/getting-started/install-kubectl.adoc @@ -0,0 +1,816 @@ +//!!NODE_ROOT
+ + +[.topic] +[[install-kubectl,install-kubectl.title]] += Set up `kubectl` and `eksctl` +:info_doctype: section +:info_title: Set up kubectl and eksctl +:info_titleabbrev: Set up kubectl and eksctl +:keywords: install, update, kubectl +:info_abstract: Learn how to install or update the kubectl and eksctl command line tools \ + to work with Kubernetes and Amazon EKS features. + + +include::../attributes.txt[] + +[abstract] +-- +Learn how to install or update the `kubectl` and `eksctl` command line tools to work with [.noloc]`Kubernetes` and Amazon EKS features. +-- + +`Kubectl` is a command line tool that you use to communicate with the [.noloc]`Kubernetes` API server. The `kubectl` binary is available in many operating system package managers. Using a package manager for your installation is often easier than a manual download and install process. The `eksctl` command lets you create and modify Amazon EKS clusters. + +Topics on this page help you install and set up these tools: + + + +* <> +* <> + + +[[kubectl-install-update,kubectl-install-update.title]] +== Install or update `kubectl` + +This topic helps you to download and install, or update, the `kubectl` binary on your device. The binary is identical to the https://kubernetes.io/docs/tasks/tools/#kubectl[upstream community versions]. The binary is not unique to Amazon EKS or {aws}. Use the steps below to get the specific version of `kubectl` that you need, although many builders simply run `brew install kubectl` to install it. + +[NOTE] +==== + +You must use a `kubectl` version that is within one minor version difference of your Amazon EKS cluster control plane. For example, a `1.30` `kubectl` client works with [.noloc]`Kubernetes` `1.29`, `1.30`, and `1.31` clusters. + +==== + +== Step 1: Check if `kubectl` is installed + +Determine whether you already have `kubectl` installed on your device. + +[source,bash,subs="verbatim,attributes"] +---- +kubectl version --client +---- + +If you have `kubectl` installed in the path of your device, the example output includes information similar to the following. If you want to update the version that you currently have installed with a later version, complete the next step, making sure to install the new version in the same location that your current version is in. + +[source,bash,subs="verbatim,attributes"] +---- +Client Version: v1.31.X-eks-1234567 +---- + +If you receive no output, then you either don't have `kubectl` installed, or it's not installed in a location that's in your device's path. + +== Step 2: Install or update `kubectl` + +Install or update `kubectl` on one of the following operating systems: + +* <> +* <> +* <> +* <> + +=== macOS [[macos_kubectl]] + +. Download the binary for your cluster's [.noloc]`Kubernetes` version from Amazon S3. 
++ +**** [.noloc]`Kubernetes` `1.32` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.32.0/2024-12-20/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.31` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.31.3/2024-12-12/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.30` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.30.7/2024-12-12/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.29` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.29.10/2024-12-12/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.28` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.15/2024-12-12/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.27` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.27.16/2024-12-12/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.26` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.26.15/2024-12-12/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.25` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.25.16/2024-12-12/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.24` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.24.17/2024-12-12/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.23` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.23.17/2024-09-11/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.22` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.22.17/2024-09-11/bin/darwin/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.21` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.21.14/2024-09-11/bin/darwin/amd64/kubectl +---- +. (Optional) Verify the downloaded binary with the `SHA-256` checksum for your binary. ++ +.. Download the `SHA-256` checksum for your cluster's [.noloc]`Kubernetes` version. 
++ +***** [.noloc]`Kubernetes` `1.32` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.32.0/2024-12-20/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.31` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.31.3/2024-12-12/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.30` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.30.7/2024-12-12/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.29` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.29.10/2024-12-12/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.28` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.15/2024-12-12/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.27` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.27.16/2024-12-12/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.26` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.26.15/2024-12-12/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.25` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.25.16/2024-12-12/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.24` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.24.17/2024-12-12/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.23` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.23.17/2024-09-11/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.22` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.22.17/2024-09-11/bin/darwin/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.21` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.21.14/2024-09-11/bin/darwin/amd64/kubectl.sha256 +---- +.. Check the `SHA-256` checksum for your downloaded binary. ++ +[source,bash,subs="verbatim,attributes"] +---- +openssl sha1 -sha256 kubectl +---- +.. Make sure that the generated checksum in the output matches in the checksum in the downloaded `kubectl.sha256` file. +. Apply execute permissions to the binary. ++ +[source,bash,subs="verbatim,attributes"] +---- +chmod +x ./kubectl +---- +. Copy the binary to a folder in your `PATH`. If you have already installed a version of `kubectl`, then we recommend creating a `$HOME/bin/kubectl` and ensuring that `$HOME/bin` comes first in your `$PATH`. ++ +[source,bash,subs="verbatim,attributes"] +---- +mkdir -p $HOME/bin && cp ./kubectl $HOME/bin/kubectl && export PATH=$HOME/bin:$PATH +---- +. (Optional) Add the `$HOME/bin` path to your shell initialization file so that it is configured when you open a shell. ++ +[source,bash,subs="verbatim,attributes"] +---- +echo 'export PATH=$HOME/bin:$PATH' >> ~/.bash_profile +---- + +=== Linux (amd64) [[linux_amd64_kubectl]] + +. Download the `kubectl` binary for your cluster's [.noloc]`Kubernetes` version from Amazon S3. 
++ +**** [.noloc]`Kubernetes` `1.32` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.32.0/2024-12-20/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.31` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.31.3/2024-12-12/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.30` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.30.7/2024-12-12/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.29` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.29.10/2024-12-12/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.28` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.15/2024-12-12/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.27` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.27.16/2024-12-12/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.26` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.26.15/2024-12-12/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.25` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.25.16/2024-12-12/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.24` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.24.17/2024-12-12/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.23` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.23.17/2024-09-11/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.22` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.22.17/2024-09-11/bin/linux/amd64/kubectl +---- +**** [.noloc]`Kubernetes` `1.21` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.21.14/2024-09-11/bin/linux/amd64/kubectl +---- +. (Optional) Verify the downloaded binary with the `SHA-256` checksum for your binary. ++ +.. Download the `SHA-256` checksum for your cluster's [.noloc]`Kubernetes` version from Amazon S3using the command for your device's hardware platform. 
++ +***** [.noloc]`Kubernetes` `1.32` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.32.0/2024-12-20/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.31` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.31.3/2024-12-12/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.30` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.30.7/2024-12-12/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.29` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.29.10/2024-12-12/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.28` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.15/2024-12-12/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.27` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.27.16/2024-12-12/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.26` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.26.15/2024-12-12/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.25` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.25.16/2024-12-12/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.24` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.24.17/2024-12-12/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.23` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.23.17/2024-09-11/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.22` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.22.17/2024-09-11/bin/linux/amd64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.21` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.21.14/2024-09-11/bin/linux/amd64/kubectl.sha256 +---- +.. Check the `SHA-256` checksum for your downloaded binary with one of the following commands. ++ +[source,bash,subs="verbatim,attributes"] +---- +sha256sum -c kubectl.sha256 +---- +or ++ +---- +openssl sha1 -sha256 kubectl +---- +.. For the first, you should see `kubectl: OK`, for the second, you can check that the generated checksum in the output matches in the checksum in the downloaded `kubectl.sha256` file. +. Apply execute permissions to the binary. ++ +[source,bash,subs="verbatim,attributes"] +---- +chmod +x ./kubectl +---- +. Copy the binary to a folder in your `PATH`. If you have already installed a version of `kubectl`, then we recommend creating a `$HOME/bin/kubectl` and ensuring that `$HOME/bin` comes first in your `$PATH`. ++ +[source,bash,subs="verbatim,attributes"] +---- +mkdir -p $HOME/bin && cp ./kubectl $HOME/bin/kubectl && export PATH=$HOME/bin:$PATH +---- +. (Optional) Add the `$HOME/bin` path to your shell initialization file so that it is configured when you open a shell. ++ +NOTE: This step assumes you are using the Bash shell; if you are using another shell, change the command to use your specific shell initialization file. 
++ +[source,bash,subs="verbatim,attributes"] +---- +echo 'export PATH=$HOME/bin:$PATH' >> ~/.bashrc +---- + +=== Linux (arm64) [[linux_arm64_kubectl]] + +. Download the `kubectl` binary for your cluster's [.noloc]`Kubernetes` version from Amazon S3. ++ +**** [.noloc]`Kubernetes` `1.32` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.32.0/2024-12-20/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.31` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.31.3/2024-12-12/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.30` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.30.7/2024-12-12/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.29` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.29.10/2024-12-12/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.28` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.15/2024-12-12/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.27` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.27.16/2024-12-12/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.26` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.26.15/2024-12-12/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.25` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.25.16/2024-12-12/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.24` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.24.17/2024-12-12/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.23` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.23.17/2024-09-11/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.22` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.22.17/2024-09-11/bin/linux/arm64/kubectl +---- +**** [.noloc]`Kubernetes` `1.21` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.21.14/2024-09-11/bin/linux/arm64/kubectl +---- +. (Optional) Verify the downloaded binary with the `SHA-256` checksum for your binary. ++ +.. Download the `SHA-256` checksum for your cluster's [.noloc]`Kubernetes` version from Amazon S3using the command for your device's hardware platform. 
++ +***** [.noloc]`Kubernetes` `1.32` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.32.0/2024-12-20/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.31` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.31.3/2024-12-12/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.30` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.30.7/2024-12-12/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.29` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.29.10/2024-12-12/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.28` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.15/2024-12-12/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.27` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.27.16/2024-12-12/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.26` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.26.15/2024-12-12/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.25` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.25.16/2024-12-12/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.24` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.24.17/2024-12-12/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.23` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.23.17/2024-09-11/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.22` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.22.17/2024-09-11/bin/linux/arm64/kubectl.sha256 +---- +***** [.noloc]`Kubernetes` `1.21` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.21.14/2024-09-11/bin/linux/arm64/kubectl.sha256 +---- +.. Check the `SHA-256` checksum for your downloaded binary with one of the following commands. ++ +[source,bash,subs="verbatim,attributes"] +---- +sha256sum -c kubectl.sha256 +---- +or ++ +---- +openssl sha1 -sha256 kubectl +---- +.. For the first, you should see `kubectl: OK`, for the second, you can check that the generated checksum in the output matches in the checksum in the downloaded `kubectl.sha256` file. +. Apply execute permissions to the binary. ++ +[source,bash,subs="verbatim,attributes"] +---- +chmod +x ./kubectl +---- +. Copy the binary to a folder in your `PATH`. If you have already installed a version of `kubectl`, then we recommend creating a `$HOME/bin/kubectl` and ensuring that `$HOME/bin` comes first in your `$PATH`. ++ +[source,bash,subs="verbatim,attributes"] +---- +mkdir -p $HOME/bin && cp ./kubectl $HOME/bin/kubectl && export PATH=$HOME/bin:$PATH +---- +. (Optional) Add the `$HOME/bin` path to your shell initialization file so that it is configured when you open a shell. ++ +NOTE: This step assumes you are using the Bash shell; if you are using another shell, change the command to use your specific shell initialization file. 
++ +[source,bash,subs="verbatim,attributes"] +---- +echo 'export PATH=$HOME/bin:$PATH' >> ~/.bashrc +---- + +=== Windows [[windows_kubectl]] + +. Open a [.noloc]`PowerShell` terminal. +. Download the `kubectl` binary for your cluster's [.noloc]`Kubernetes` version from Amazon S3. ++ +**** [.noloc]`Kubernetes` `1.32` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.32.0/2024-12-20/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.31` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.31.3/2024-12-12/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.30` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.30.7/2024-12-12/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.29` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.29.10/2024-12-12/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.28` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.15/2024-12-12/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.27` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.27.16/2024-12-12/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.26` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.26.15/2024-12-12/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.25` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.25.16/2024-12-12/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.24` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.24.17/2024-12-12/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.23` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.23.17/2024-09-11/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.22` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.22.17/2024-09-11/bin/windows/amd64/kubectl.exe +---- +**** [.noloc]`Kubernetes` `1.21` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.21.14/2024-09-11/bin/windows/amd64/kubectl.exe +---- +. (Optional) Verify the downloaded binary with the `SHA-256` checksum for your binary. ++ +.. Download the `SHA-256` checksum for your cluster's [.noloc]`Kubernetes` version for [.noloc]`Windows`. 
++ +***** [.noloc]`Kubernetes` `1.32` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.32.0/2024-12-20/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.31` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.31.3/2024-12-12/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.30` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.30.7/2024-12-12/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.29` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.29.10/2024-12-12/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.28` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.15/2024-12-12/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.27` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.27.16/2024-12-12/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.26` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.26.15/2024-12-12/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.25` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.25.16/2024-12-12/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.24` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.24.17/2024-12-12/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.23` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.23.17/2024-09-11/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.22` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.22.17/2024-09-11/bin/windows/amd64/kubectl.exe.sha256 +---- +***** [.noloc]`Kubernetes` `1.21` ++ +[source,bash,subs="verbatim,attributes"] +---- +curl.exe -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.21.14/2024-09-11/bin/windows/amd64/kubectl.exe.sha256 +---- +.. Check the `SHA-256` checksum for your downloaded binary. ++ +[source,bash,subs="verbatim,attributes"] +---- +Get-FileHash kubectl.exe +---- +.. Make sure that the generated checksum in the output matches in the checksum in the downloaded `kubectl.sha256` file. The PowerShell output should be an uppercase equivalent string of characters. +. Copy the binary to a folder in your `PATH`. If you have an existing directory in your `PATH` that you use for command line utilities, copy the binary to that directory. Otherwise, complete the following steps. ++ +.. Create a new directory for your command line binaries, such as `C:\bin`. +.. Copy the `kubectl.exe` binary to your new directory. +.. Edit your user or system `PATH` environment variable to add the new directory to your `PATH`. +.. Close your [.noloc]`PowerShell` terminal and open a new one to pick up the new `PATH` variable. +. After you install `kubectl`, you can verify its version. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl version --client +---- +. 
When first installing `kubectl`, it isn't yet configured to communicate with any server. We will cover this configuration as needed in other procedures. If you ever need to update the configuration to communicate with a particular cluster, you can run the following command. Replace [.replaceable]`region-code` with the {aws} Region that your cluster is in. Replace [.replaceable]`my-cluster` with the name of your cluster.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks update-kubeconfig --region region-code --name my-cluster
+----
+. Consider configuring auto completion, which lets you use the tab key to complete `kubectl` subcommands after typing the first few letters. See https://kubernetes.io/docs/reference/kubectl/quick-reference/#kubectl-autocomplete[Kubectl autocomplete] in the [.noloc]`Kubernetes` documentation for details.
+
+
+[[eksctl-install-update,eksctl-install-update.title]]
+== Install `eksctl`
+
+The `eksctl` CLI is used to work with Amazon EKS clusters. It automates many individual tasks. See https://eksctl.io/installation[Installation] in the `eksctl` documentation for instructions on installing `eksctl`.
+
+When using `eksctl`, the IAM security principal that you're using must have permissions to work with Amazon EKS IAM roles, service-linked roles, {aws} CloudFormation, a VPC, and related resources. For more information, see link:service-authorization/latest/reference/list_amazonelastickubernetesservice.html[Actions, resources, and condition keys for Amazon Elastic Kubernetes Service,type="documentation"] and link:IAM/latest/UserGuide/using-service-linked-roles.html[Using service-linked roles,type="documentation"] in the IAM User Guide. You must complete all steps in this guide as the same user. To check the current user, run the following command:
+
+[source,bash,subs="verbatim,attributes"]
+----
+aws sts get-caller-identity
+----
+
+
+[[install-kubectl-next-steps,install-kubectl-next-steps.title]]
+== Next steps
+
+* <>
diff --git a/latest/ug/getting-started/learn-eks.adoc b/latest/ug/getting-started/learn-eks.adoc
new file mode 100644
index 00000000..1631c12e
--- /dev/null
+++ b/latest/ug/getting-started/learn-eks.adoc
@@ -0,0 +1,158 @@
+//!!NODE_ROOT
+include::../attributes.txt[]
+
+[.topic]
+[[learn-eks,learn-eks.title]]
+= Learn Amazon EKS by example
+:info_doctype: chapter
+:info_title: Learn Amazon EKS by example
+:info_titleabbrev: Learn Amazon EKS
+:keywords: tutorial, workshop, developer, learn
+:info_abstract: Find learning paths to extend your knowledge of Amazon EKS.
+:sectnums:
+:toc: left
+:icons: font
+:experimental:
+:idprefix:
+:idseparator: -
+:sourcedir: .
+
+[abstract]
+--
+Find learning paths to extend your knowledge of Amazon EKS.
+--
+
+[[overview,overview.title]]
+== Overview
+
+This Amazon EKS User Guide contains general-purpose procedures to create your first EKS cluster from the <> or <> and a solid reference for all major Amazon EKS components. However, as an Amazon EKS cluster administrator or developer, you can gain a deeper understanding of Amazon EKS by following learning paths that exist in sites outside of this guide. These sites can help you:
+
+
+
+* *Set up specific types of clusters*. Specific cluster types can be based on your workload types or security requirements. For example, you may want to tune a cluster to run batch, machine learning, or compute-intensive workloads.
+* *Enhance your clusters*. You can add advanced features to your cluster to provide things like observability, flexible storage, autoscaling, or specialized cluster networking.
+* *Automate updates*. Using features like GitOps, you can set up automatic provisioning of cluster infrastructure and workloads, based on changes that occur to those components in your Git repositories.
+* *Use advanced cluster setup tools*. While `eksctl` provides a quick way to create a cluster, there are other tools that can make it easier to configure and upgrade more complex clusters. These include tools like https://www.terraform.io/[Terraform] and link:cloudformation/[CloudFormation,type="marketing"].
+
+To start out on your Amazon EKS learning path, we recommend that you visit some of the sites described on this page. If you run into problems along the way, there are also resources to help you get through them. For example, the https://repost.aws/search/content?globalSearch=EKS[Re:post Knowledge Center] lets you search the support database for Amazon EKS-related support issues. Also, the https://aws.github.io/aws-eks-best-practices/[Amazon EKS Best Practices Guide] offers tips on the best ways to set up your production-grade clusters.
+
+[[eks-workshop,eks-workshop.title]]
+== Amazon EKS Workshop
+
+Starting with a basic understanding of Kubernetes and containers, the https://www.eksworkshop.com/[Amazon EKS workshop] is a learning platform for walking a cluster administrator through important features of Amazon EKS. Here are ways you can engage with the Amazon EKS workshop:
+
+
+
+* *Amazon EKS Basics*: Watch the video on the https://www.eksworkshop.com/docs/introduction[Introduction] page to learn about how Amazon EKS implements Kubernetes features on the {aws} cloud. If you need an even more basic understanding of Kubernetes, watch the https://www.youtube.com/watch?v=a2gfpZE8vXY[What is Kubernetes] video.
+* *Amazon EKS Setup*: If you have an {aws} account, the https://www.eksworkshop.com/docs/introduction/setup/[Setup] section helps you set up a CloudShell environment to use for creating a cluster.
It offers a choice of https://www.eksworkshop.com/docs/introduction/setup/your-account/using-eksctl[eksctl] (a simple cluster creation command line) and https://www.eksworkshop.com/docs/introduction/setup/your-account/using-terraform[Terraform] (a more infrastructure-as-code approach to creating a cluster) for creating your Amazon EKS cluster. +* *Amazon EKS Getting started*: Try out a simple web store from the https://www.eksworkshop.com/docs/introduction/getting-started/about[Sample application] section. You can use this throughout the other exercises. In this section, you can also learn about https://www.eksworkshop.com/docs/introduction/getting-started/packaging-application[packaging container images] and how microservices are managed using Kubernetes Pods, Deployments, Services, StatefulSets and Namespaces. Then use Kustomize to deploy changes to Kubernetes manifests. +* *Amazon EKS Fundamentals*: Using {aws} features such as the https://www.eksworkshop.com/docs/fundamentals/exposing/aws-lb-controller[{aws} Load Balancer Controller], the workshop shows you how to expose your applications to the outside world. For storage, the workshop showcases how to use https://www.eksworkshop.com/docs/fundamentals/storage/ebs/[Amazon EBS] for block storage, https://www.eksworkshop.com/docs/fundamentals/storage/efs/[Amazon EFS] for filesystem storage, and Amazon FSx for NetApp ONTAP to manage ONTAP file systems in {aws}. For node management, the workshop helps you set up https://www.eksworkshop.com/docs/fundamentals/managed-node-groups/[Managed Node Groups]. +* *Amazon EKS advanced features*: More advanced features offered through the Amazon EKS workshop include labs for setting up: ++ +** Autoscaling: This includes node autoscaling (with https://www.eksworkshop.com/docs/autoscaling/compute/cluster-autoscaler/[Cluster Autoscaler] or https://www.eksworkshop.com/docs/autoscaling/compute/karpenter/[Karpenter]) and workload autoscaling (with https://www.eksworkshop.com/docs/autoscaling/workloads/horizontal-pod-autoscaler/[Horizontal Pod Autoscaler] and https://www.eksworkshop.com/docs/autoscaling/workloads/cluster-proportional-autoscaler/[Cluster Proportional Autoscaler]). +** Observability: Learn about https://www.eksworkshop.com/docs/observability/logging/[Logging], https://www.eksworkshop.com/docs/observability/opensearch/[OpenSearch], https://www.eksworkshop.com/docs/observability/container-insights/[Container Insights on Amazon EKS], and https://www.eksworkshop.com/docs/observability/kubecost/[Cost Visibility with Kubecost] in a set of https://www.eksworkshop.com/docs/observability/[Observability labs]. +** Security: This set of https://www.eksworkshop.com/docs/security/[Security labs] let you explore https://www.eksworkshop.com/docs/security/secrets-management/[Secrets Management], https://www.eksworkshop.com/docs/security/guardduty/[Amazon GuardDuty], https://www.eksworkshop.com/docs/security/pod-security-standards/[Pod Security Standards], and https://www.eksworkshop.com/docs/security/kyverno/[Kyverno policy management]. +** Networking: Learn networking features for Amazon EKS from https://www.eksworkshop.com/docs/networking/[Networking] labs that include https://www.eksworkshop.com/docs/networking/vpc-cni/[Amazon VPC CNI] (supporting network plugins) and https://www.eksworkshop.com/docs/networking/vpc-lattice/[Amazon VPC Lattice] (for configuring clusters across VC and user accounts). 
+** Automation: Labs on https://www.eksworkshop.com/docs/automation/[Automation] step you through https://www.eksworkshop.com/docs/automation/gitops/[GitOps] methods of managing your clusters and projects like https://www.eksworkshop.com/docs/automation/controlplanes/ack/[{aws} Controllers for Kubernetes] and https://www.eksworkshop.com/docs/automation/controlplanes/crossplane/[Crossplane] for managing Amazon EKS control planes. + + +[[eks-hands-on-cluster-setup-tutorials,eks-hands-on-cluster-setup-tutorials.title]] +== Amazon EKS hands-on cluster setup tutorials + +A set of https://community.aws/tags/eks-cluster-setup[Amazon EKS Cluster Setup tutorials] on the {aws} Community site can help you create special-purpose Amazon EKS clusters and enhance those clusters in various ways. The tutorials are divided into three different types: + +_Building clusters_ + +These tutorials help you build clusters that can be used for special purposes. These special purposes include the ability to run: + + + +* https://community.aws/tutorials/navigating-amazon-eks/eks-cluster-ipv6-globally-scalable[Globally scalable applications based on IPv6] +* https://community.aws/tutorials/navigating-amazon-eks/eks-cluster-batch-processing[Asynchronous batch tasks] +* https://community.aws/tutorials/navigating-amazon-eks/eks-cluster-high-traffic[High traffic microservices] +* https://community.aws/tutorials/navigating-amazon-eks/eks-karpenter-fargate[Autoscaling with Karpenter on Fargate] +* https://community.aws/tutorials/navigating-amazon-eks/eks-cluster-financial-workload[Financial workloads] +* https://community.aws/tutorials/navigating-amazon-eks/eks-cluster-windows-fargate[Windows Managed Node Groups] + +_Enhancing clusters_ + +Once you have an existing cluster, you can extend and enhance that cluster in ways that allow it to run specialized workloads and otherwise enhance the clusters. These tutorials include ways to: + + + +* https://community.aws/tutorials/navigating-amazon-eks/eks-with-efs-add-on/[Provide storage solutions with EFS CSI] +* https://community.aws/tutorials/navigating-amazon-eks/eks-dynamic-db-storage-ebs-csi[Provide dynamic database storage with EBS CSI] +* https://community.aws/tutorials/navigating-amazon-eks/eks-cluster-load-balancer-ipv4[Expose applications on IPv4 clusters using the {aws} Load Balancer Controller] +* https://community.aws/tutorials/navigating-amazon-eks/eks-cluster-load-balancer-ipv6[Expose applications on IPv6 clusters using the {aws} Load Balancer Controller] + +_Optimizing {aws} services_ + +Using these tutorials, you can better integrate your clusters with {aws} services. 
These tutorials include those that help you: + + + +* https://community.aws/tutorials/navigating-amazon-eks/automating-dns-records-for-microservices-using-externaldns/[Manage DNS records for microservices with ExternalDNS] +* https://community.aws/tutorials/navigating-amazon-eks/eks-monitor-containerized-applications[Monitor applications with CloudWatch] +* https://community.aws/tutorials/navigating-amazon-eks/managing-high-volume-batch-sqs-eks[Manage asynchronous tasks with SQS and EFS storage] +* https://community.aws/tutorials/navigating-amazon-eks/eks-integrate-secrets-manager[Consume {aws} Secrets Manager Secrets from workloads] +* https://community.aws/tutorials/navigating-amazon-eks/eks-fargate-mtls-nginx-controller[Set up mTLS with Fargate, NGINX, and ACM PCA] + + +[[eks-samples,eks-samples.title]] +== Amazon EKS Samples + +The https://github.com/aws-samples/aws-eks-se-samples[Amazon EKS Samples] repository stores manifests to use with Amazon EKS. These manifests give you the opportunity to try out different kinds of applications in Amazon EKS or create specific types of Amazon EKS clusters. Samples include manifests to: + + + +* https://github.com/aws-samples/aws-eks-se-samples/tree/main/examples/eksctl/how-to-eks-fargate[Create an {aws} Amazon EKS Fargate cluster] +* https://github.com/aws-samples/aws-eks-se-samples/blob/main/examples/eksctl/how-to-existing-iamrole/existing-role.yaml[Create a cluster with an existing IAM role] +* https://github.com/aws-samples/aws-eks-se-samples/tree/main/examples/eksctl/how-to-ubuntu-nodegroups[Add and Ubuntu Managed Node Group to a cluster] +* https://github.com/aws-samples/aws-eks-se-samples/tree/main/examples/kubernetes/how-to-backup-restore-ebs-pvc[Backup and restore Pod storage with volume snapshots] +* https://github.com/aws-samples/aws-eks-se-samples/tree/main/examples/kubernetes/how-to-dr-multi-account[Recover EBS volumes mounted as PVCs with multiple accounts] +* https://github.com/aws-samples/aws-eks-se-samples/tree/main/examples/kubernetes/how-to-enable-proxy-procotcol-clb[Enable proxy protocol for NGINX Ingress Controller with Classic Load Balancers] +* https://github.com/aws-samples/aws-eks-se-samples/tree/main/examples/kubernetes/how-to-logging-eks-fargate-opensearch[Configure Logging on Fargate to {aws} OpenSearch] +* https://github.com/aws-samples/aws-eks-se-samples/tree/main/examples/kubernetes/how-to-python-sdk-containers[Run Python SDK with a web federated identity provider] +* https://github.com/aws-samples/aws-eks-se-samples/tree/main/examples/kubernetes/how-to-setup-nfs-csi-eks[Deploy a sample app on an NFS CSI controller] +* https://github.com/aws-samples/aws-eks-se-samples/tree/main/examples/kubernetes/how-to-snapshot-restore-resize-sts[Use volume snapshots for StatefulSets] +* https://github.com/aws-samples/aws-eks-se-samples/tree/main/examples/kubernetes/how-to-topology-awareness-hints[Deploy pods across nodes on different availability zones] + +Keep in mind that these samples are for learning and testing purposes only and are not intended to be used in production. + +[[aws-tutorials,aws-tutorials.title]] +== {aws} Tutorials + +The link:tutorials[{aws} Tutorials,type="marketing"] site publishes a few Amazon EKS tutorials, but also offers a search tool to find other tutorials published on {aws} sites (such as the {aws} Community site). 
Amazon EKS tutorials published directly on this site include:
+
+
+
+* link:tutorials/deploy-webapp-eks/[Deploy a Container Web App on Amazon EKS,type="marketing"]
+* link:tutorials/amazon-eks-with-spot-instances/[Run Kubernetes clusters for less (Amazon EKS and Spot instances),type="marketing"]
+* link:tutorials/cost-optimize-jenkins/[How to cost optimize Jenkins jobs on Kubernetes,type="marketing"]
+
+
+[[developers-workshop,developers-workshop.title]]
+== Developers Workshop
+
+If you are a software developer looking to create or refactor applications to run on Amazon EKS, the http://developers.eksworkshop.com[Amazon EKS Developers workshop] is a good place to start. The workshop not only helps you build containerized applications, but also helps you deploy those containers to a container registry (link:ecr/[ECR,type="marketing"]) and from there to an Amazon EKS cluster.
+
+Start with the https://developers.eksworkshop.com/docs/python/[Amazon EKS Python Workshop] to go through the process of refactoring a Python application, then set up your development environment to prepare for deploying the application. Step through sections on Containers, Kubernetes, and Amazon EKS to prepare to run your containerized applications in those environments.
+
+[[terraform-workshop,terraform-workshop.title]]
+== Terraform Workshop
+
+While `eksctl` is a simple tool for creating a cluster, for more complex infrastructure-as-code types of Amazon EKS deployments, https://www.terraform.io/[Terraform] is a popular Amazon EKS cluster creation and management tool. The https://catalog.us-east-1.prod.workshops.aws/workshops/afee4679-89af-408b-8108-44f5b1065cc7/en-US[Terraform Amazon EKS Workshop] teaches how to use Terraform to build an {aws} VPC, create Amazon EKS clusters, and add optional enhancements to your cluster. In particular, there is a section for creating a https://catalog.us-east-1.prod.workshops.aws/workshops/afee4679-89af-408b-8108-44f5b1065cc7/en-US/500-eks-terraform-workshop[private Amazon EKS cluster].
+
+[[aws-eks-training,aws-eks-training.title]]
+== {aws} Amazon EKS Training
+
+{aws} offers formal training for learning about Amazon EKS. A three-day training course entitled link:training/classroom/running-containers-on-amazon-elastic-kubernetes-service-amazon-eks/[Running Containers on Amazon Elastic Kubernetes Service,type="marketing"] teaches:
+
+
+
+* Kubernetes and Amazon EKS fundamentals
+* How to build Amazon EKS clusters
+* Securing Amazon EKS with {aws} IAM and Kubernetes RBAC authorization
+* GitOps automation tools
+* Monitoring tools
+* Techniques for improving cost, efficiency, and resiliency
diff --git a/latest/ug/getting-started/setting-up.adoc b/latest/ug/getting-started/setting-up.adoc
new file mode 100644
index 00000000..036f8986
--- /dev/null
+++ b/latest/ug/getting-started/setting-up.adoc
@@ -0,0 +1,47 @@
+//!!NODE_ROOT
+include::../attributes.txt[]
+[[setting-up,setting-up.title]]
+= Set up to use Amazon EKS
+:doctype: book
+:sectnums:
+:toc: left
+:icons: font
+:experimental:
+:idprefix:
+:idseparator: -
+:sourcedir: .
+:info_doctype: chapter
+:info_title: Set up to use Amazon EKS
+:info_titleabbrev: Set up
+:keywords: setting up, setup
+:info_abstract: Set up the tools needed for creating and working with an Amazon EKS cluster.
+
+[abstract]
+--
+Set up the tools needed for creating and working with an Amazon EKS cluster.
+--
+
+To prepare for the command-line management of your Amazon EKS clusters, you need to install several tools.
Use the following to set up credentials, create and modify clusters, and work with clusters once they are running: + + + +* <> – Get the {aws} CLI to set up and manage the services you need to work with Amazon EKS clusters. In particular, you need the {aws} CLI to configure credentials, but you also use it to work with other {aws} services. +* <> – The `eksctl` CLI interacts with {aws} to create, modify, and delete Amazon EKS clusters. Once a cluster is up, use the open source `kubectl` command to manage Kubernetes objects within your Amazon EKS clusters. +* Set up a development environment (optional) – Consider adding the following tools: ++ +** *Local deployment tool* – If you're new to [.noloc]`Kubernetes`, consider installing a local deployment tool like https://minikube.sigs.k8s.io/docs/[minikube] or https://kind.sigs.k8s.io/[kind]. These tools let you run a [.noloc]`Kubernetes` cluster on your local machine for testing applications. +** *Package manager* – https://helm.sh/docs/intro/install/[Helm] is a popular package manager for [.noloc]`Kubernetes` that simplifies the installation and management of complex packages. With [.noloc]`Helm`, it's easier to install and manage packages like the {aws} Load Balancer Controller on your Amazon EKS cluster. + + +[[setting-up-next-steps,setting-up-next-steps.title]] +== Next steps + +* <> +* <> +* <> + + +include::install-awscli.adoc[leveloffset=+1] + + +include::install-kubectl.adoc[leveloffset=+1] diff --git a/latest/ug/iam_policies/AmazonEKS_CNI_IPv6_Policy.json b/latest/ug/iam_policies/AmazonEKS_CNI_IPv6_Policy.json new file mode 100644 index 00000000..23d72e4e --- /dev/null +++ b/latest/ug/iam_policies/AmazonEKS_CNI_IPv6_Policy.json @@ -0,0 +1,25 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AssignIpv6Addresses", + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeInstanceTypes" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags" + ], + "Resource": [ + "arn:aws:ec2:*:*:network-interface/*" + ] + } + ] +} diff --git a/latest/ug/iam_policies/EKS_trust_relationship.json b/latest/ug/iam_policies/EKS_trust_relationship.json new file mode 100644 index 00000000..d763eb1f --- /dev/null +++ b/latest/ug/iam_policies/EKS_trust_relationship.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ssm.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file diff --git a/latest/ug/iam_policies/Worker_node_trust_relationship.json b/latest/ug/iam_policies/Worker_node_trust_relationship.json new file mode 100644 index 00000000..3c0e5598 --- /dev/null +++ b/latest/ug/iam_policies/Worker_node_trust_relationship.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file diff --git a/latest/ug/images/2048.png b/latest/ug/images/2048.png new file mode 100644 index 00000000..9c71fe59 Binary files /dev/null and b/latest/ug/images/2048.png differ diff --git a/latest/ug/images/AWS_LOGO_RGB_200px.png b/latest/ug/images/AWS_LOGO_RGB_200px.png new file mode 100644 index 00000000..b0288912 Binary files /dev/null and b/latest/ug/images/AWS_LOGO_RGB_200px.png differ diff --git a/latest/ug/images/EKS_CNI_metrics.png b/latest/ug/images/EKS_CNI_metrics.png new file mode 100644 index 00000000..cddfcab6 Binary
files /dev/null and b/latest/ug/images/EKS_CNI_metrics.png differ diff --git a/latest/ug/images/SNAT-disabled.jpg b/latest/ug/images/SNAT-disabled.jpg new file mode 100644 index 00000000..0e4bc7a5 Binary files /dev/null and b/latest/ug/images/SNAT-disabled.jpg differ diff --git a/latest/ug/images/SNAT-enabled.jpg b/latest/ug/images/SNAT-enabled.jpg new file mode 100644 index 00000000..929ba2d8 Binary files /dev/null and b/latest/ug/images/SNAT-enabled.jpg differ diff --git a/latest/ug/images/adot_comp_workflow.png b/latest/ug/images/adot_comp_workflow.png new file mode 100644 index 00000000..5c0db83c Binary files /dev/null and b/latest/ug/images/adot_comp_workflow.png differ diff --git a/latest/ug/images/aws_cover.jpg b/latest/ug/images/aws_cover.jpg new file mode 100644 index 00000000..1e77c372 Binary files /dev/null and b/latest/ug/images/aws_cover.jpg differ diff --git a/latest/ug/images/connector-model.png b/latest/ug/images/connector-model.png new file mode 100644 index 00000000..52404570 Binary files /dev/null and b/latest/ug/images/connector-model.png differ diff --git a/latest/ug/images/console-cni-config-network-policy-logs-cwl.png b/latest/ug/images/console-cni-config-network-policy-logs-cwl.png new file mode 100644 index 00000000..25a12c23 Binary files /dev/null and b/latest/ug/images/console-cni-config-network-policy-logs-cwl.png differ diff --git a/latest/ug/images/console-cni-config-network-policy-logs.png b/latest/ug/images/console-cni-config-network-policy-logs.png new file mode 100644 index 00000000..689a0f29 Binary files /dev/null and b/latest/ug/images/console-cni-config-network-policy-logs.png differ diff --git a/latest/ug/images/console-cni-config-network-policy.png b/latest/ug/images/console-cni-config-network-policy.png new file mode 100644 index 00000000..ca8783ef Binary files /dev/null and b/latest/ug/images/console-cni-config-network-policy.png differ diff --git a/latest/ug/images/dashboard-token-auth.png b/latest/ug/images/dashboard-token-auth.png new file mode 100644 index 00000000..f41fcd75 Binary files /dev/null and b/latest/ug/images/dashboard-token-auth.png differ diff --git a/latest/ug/images/eks-iam.png b/latest/ug/images/eks-iam.png new file mode 100644 index 00000000..99d6256c Binary files /dev/null and b/latest/ug/images/eks-iam.png differ diff --git a/latest/ug/images/eksctl-create-cluster.gif b/latest/ug/images/eksctl-create-cluster.gif new file mode 100644 index 00000000..077c920c Binary files /dev/null and b/latest/ug/images/eksctl-create-cluster.gif differ diff --git a/latest/ug/images/hybrid-prereq-diagram.png b/latest/ug/images/hybrid-prereq-diagram.png new file mode 100644 index 00000000..808065fd Binary files /dev/null and b/latest/ug/images/hybrid-prereq-diagram.png differ diff --git a/latest/ug/images/k8sinaction.png b/latest/ug/images/k8sinaction.png new file mode 100644 index 00000000..d47eede2 Binary files /dev/null and b/latest/ug/images/k8sinaction.png differ diff --git a/latest/ug/images/kubecost-architecture.png b/latest/ug/images/kubecost-architecture.png new file mode 100644 index 00000000..166f4150 Binary files /dev/null and b/latest/ug/images/kubecost-architecture.png differ diff --git a/latest/ug/images/kubecost.png b/latest/ug/images/kubecost.png new file mode 100644 index 00000000..cbb7b51d Binary files /dev/null and b/latest/ug/images/kubecost.png differ diff --git a/latest/ug/images/kubernetes-dashboard.png b/latest/ug/images/kubernetes-dashboard.png new file mode 100644 index 00000000..6084860e Binary files /dev/null and 
b/latest/ug/images/kubernetes-dashboard.png differ diff --git a/latest/ug/images/lbc-overview.png b/latest/ug/images/lbc-overview.png new file mode 100644 index 00000000..03acbc09 Binary files /dev/null and b/latest/ug/images/lbc-overview.png differ diff --git a/latest/ug/images/lbc-overview.svg b/latest/ug/images/lbc-overview.svg new file mode 100644 index 00000000..6118cd39 --- /dev/null +++ b/latest/ug/images/lbc-overview.svg @@ -0,0 +1,4 @@ + + + +
[Diagram text from lbc-overview.svg: VPC, Amazon EKS Cluster, Pod, MyApp, App2, Users, Amazon Load Balancer]
\ No newline at end of file diff --git a/latest/ug/images/lp-eks-deployment-models.png b/latest/ug/images/lp-eks-deployment-models.png new file mode 100644 index 00000000..5533e0c5 Binary files /dev/null and b/latest/ug/images/lp-eks-deployment-models.png differ diff --git a/latest/ug/images/network-components.png b/latest/ug/images/network-components.png new file mode 100644 index 00000000..f2d5dc63 Binary files /dev/null and b/latest/ug/images/network-components.png differ diff --git a/latest/ug/images/networking-overview.png b/latest/ug/images/networking-overview.png new file mode 100644 index 00000000..5d521f13 Binary files /dev/null and b/latest/ug/images/networking-overview.png differ diff --git a/latest/ug/images/networking.png b/latest/ug/images/networking.png new file mode 100644 index 00000000..7ff089e0 Binary files /dev/null and b/latest/ug/images/networking.png differ diff --git a/latest/ug/images/outpost_env.png b/latest/ug/images/outpost_env.png new file mode 100644 index 00000000..f481d495 Binary files /dev/null and b/latest/ug/images/outpost_env.png differ diff --git a/latest/ug/images/outposts-deployment-options.png b/latest/ug/images/outposts-deployment-options.png new file mode 100644 index 00000000..6180ea04 Binary files /dev/null and b/latest/ug/images/outposts-deployment-options.png differ diff --git a/latest/ug/images/outposts-local-cluster.png b/latest/ug/images/outposts-local-cluster.png new file mode 100644 index 00000000..d37905e5 Binary files /dev/null and b/latest/ug/images/outposts-local-cluster.png differ diff --git a/latest/ug/images/prometheus-metric.png b/latest/ug/images/prometheus-metric.png new file mode 100644 index 00000000..a50c55c7 Binary files /dev/null and b/latest/ug/images/prometheus-metric.png differ diff --git a/latest/ug/images/prometheus.png b/latest/ug/images/prometheus.png new file mode 100644 index 00000000..cf9106b8 Binary files /dev/null and b/latest/ug/images/prometheus.png differ diff --git a/latest/ug/images/quick2048.png b/latest/ug/images/quick2048.png new file mode 100644 index 00000000..69109272 Binary files /dev/null and b/latest/ug/images/quick2048.png differ diff --git a/latest/ug/images/stars-default.png b/latest/ug/images/stars-default.png new file mode 100644 index 00000000..fef302d2 Binary files /dev/null and b/latest/ug/images/stars-default.png differ diff --git a/latest/ug/images/stars-final.png b/latest/ug/images/stars-final.png new file mode 100644 index 00000000..b2b65bf6 Binary files /dev/null and b/latest/ug/images/stars-final.png differ diff --git a/latest/ug/images/stars-front-end-back-end.png b/latest/ug/images/stars-front-end-back-end.png new file mode 100644 index 00000000..bd665d2b Binary files /dev/null and b/latest/ug/images/stars-front-end-back-end.png differ diff --git a/latest/ug/images/stars-no-traffic.png b/latest/ug/images/stars-no-traffic.png new file mode 100644 index 00000000..89d711ea Binary files /dev/null and b/latest/ug/images/stars-no-traffic.png differ diff --git a/latest/ug/images/what-is-eks.png b/latest/ug/images/what-is-eks.png new file mode 100644 index 00000000..0f791640 Binary files /dev/null and b/latest/ug/images/what-is-eks.png differ diff --git a/latest/ug/images/zs-ha-after-failure.png b/latest/ug/images/zs-ha-after-failure.png new file mode 100644 index 00000000..c69a3816 Binary files /dev/null and b/latest/ug/images/zs-ha-after-failure.png differ diff --git a/latest/ug/images/zs-ha-before-failure.png b/latest/ug/images/zs-ha-before-failure.png new file mode 100644 index 
00000000..02a3ab93 Binary files /dev/null and b/latest/ug/images/zs-ha-before-failure.png differ diff --git a/latest/ug/images/zs-pod-affinity-rule.png b/latest/ug/images/zs-pod-affinity-rule.png new file mode 100644 index 00000000..5dfdcfa4 Binary files /dev/null and b/latest/ug/images/zs-pod-affinity-rule.png differ diff --git a/latest/ug/images/zs-spread-constraints-2.png b/latest/ug/images/zs-spread-constraints-2.png new file mode 100644 index 00000000..037d7ab7 Binary files /dev/null and b/latest/ug/images/zs-spread-constraints-2.png differ diff --git a/latest/ug/images/zs-spread-constraints.png b/latest/ug/images/zs-spread-constraints.png new file mode 100644 index 00000000..9c078c34 Binary files /dev/null and b/latest/ug/images/zs-spread-constraints.png differ diff --git a/latest/ug/images/zs-traffic-flow-after-1.png b/latest/ug/images/zs-traffic-flow-after-1.png new file mode 100644 index 00000000..0d07800e Binary files /dev/null and b/latest/ug/images/zs-traffic-flow-after-1.png differ diff --git a/latest/ug/images/zs-traffic-flow-after-2.png b/latest/ug/images/zs-traffic-flow-after-2.png new file mode 100644 index 00000000..de21d4c6 Binary files /dev/null and b/latest/ug/images/zs-traffic-flow-after-2.png differ diff --git a/latest/ug/images/zs-traffic-flow-before-1.png b/latest/ug/images/zs-traffic-flow-before-1.png new file mode 100644 index 00000000..1d09e7ac Binary files /dev/null and b/latest/ug/images/zs-traffic-flow-before-1.png differ diff --git a/latest/ug/images/zs-traffic-flow-before-2.png b/latest/ug/images/zs-traffic-flow-before-2.png new file mode 100644 index 00000000..baa00185 Binary files /dev/null and b/latest/ug/images/zs-traffic-flow-before-2.png differ diff --git a/latest/ug/images_BJS/kubecost.png b/latest/ug/images_BJS/kubecost.png new file mode 100644 index 00000000..57fc8782 Binary files /dev/null and b/latest/ug/images_BJS/kubecost.png differ diff --git a/latest/ug/images_BJS/outpost_env.png b/latest/ug/images_BJS/outpost_env.png new file mode 100644 index 00000000..bd1dcfcb Binary files /dev/null and b/latest/ug/images_BJS/outpost_env.png differ diff --git a/latest/ug/images_BJS/outposts-deployment-options.png b/latest/ug/images_BJS/outposts-deployment-options.png new file mode 100644 index 00000000..1b6487e0 Binary files /dev/null and b/latest/ug/images_BJS/outposts-deployment-options.png differ diff --git a/latest/ug/images_BJS/outposts-local-cluster.png b/latest/ug/images_BJS/outposts-local-cluster.png new file mode 100644 index 00000000..42f93f0e Binary files /dev/null and b/latest/ug/images_BJS/outposts-local-cluster.png differ diff --git a/latest/ug/integrations/creating-resources-with-cloudformation.adoc b/latest/ug/integrations/creating-resources-with-cloudformation.adoc new file mode 100644 index 00000000..e936c6bf --- /dev/null +++ b/latest/ug/integrations/creating-resources-with-cloudformation.adoc @@ -0,0 +1,38 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[creating-resources-with-cloudformation,creating-resources-with-cloudformation.title]] += Create Amazon EKS resources with {aws} CloudFormation +:info_doctype: section +:info_title: Create Amazon EKS resources with \ + {aws} CloudFormation +:info_abstract: Learn about how to create resources for Amazon EKS using an {aws} CloudFormation \ + template. + +[abstract] +-- +Learn about how to create resources for Amazon EKS using an {aws} CloudFormation template. +-- + +Amazon EKS is integrated with {aws} CloudFormation, a service that helps you model and set up your {aws} resources so that you can spend less time creating and managing your resources and infrastructure. You create a template that describes all the {aws} resources that you want, for example an Amazon EKS cluster, and {aws} CloudFormation takes care of provisioning and configuring those resources for you. + +When you use {aws} CloudFormation, you can reuse your template to set up your Amazon EKS resources consistently and repeatedly. Just describe your resources once, and then provision the same resources over and over in multiple {aws} accounts and Regions. + +[[working-with-templates,working-with-templates.title]] +== Amazon EKS and {aws} CloudFormation templates + +To provision and configure resources for Amazon EKS and related services, you must understand link:AWSCloudFormation/latest/UserGuide/template-guide.html[{aws} CloudFormation templates,type="documentation"]. Templates are formatted text files in JSON or YAML. These templates describe the resources that you want to provision in your {aws} CloudFormation stacks. If you're unfamiliar with JSON or YAML, you can use {aws} CloudFormation Designer to help you get started with {aws} CloudFormation templates. For more information, see link:AWSCloudFormation/latest/UserGuide/working-with-templates-cfn-designer.html[What is {aws} CloudFormation Designer?,type="documentation"] in the _{aws} CloudFormation User Guide_. + +Amazon EKS supports creating clusters and node groups in {aws} CloudFormation. For more information, including examples of JSON and YAML templates for your Amazon EKS resources, see link:AWSCloudFormation/latest/UserGuide/AWS_EKS.html[Amazon EKS resource type reference,type="documentation"] in the _{aws} CloudFormation User Guide_. + +[[learn-more-cloudformation,learn-more-cloudformation.title]] +== Learn more about {aws} CloudFormation + +To learn more about {aws} CloudFormation, see the following resources: + + + +* link:cloudformation/[{aws} CloudFormation,type="marketing"] +* link:AWSCloudFormation/latest/UserGuide/Welcome.html[{aws} CloudFormation User Guide,type="documentation"] +* link:cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html[{aws} CloudFormation Command Line Interface User Guide,type="documentation"] diff --git a/latest/ug/integrations/eks-integrations.adoc b/latest/ug/integrations/eks-integrations.adoc new file mode 100644 index 00000000..50107196 --- /dev/null +++ b/latest/ug/integrations/eks-integrations.adoc @@ -0,0 +1,40 @@ +//!!NODE_ROOT +include::../attributes.txt[] +[[eks-integrations,eks-integrations.title]] += Enhance EKS with integrated {aws} services +:doctype: book +:sectnums: +:toc: left +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . 
+:info_doctype: chapter +:info_title: Enhance EKS with integrated {aws} services +:info_titleabbrev: Working with other services + +In addition to the services covered in other sections, Amazon EKS works with more {aws} services to provide additional solutions. This topic identifies some of the other services that either use Amazon EKS to add functionality, or services that Amazon EKS uses to perform tasks. + +[.topiclist] +[[Topic List]] + +include::creating-resources-with-cloudformation.adoc[leveloffset=+1] + + +include::integration-detective.adoc[leveloffset=+1] + + +include::integration-guardduty.adoc[leveloffset=+1] + + +include::integration-resilience-hub.adoc[leveloffset=+1] + + +include::integration-securitylake.adoc[leveloffset=+1] + + +include::integration-vpc-lattice.adoc[leveloffset=+1] + + +include::local-zones.adoc[leveloffset=+1] diff --git a/latest/ug/integrations/images b/latest/ug/integrations/images new file mode 120000 index 00000000..5e675731 --- /dev/null +++ b/latest/ug/integrations/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/latest/ug/integrations/integration-detective.adoc b/latest/ug/integrations/integration-detective.adoc new file mode 100644 index 00000000..038992f3 --- /dev/null +++ b/latest/ug/integrations/integration-detective.adoc @@ -0,0 +1,39 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[integration-detective,integration-detective.title]] += Analyze security events on EKS with Amazon Detective +:info_doctype: section +:info_title: Analyze security events on EKS with Amazon Detective +:info_abstract: Amazon Detective helps you analyze, investigate, and quickly identify the root cause \ + of security findings or suspicious activities. +:keywords: Amazon Detective + +[abstract] +-- +Amazon Detective helps you analyze, investigate, and quickly identify the root cause of security findings or suspicious activities. +-- + +link:detective/[Amazon Detective,type="marketing"] helps you analyze, investigate, and quickly identify the root cause of security findings or suspicious activities. Detective automatically collects log data from your {aws} resources. It then uses machine learning, statistical analysis, and graph theory to generate visualizations that help you to conduct faster and more efficient security investigations. The Detective prebuilt data aggregations, summaries, and context help you to quickly analyze and determine the nature and extent of possible security issues. For more information, see the link:detective/latest/adminguide/what-is-detective.html[Amazon Detective User Guide,type="documentation"]. + +Detective organizes [.noloc]`Kubernetes` and {aws} data into findings such as: + + + +* Amazon EKS cluster details, including the IAM identity that created the cluster and the service role of the cluster. You can investigate the {aws} and [.noloc]`Kubernetes` API activity of these IAM identities with Detective. +* Container details, such as the image and security context. You can also review details for terminated [.noloc]`Pods`. +* [.noloc]`Kubernetes` API activity, including both overall trends in API activity and details on specific API calls. For example, you can show the number of successful and failed [.noloc]`Kubernetes` API calls that were issued during a selected time range. Additionally, the section on newly observed API calls might be helpful to identify suspicious activity. + +Amazon EKS audit logs is an optional data source package that can be added to your Detective behavior graph. You can view the available optional source packages, and their status in your account. For more information, see link:detective/latest/adminguide/source-data-types-EKS.html[Amazon EKS audit logs for Detective,type="documentation"] in the _Amazon Detective User Guide_. + +[[integration-detective-use,integration-detective-use.title]] +== Use Amazon Detective with Amazon EKS + +Before you can review findings, Detective must be enabled for at least 48 hours in the same {aws} Region that your cluster is in. For more information, see link:detective/latest/adminguide/detective-setup.html[Setting up Amazon Detective,type="documentation"] in the _Amazon Detective User Guide_. + +. Open the Detective console at https://console.aws.amazon.com/detective/. +. From the left navigation pane, select *Search*. +. Select *Choose type* and then select *EKS cluster*. +. Enter the cluster name or ARN and then choose *Search*. +. In the search results, choose the name of the cluster that you want to view activity for. For more information about what you can view, see link:detective/latest/userguide/profile-panel-drilldown-kubernetes-api-volume.html[Overall Kubernetes API activity involving an Amazon EKS cluster,type="documentation"] in the _Amazon Detective User Guide_. 
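If you prefer to verify the integration from the {aws} CLI instead of the console, the following sketch is one way to confirm that Detective is enabled and to review the data source packages for your behavior graph. The Region, account ID, and graph ARN are placeholders, and the `list-datasource-packages` subcommand name is based on the Detective `ListDatasourcePackages` API; confirm both against your installed {aws} CLI version.

[source,bash,subs="verbatim,attributes"]
----
# List your Detective behavior graphs. An empty list means Detective isn't enabled in this Region yet.
aws detective list-graphs --region region-code

# Review which optional data source packages (such as EKS audit logs) are enabled for a graph.
# The graph ARN comes from the output of the previous command; the one shown here is a placeholder.
aws detective list-datasource-packages \
    --region region-code \
    --graph-arn arn:aws:detective:region-code:111122223333:graph:123412341234
----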
diff --git a/latest/ug/integrations/integration-guardduty.adoc b/latest/ug/integrations/integration-guardduty.adoc new file mode 100644 index 00000000..e38d5df6 --- /dev/null +++ b/latest/ug/integrations/integration-guardduty.adoc @@ -0,0 +1,36 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[integration-guardduty,integration-guardduty.title]] += Detect threats with Amazon GuardDuty +:info_doctype: section +:info_title: Detect threats with Amazon GuardDuty + +Amazon GuardDuty is a threat detection service that helps protect your accounts, containers, workloads, and data within your {aws} environment. Using machine learning (ML) models, and anomaly and threat detection capabilities, GuardDuty continuously monitors different log sources and runtime activity to identify and prioritize potential security risks and malicious activities in your environment. + +Among other capabilities, GuardDuty offers the following two features that detect potential threats to your EKS clusters: _EKS Protection_ and _Runtime Monitoring_. + +[NOTE] +==== +*New:* Amazon EKS Auto Mode integrates with GuardDuty. + +==== + + +*EKS Protection*:: +This feature provides threat detection coverage to help you protect Amazon EKS clusters by monitoring the associated [.noloc]`Kubernetes` audit logs. [.noloc]`Kubernetes` audit logs capture sequential actions within your cluster, including activities from users, applications using the [.noloc]`Kubernetes` API, and the control plane. For example, GuardDuty can identify that APIs called to potentially tamper with resources in a [.noloc]`Kubernetes` cluster were invoked by an unauthenticated user. ++ +When you enable EKS Protection, GuardDuty will be able to access your Amazon EKS audit logs only for continuous threat detection. If GuardDuty identifies a potential threat to your cluster, it generates an associated [.noloc]`Kubernetes` audit log _finding_ of a specific type. For more information about the types of findings available from [.noloc]`Kubernetes` audit logs, see link:guardduty/latest/ug/guardduty_finding-types-kubernetes.html[Kubernetes audit logs finding types,type="documentation"] in the Amazon GuardDuty User Guide. ++ +For more information, see link:guardduty/latest/ug/kubernetes-protection.html[EKS Protection,type="documentation"] in the Amazon GuardDuty User Guide. + + +*Runtime Monitoring*:: +This feature monitors and analyzes operating system-level, networking, and file events to help you detect potential threats in specific {aws} workloads in your environment. ++ +When you enable _Runtime Monitoring_ and install the GuardDuty agent in your Amazon EKS clusters, GuardDuty starts monitoring the runtime events associated with this cluster. Note that the GuardDuty agent and _Runtime Monitoring_ aren't available for Amazon EKS Hybrid Nodes, so _Runtime Monitoring_ isn't available for runtime events that occur on your hybrid nodes. If GuardDuty identifies a potential threat to your cluster, it generates an associated _Runtime Monitoring finding_. For example, a threat can potentially start by compromising a single container that runs a vulnerable web application. This web application might have access permissions to the underlying containers and workloads. In this scenario, incorrectly configured credentials could potentially lead to broader access to the account, and the data stored within it. ++ +To configure _Runtime Monitoring_, you install the GuardDuty agent to your cluster as an _Amazon EKS add-on_. For more information about the add-on, see <>. ++ +For more information, see link:guardduty/latest/ug/runtime-monitoring.html[Runtime Monitoring,type="documentation"] in the Amazon GuardDuty User Guide.
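To illustrate the add-on installation mentioned above, the following {aws} CLI sketch installs the GuardDuty security agent as an Amazon EKS add-on. The cluster name and [.noloc]`Kubernetes` version shown are placeholders; confirm the add-on name `aws-guardduty-agent` and the available versions with `describe-addon-versions` before you install it.

[source,bash,subs="verbatim,attributes"]
----
# Check which versions of the GuardDuty agent add-on are available for your Kubernetes version.
aws eks describe-addon-versions --addon-name aws-guardduty-agent --kubernetes-version 1.31

# Install the GuardDuty security agent as an Amazon EKS add-on on your cluster.
aws eks create-addon --cluster-name my-cluster --addon-name aws-guardduty-agent
----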
diff --git a/latest/ug/integrations/integration-resilience-hub.adoc b/latest/ug/integrations/integration-resilience-hub.adoc new file mode 100644 index 00000000..0d31c02b --- /dev/null +++ b/latest/ug/integrations/integration-resilience-hub.adoc @@ -0,0 +1,10 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[integration-resilience-hub,integration-resilience-hub.title]] += Assess EKS cluster resiliency with {aws} Resilience Hub +:info_doctype: section +:info_title: Assess EKS cluster resiliency with {aws} Resilience Hub + +{aws} Resilience Hub assesses the resiliency of an Amazon EKS cluster by analyzing its infrastructure. {aws} Resilience Hub uses the [.noloc]`Kubernetes` role-based access control (RBAC) configuration to assess the [.noloc]`Kubernetes` workloads deployed to your cluster. For more information, see link:resilience-hub/latest/userguide/enabling-eks-in-arh.html[Enabling {aws} Resilience Hub access to your Amazon EKS cluster,type="documentation"] in the {aws} Resilience Hub User Guide. diff --git a/latest/ug/integrations/integration-securitylake.adoc b/latest/ug/integrations/integration-securitylake.adoc new file mode 100644 index 00000000..40588b76 --- /dev/null +++ b/latest/ug/integrations/integration-securitylake.adoc @@ -0,0 +1,54 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[integration-securitylake,integration-securitylake.title]] += Centralize and analyze EKS security data with Security Lake +:info_doctype: section +:info_title: Centralize and analyze EKS security data with Security Lake +:info_abstract: Amazon Security Lake integrates with Amazon EKS to provide a centralized and standardized \ + solution for collecting, storing, and analyzing security data from clusters. By \ + enabling EKS control plane logging and adding EKS logs as a source in Security Lake, \ + users can gain valuable insights, detect potential threats, and enhance the \ + security posture of their Kubernetes environments. +:keywords: Amazon EKS, Amazon Security Lake, Kubernetes security, centralized security data, threat detection + +[abstract] +-- +Amazon Security Lake integrates with Amazon EKS to provide a centralized and standardized solution for collecting, storing, and analyzing security data from clusters. By enabling EKS control plane logging and adding EKS logs as a source in Security Lake, users can gain valuable insights, detect potential threats, and enhance the security posture of their [.noloc]`Kubernetes` environments. +-- + +Amazon Security Lake is a fully managed security data lake service that allows you to centralize security data from various sources, including Amazon EKS. By integrating Amazon EKS with Security Lake, you can gain deeper insights into the activities performed on your [.noloc]`Kubernetes` resources and enhance the security posture of your Amazon EKS clusters. + +[NOTE] +==== + +For more information about using Security Lake with Amazon EKS and setting up data sources, refer to the link:security-lake/latest/userguide/internal-sources.html#eks-eudit-logs[Amazon Security Lake documentation,type="documentation"]. + +==== + +[[sl-benefits,sl-benefits.title]] +== Benefits of using Security Lake with Amazon EKS + +*Centralized security data* -- Security Lake automatically collects and centralizes security data from your Amazon EKS clusters, along with data from other {aws} services, SaaS providers, on-premises sources, and third-party sources. This provides a comprehensive view of your security posture across your entire organization. + +*Standardized data format* -- Security Lake converts the collected data into the link:security-lake/latest/userguide/open-cybersecurity-schema-framework.html[Open Cybersecurity Schema Framework (OCSF) format,type="documentation"], which is a standard open-source schema. This normalization enables easier analysis and integration with other security tools and services. + +*Improved threat detection* -- By analyzing the centralized security data, including Amazon EKS control plane logs, you can detect potentially suspicious activities within your Amazon EKS clusters more effectively. This helps in identifying and responding to security incidents promptly. + +*Simplified data management* -- Security Lake manages the lifecycle of your security data with customizable retention and replication settings. This simplifies data management tasks and ensures that you retain the necessary data for compliance and auditing purposes. + +[[sl-enable,sl-enable.title]] +== Enabling Security Lake for Amazon EKS +. Enable Amazon EKS control plane logging for your EKS clusters. Refer to <> for detailed instructions. +.
link:security-lake/latest/userguide/internal-sources.html#add-internal-sources[Add Amazon EKS Audit Logs as a source in Security Lake.,type="documentation"] Security Lake will then start collecting in-depth information about the activities performed on the Kubernetes resources running in your EKS clusters. +. link:security-lake/latest/userguide/lifecycle-management.html[Configure retention and replication settings,type="documentation"] for your security data in Security Lake based on your requirements. +. Use the normalized OCSF data stored in Security Lake for incident response, security analytics, and integration with other {aws} services or third-party tools. For example, you can link:big-data/generate-security-insights-from-amazon-security-lake-data-using-amazon-opensearch-ingestion[Generate security insights from Amazon Security Lake data using Amazon OpenSearch Ingestion,type="blog"]. + + +[[sl-format,sl-format.title]] +== Analyzing EKS Logs in Security Lake + +Security Lake normalizes EKS log events to the OCSF format, making it easier to analyze and correlate the data with other security events. You can use various tools and services, such as Amazon Athena, Amazon QuickSight, or third-party security analytics tools, to query and visualize the normalized data. + +For more information about the OCSF mapping for EKS log events, refer to the https://github.com/ocsf/examples/tree/main/mappings/markdown/{aws}/v1.1.0/EKS Audit Logs[mapping reference] in the OCSF GitHub repository. diff --git a/latest/ug/integrations/integration-vpc-lattice.adoc b/latest/ug/integrations/integration-vpc-lattice.adoc new file mode 100644 index 00000000..84bc58b1 --- /dev/null +++ b/latest/ug/integrations/integration-vpc-lattice.adoc @@ -0,0 +1,10 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[integration-vpc-lattice,integration-vpc-lattice.title]] += Enable secure cross-cluster connectivity with Amazon VPC Lattice +:info_doctype: section +:info_title: Enable secure cross-cluster connectivity with Amazon VPC Lattice + +Amazon VPC Lattice is a fully managed application networking service built directly into the {aws} networking infrastructure that you can use to connect, secure, and monitor your services across multiple accounts and Virtual Private Clouds (VPCs). With Amazon EKS, you can use Amazon VPC Lattice through the {aws} Gateway API Controller, an implementation of the Kubernetes https://gateway-api.sigs.k8s.io/[Gateway API]. Using Amazon VPC Lattice, you can set up cross-cluster connectivity with standard [.noloc]`Kubernetes` semantics in a simple and consistent manner. To get started using Amazon VPC Lattice with Amazon EKS, see the https://www.gateway-api-controller.eks.aws.dev/[{aws} Gateway API Controller User Guide]. diff --git a/latest/ug/integrations/local-zones.adoc b/latest/ug/integrations/local-zones.adoc new file mode 100644 index 00000000..4df1cf27 --- /dev/null +++ b/latest/ug/integrations/local-zones.adoc @@ -0,0 +1,17 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[local-zones,local-zones.title]] += Launch low-latency EKS clusters with {aws} Local Zones +:info_doctype: section +:info_title: Launch low-latency EKS clusters with {aws} Local Zones + +An link:about-aws/global-infrastructure/localzones/[{aws} Local Zone,type="marketing"] is an extension of an {aws} Region in geographic proximity to your users. Local Zones have their own connections to the internet and support link:directconnect/[{aws} Direct Connect,type="marketing"]. Resources created in a Local Zone can serve local users with low-latency communications. For more information, see the link:local-zones/latest/ug/what-is-aws-local-zones.html[{aws} Local Zones User Guide,type="documentation"] and link:AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones[Local Zones,type="documentation"] in the _Amazon EC2 User Guide_. + +Amazon EKS supports certain resources in Local Zones. This includes <>, <>, Amazon EBS volumes, and Application Load Balancers (ALBs). We recommend that you consider the following when using Local Zones as part of your Amazon EKS cluster. + +* You can't create Fargate nodes in Local Zones with Amazon EKS. +* The Amazon EKS managed [.noloc]`Kubernetes` control plane always runs in the {aws} Region. The Amazon EKS managed [.noloc]`Kubernetes` control plane can't run in the Local Zone. Because Local Zones appear as a subnet within your VPC, [.noloc]`Kubernetes` sees your Local Zone resources as part of that subnet. +* The Amazon EKS [.noloc]`Kubernetes` cluster communicates with the Amazon EC2 instances you run in the {aws} Region or Local Zone using Amazon EKS managed link:AWSEC2/latest/UserGuide/using-eni.html[elastic network interfaces,type="documentation"]. To learn more about Amazon EKS networking architecture, see <>. +* Unlike regional subnets, Amazon EKS can't place network interfaces into your Local Zone subnets. This means that you must not specify Local Zone subnets when you create your cluster. diff --git a/latest/ug/manage-access/aws-access/service-accounts.adoc b/latest/ug/manage-access/aws-access/service-accounts.adoc new file mode 100644 index 00000000..25b1d116 --- /dev/null +++ b/latest/ug/manage-access/aws-access/service-accounts.adoc @@ -0,0 +1,1937 @@ +//!!NODE_ROOT
+include::../../attributes.txt[] + +[.topic] +[[service-accounts,service-accounts.title]] += Grant Kubernetes workloads access to {aws} using [.noloc]`Kubernetes` Service Accounts +:info_doctype: section +:info_title: Grant Kubernetes workloads access to {aws} using Kubernetes Service Accounts +:info_titleabbrev: Grant workloads access to {aws} + +A [.noloc]`Kubernetes` service account provides an identity for processes that run in a [.noloc]`Pod`. For more information, see https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin[Managing Service Accounts] in the [.noloc]`Kubernetes` documentation. If your [.noloc]`Pod` needs access to {aws} services, you can map the service account to an {aws} Identity and Access Management identity to grant that access. For more information, see <>. + +[[service-account-tokens,service-account-tokens.title]] +== Service account tokens + +The https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#bound-service-account-token-volume[BoundServiceAccountTokenVolume] feature is enabled by default in [.noloc]`Kubernetes` version `1.21` and later. This feature improves the security of service account tokens by allowing workloads running on [.noloc]`Kubernetes` to request JSON web tokens that are audience, time, and key bound. Service account tokens have an expiration of one hour. In earlier [.noloc]`Kubernetes` versions, the tokens didn't have an expiration. This means that clients that rely on these tokens must refresh the tokens within an hour. The following https://kubernetes.io/docs/reference/using-api/client-libraries/[Kubernetes client SDKs] refresh tokens automatically within the required time frame: + + + +* Go version `0.15.7` and later +* Python version `12.0.0` and later +* Java version `9.0.0` and later +* JavaScript version `0.10.3` and later +* Ruby `master` branch +* Haskell version `0.3.0.0` +* [.noloc]`C#` version `7.0.5` and later + +If your workload is using an earlier client version, then you must update it. To enable a smooth migration of clients to the newer time-bound service account tokens, [.noloc]`Kubernetes` adds an extended expiry period to the service account token over the default one hour. For Amazon EKS clusters, the extended expiry period is 90 days. Your Amazon EKS cluster's [.noloc]`Kubernetes` API server rejects requests with tokens that are greater than 90 days old. We recommend that you check your applications and their dependencies to make sure that the Kubernetes client SDKs are the same or later than the versions listed previously. + +When the API server receives requests with tokens that are greater than one hour old, it annotates the API audit log event with `annotations.authentication.k8s.io/stale-token`. The value of the annotation looks like the following example: + +[source,bash,subs="verbatim,attributes"] +---- +subject: system:serviceaccount:common:fluent-bit, seconds after warning threshold: 4185802. +---- + +If your cluster has <> enabled, then the annotations are in the audit logs.
You can use the following link:AmazonCloudWatch/latest/logs/AnalyzingLogData.html[CloudWatch Logs Insights,type="documentation"] query to identify all the [.noloc]`Pods` in your Amazon EKS cluster that are using stale tokens: + +[source,bash,subs="verbatim,attributes"] +---- +fields @timestamp +|filter @logStream like /kube-apiserver-audit/ +|filter @message like /seconds after warning threshold/ +|parse @message "subject: *, seconds after warning threshold:*\"" as subject, elapsedtime +---- + +The `subject` refers to the service account that the [.noloc]`Pod` used. The `elapsedtime` indicates the elapsed time (in seconds) after reading the latest token. The requests to the API server are denied when the `elapsedtime` exceeds 90 days (7,776,000 seconds). You should proactively update your applications' [.noloc]`Kubernetes` client SDK to use one of the versions listed previously that automatically refresh the token. If the service account token used is close to 90 days and you don't have sufficient time to update your client SDK versions before token expiration, then you can terminate existing [.noloc]`Pods` and create new ones. This results in refetching of the service account token, giving you an additional 90 days to update your client SDK versions. + +If the [.noloc]`Pod` is part of a deployment, the suggested way to terminate [.noloc]`Pods` while keeping high availability is to perform a rollout with the following command. Replace [.replaceable]`my-deployment` with the name of your deployment. + +[source,bash,subs="verbatim,attributes"] +---- +kubectl rollout restart deployment/my-deployment +---- + + +[[boundserviceaccounttoken-validated-add-on-versions,boundserviceaccounttoken-validated-add-on-versions.title]] +== Cluster add-ons + +The following cluster add-ons have been updated to use the [.noloc]`Kubernetes` client SDKs that automatically refetch service account tokens. We recommend making sure that the listed versions, or later versions, are installed on your cluster. + + + +* [.noloc]`Amazon VPC CNI plugin for Kubernetes` and metrics helper plugins version `1.8.0` and later. To check your current version or update it, see <> and https://github.com/aws/amazon-vpc-cni-k8s/blob/master/cmd/cni-metrics-helper/README.md[cni-metrics-helper]. +* [.noloc]`CoreDNS` version `1.8.4` and later. To check your current version or update it, see <>. +* [.noloc]`{aws} Load Balancer Controller` version `2.0.0` and later. To check your current version or update it, see <>. +* A current `kube-proxy` version. To check your current version or update it, see <>. +* {aws} for Fluent Bit version `2.25.0` or later. To update your current version, see https://github.com/aws/aws-for-fluent-bit/releases[Releases] on [.noloc]`GitHub`. +* Fluentd image version https://hub.docker.com/r/fluent/fluentd/tags?page=1&name=v1.14.6-1.2[1.14.6-1.2] or later and Fluentd filter plugin for Kubernetes metadata version https://rubygems.org/gems/fluent-plugin-kubernetes_metadata_filter/versions/2.11.1[2.11.1] or later. + + +[[service-accounts-iam,service-accounts-iam.title]] +== Granting {aws} Identity and Access Management permissions to workloads on Amazon Elastic Kubernetes Service clusters + +Amazon EKS provides two ways to grant {aws} Identity and Access Management permissions to workloads that run in Amazon EKS clusters: _IAM roles for service accounts_ and _EKS Pod Identities_.
+ + + +*IAM roles for service accounts*:: +_IAM roles for service accounts (IRSA)_ configures Kubernetes applications running on {aws} with fine-grained IAM permissions to access various other {aws} resources such as Amazon S3 buckets, Amazon DynamoDB tables, and more. You can run multiple applications together in the same Amazon EKS cluster, and ensure each application has only the minimum set of permissions that it needs. IRSA was built to support various [.noloc]`Kubernetes` deployment options supported by {aws} such as Amazon EKS, Amazon EKS Anywhere, Red Hat OpenShift Service on {aws}, and self-managed [.noloc]`Kubernetes` clusters on Amazon EC2 instances. Thus, IRSA was built using foundational {aws} services like IAM, and doesn't take any direct dependency on the Amazon EKS service and the EKS API. For more information, see <>. + + +*EKS Pod Identities*:: +EKS Pod Identity offers cluster administrators a simplified workflow for authenticating applications to access various other {aws} resources such as Amazon S3 buckets, Amazon DynamoDB tables, and more. EKS Pod Identity is for EKS only, and as a result, it simplifies how cluster administrators can configure Kubernetes applications to obtain IAM permissions. These permissions can now be easily configured with fewer steps directly through the {aws-management-console}, EKS API, and {aws} CLI, and there isn't any action to take inside the cluster in any [.noloc]`Kubernetes` objects. Cluster administrators don't need to switch between the EKS and IAM services, or use privileged IAM operations to configure permissions required by your applications. IAM roles can now be used across multiple clusters without the need to update the role trust policy when creating new clusters. IAM credentials supplied by EKS Pod Identity include role session tags, with attributes such as cluster name, namespace, and service account name. Role session tags enable administrators to author a single role that can work across service accounts by allowing access to {aws} resources based on matching tags. For more information, see <>. + + +[[service-accounts-iam-compare,service-accounts-iam-compare.title]] +=== Comparing EKS Pod Identity and IRSA + +At a high level, both EKS Pod Identity and IRSA enable you to grant IAM permissions to applications running on Kubernetes clusters. But they are fundamentally different in how you configure them, the limits supported, and the features enabled. Below, we compare some of the key facets of both solutions. + +[cols="1,1,1", options="header"] +|=== +|Attribute +|EKS Pod Identity +|IRSA + + +|Role extensibility +|You have to set up each role once to establish trust with the newly-introduced Amazon EKS service principal `pods.eks.amazonaws.com`. After this one-time step, you don't need to update the role's trust policy each time that it is used in a new cluster. +|You have to update the IAM role's trust policy with the new EKS cluster [.noloc]`OIDC` provider endpoint each time you want to use the role in a new cluster. + +|Cluster scalability +|EKS Pod Identity doesn't require users to set up an IAM OIDC provider, so this limit doesn't apply. +|Each EKS cluster has an [.noloc]`OpenID Connect` ([.noloc]`OIDC`) issuer URL associated with it. To use IRSA, a unique [.noloc]`OpenID Connect` provider needs to be created for each EKS cluster in IAM. IAM has a default global limit of 100 [.noloc]`OIDC` providers for each {aws} account.
If you plan to have more than 100 EKS clusters for each {aws} account with IRSA, then you will reach the IAM [.noloc]`OIDC` provider limit. + +|Role scalability +|EKS Pod Identity doesn't require users to define a trust relationship between an IAM role and a service account in the trust policy, so this limit doesn't apply. +|In IRSA, you define the trust relationship between an IAM role and service account in the role's trust policy. By default, the trust policy size is limited to `2048` characters. This means that you can typically define 4 trust relationships in a single trust policy. While you can get the trust policy length limit increased, you are typically limited to a max of 8 trust relationships within a single trust policy. + +|Role reusability +|{aws} STS temporary credentials supplied by EKS Pod Identity include role session tags, such as cluster name, namespace, and service account name. Role session tags enable administrators to author a single IAM role that can be used with multiple service accounts, with different effective permissions, by allowing access to {aws} resources based on tags attached to them. This is also called attribute-based access control (ABAC). For more information, see <>. +|{aws} STS session tags are not supported. You can reuse a role between clusters but every pod receives all of the permissions of the role. + +|Environments supported +|EKS Pod Identity is only available on Amazon EKS. +|IRSA can be used with [.noloc]`Kubernetes` deployment options such as Amazon EKS, Amazon EKS Anywhere, Red Hat OpenShift Service on {aws}, and self-managed [.noloc]`Kubernetes` clusters on Amazon EC2 instances. + +|EKS versions supported +|EKS [.noloc]`Kubernetes` versions `1.24` or later. For the specific platform versions, see <>. +|All of the supported EKS cluster versions. +|=== + +[.topic] +[[pod-identities,pod-identities.title]] +== Learn how [.noloc]`EKS Pod Identity` grants pods access to {aws} services + +[abstract] +-- +Learn how to provide {aws} service access to your Kubernetes workloads with Amazon EKS Pod Identities, offering least privilege access, credential isolation, and auditability for enhanced security. Discover the benefits and considerations of this identity management solution for your Amazon EKS clusters. +-- + +Applications in a Pod's containers can use an {aws} SDK or the {aws} CLI to make API requests to {aws} services using {aws} Identity and Access Management (IAM) permissions. Applications must sign their {aws} API requests with {aws} credentials. + +_EKS Pod Identities_ provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your {aws} credentials to the containers or using the Amazon EC2 instance's role, you associate an IAM role with a [.noloc]`Kubernetes` service account and configure your [.noloc]`Pods` to use the service account. + +video::aUjJSorBE70[youtube,align = center,height = 405,fileref = https://www.youtube.com/embed/aUjJSorBE70,width = 720] + +Each EKS Pod Identity association maps a role to a service account in a namespace in the specified cluster. If you have the same application in multiple clusters, you can make identical associations in each cluster without modifying the trust policy of the role. + +If a pod uses a service account that has an association, Amazon EKS sets environment variables in the containers of the pod. The environment variables configure the {aws} SDKs, including the {aws} CLI, to use the EKS Pod Identity credentials.
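For illustration, the following {aws} CLI sketch creates one such association. The cluster name, namespace, service account, and role ARN are placeholders; the full procedure, including the console workflow, appears later in this topic.

[source,bash,subs="verbatim,attributes"]
----
# Map an IAM role to a Kubernetes service account so that pods using the
# service account receive the role's permissions. All values shown are placeholders.
aws eks create-pod-identity-association \
    --cluster-name my-cluster \
    --namespace default \
    --service-account my-service-account \
    --role-arn arn:aws:iam::111122223333:role/my-pod-identity-role
----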
+ +[[pod-id-benefits,pod-id-benefits.title]] +=== Benefits of EKS Pod Identities + +EKS Pod Identities provide the following benefits: + + + +* *Least privilege* + – You can scope IAM permissions to a service account, and only [.noloc]`Pods` that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as `kiam` or `kube2iam`. +* *Credential isolation* + – A [.noloc]`Pod's` containers can only retrieve credentials for the IAM role that's associated with the service account that the container uses. A container never has access to credentials that are used by other containers in other [.noloc]`Pods`. When using Pod Identities, the [.noloc]`Pod's` containers also have the permissions assigned to the <>, unless you block [.noloc]`Pod` access to the link:AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html["Amazon EC2 Instance Metadata Service (IMDS)", type="documentation"]. For more information, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[Restrict access to the instance profile assigned to the worker node]. +* *Auditability* + – Access and event logging is available through {aws} CloudTrail to help facilitate retrospective auditing. + +EKS Pod Identity is a simpler method than <>, as this method doesn't use [.noloc]`OIDC` identity providers. EKS Pod Identity has the following enhancements: + + + +* *Independent operations* + – In many organizations, creating [.noloc]`OIDC` identity providers is the responsibility of a different team than the one that administers the [.noloc]`Kubernetes` clusters. EKS Pod Identity has clean separation of duties, where all configuration of EKS Pod Identity associations is done in Amazon EKS and all configuration of the IAM permissions is done in IAM. +* *Reusability* + – EKS Pod Identity uses a single IAM principal instead of the separate principals for each cluster that IAM roles for service accounts use. Your IAM administrator adds the following principal to the trust policy of any role to make it usable by EKS Pod Identities. ++ +[source,json,subs="verbatim,attributes"] +---- + "Principal": { + "Service": "pods.eks.amazonaws.com" + } +---- +* *Scalability* + -- Each set of temporary credentials is assumed by the [.noloc]`EKS Auth` service in EKS Pod Identity, instead of each {aws} SDK that you run in each pod. Then, the Amazon EKS Pod Identity Agent that runs on each node issues the credentials to the SDKs. Thus, the load is reduced to once for each node and isn't duplicated in each pod. For more details of the process, see <>. + +For a comparison of the two alternatives, see <>. + +[[pod-id-setup-overview,pod-id-setup-overview.title]] +=== Overview of setting up EKS Pod Identities + +Turn on EKS Pod Identities by completing the following procedures: + +. <> -- You only complete this procedure once for each cluster. You do not need to complete this step if EKS Auto Mode is enabled on your cluster. +. <> -- Complete this procedure for each unique set of permissions that you want an application to have. A sample trust policy for the role follows this list. +. <> -- Complete this procedure for each [.noloc]`Pod` that needs access to {aws} services. +. <> -- Confirm that the workload uses an {aws} SDK of a supported version and that the workload uses the default credential chain.
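As a sketch of the role that step 2 refers to, a trust policy along the following lines lets EKS Pod Identity assume the role. The `sts:TagSession` action is included because EKS Pod Identity adds role session tags to the credentials that it vends; check the current Amazon EKS documentation for the exact policy that your use case requires.

[source,json,subs="verbatim,attributes"]
----
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "pods.eks.amazonaws.com"
            },
            "Action": [
                "sts:AssumeRole",
                "sts:TagSession"
            ]
        }
    ]
}
----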
+ + +[[pod-id-considerations,pod-id-considerations.title]] +=== EKS Pod Identity considerations + +* You can associate one IAM role to each [.noloc]`Kubernetes` service account in each cluster. You can change which role is mapped to the service account by editing the EKS Pod Identity association. +* You can only associate roles that are in the same {aws} account as the cluster. You can delegate access from another account to the role in this account that you configure for EKS Pod Identities to use. For a tutorial about delegating access and `AssumeRole`, see link:IAM/latest/UserGuide/tutorial_cross-account-with-roles.html[Delegate access across {aws} accounts using IAM roles,type="documentation"] in the _IAM User Guide_. +* The EKS Pod Identity Agent is required. It runs as a [.noloc]`Kubernetes` `DaemonSet` on your nodes and only provides credentials to pods on the node that it runs on. For more information about EKS Pod Identity Agent compatibility, see the following section <>. +* If you are using Security Group for Pods along with Pod Identity Agent, you may need to set the `POD_SECURITY_GROUP_ENFORCING_MODE` Flag for the {aws} VPC CNI. For more information on security group for pods considerations, see <>. +* The EKS Pod Identity Agent uses the `hostNetwork` of the node and it uses port `80` and port `2703` on a link-local address on the node. This address is `169.254.170.23` for [.noloc]`IPv4` and `[fd00:ec2::23]` for [.noloc]`IPv6` clusters. ++ +If you disable `IPv6` addresses, or otherwise prevent localhost `IPv6` IP addresses, the agent can't start. To start the agent on nodes that can't use `IPv6`, follow the steps in <> to disable the `IPv6` configuration. + + +[[pod-id-cluster-versions,pod-id-cluster-versions.title]] +==== EKS Pod Identity cluster versions + +To use EKS Pod Identities, the cluster must have a platform version that is the same or later than the version listed in the following table, or a [.noloc]`Kubernetes` version that is later than the versions listed in the table. + +[cols="1,1", options="header"] +|=== +|Kubernetes version +|Platform version + + +|`1.31` +|`eks.4` + +|`1.30` +|`eks.2` + +|`1.29` +|`eks.1` + +|`1.28` +|`eks.4` + +|`1.27` +|`eks.8` + +|`1.26` +|`eks.9` + +|`1.25` +|`eks.10` + +|`1.24` +|`eks.13` +|=== + +[[pod-id-restrictions,pod-id-restrictions.title]] +==== EKS Pod Identity restrictions + +EKS Pod Identities are available on the following: + + + +* Amazon EKS cluster versions listed in the previous topic <>. +* Worker nodes in the cluster that are Linux Amazon EC2 instances. + +EKS Pod Identities aren't available on the following: + + + +* {aws} Outposts. +* Amazon EKS Anywhere. +* [.noloc]`Kubernetes` clusters that you create and run on Amazon EC2. The EKS Pod Identity components are only available on Amazon EKS. + +You can't use EKS Pod Identities with: + + + +* Pods that run anywhere except Linux Amazon EC2 instances. Linux and Windows pods that run on {aws} Fargate (Fargate) aren't supported. Pods that run on Windows Amazon EC2 instances aren't supported. + + + + +[.topic] +[[pod-id-how-it-works,pod-id-how-it-works.title]] +=== Understand how [.noloc]`EKS Pod Identity` works + +[abstract] +-- +Learn how Amazon EKS Pod Identity works to provide temporary credentials to your Kubernetes workloads, using an agent running on each node and the {aws} SDKs. 
+-- + +Amazon EKS Pod Identity associations provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. + +Amazon EKS Pod Identity provides credentials to your workloads with an additional _EKS Auth_ API and an agent pod that runs on each node. + +In your add-ons, such as _Amazon EKS add-ons_ and self-managed controller, operators, and other add-ons, the author needs to update their software to use the latest {aws} SDKs. For the list of compatibility between EKS Pod Identity and the add-ons produced by Amazon EKS, see the previous section <>. + +[[pod-id-credentials,pod-id-credentials.title]] +==== Using EKS Pod Identities in your code + +In your code, you can use the {aws} SDKs to access {aws} services. You write code to create a client for an {aws} service with an SDK, and by default the SDK searches in a chain of locations for {aws} Identity and Access Management credentials to use. After valid credentials are found, the search is stopped. For more information about the default locations used, see the link:sdkref/latest/guide/standardized-credentials.html#credentialProviderChain[Credential provider chain,type="documentation"] in the {aws} SDKs and Tools Reference Guide. + +EKS Pod Identities have been added to the _Container credential provider_ which is searched in a step in the default credential chain. If your workloads currently use credentials that are earlier in the chain of credentials, those credentials will continue to be used even if you configure an EKS Pod Identity association for the same workload. This way you can safely migrate from other types of credentials by creating the association first, before removing the old credentials. + +The container credentials provider provides temporary credentials from an agent that runs on each node. In Amazon EKS, the agent is the Amazon EKS Pod Identity Agent and on Amazon Elastic Container Service the agent is the `amazon-ecs-agent`. The SDKs use environment variables to locate the agent to connect to. + +In contrast, _IAM roles for service accounts_ provides a _web identity_ token that the {aws} SDK must exchange with {aws} Security Token Service by using `AssumeRoleWithWebIdentity`. + +[[pod-id-agent-pod,pod-id-agent-pod.title]] +==== How EKS Pod Identity Agent works with a [.noloc]`Pod` +. When Amazon EKS starts a new pod that uses a service account with an EKS Pod Identity association, the cluster adds the following content to the [.noloc]`Pod` manifest: ++ +[source,yaml,subs="verbatim,attributes"] +---- + env: + - name: AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE + value: "/var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token" + - name: AWS_CONTAINER_CREDENTIALS_FULL_URI + value: "http://169.254.170.23/v1/credentials" + volumeMounts: + - mountPath: "/var/run/secrets/pods.eks.amazonaws.com/serviceaccount/" + name: eks-pod-identity-token + volumes: + - name: eks-pod-identity-token + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + audience: pods.eks.amazonaws.com + expirationSeconds: 86400 # 24 hours + path: eks-pod-identity-token +---- +. [.noloc]`Kubernetes` selects which node to run the pod on. Then, the Amazon EKS Pod Identity Agent on the node uses the link:eks/latest/APIReference/API_auth_AssumeRoleForPodIdentity.html[AssumeRoleForPodIdentity,type="documentation"] action to retrieve temporary credentials from the EKS Auth API. +. 
The EKS Pod Identity Agent makes these credentials available for the {aws} SDKs that you run inside your containers. +. You use the SDK in your application without specifying a credential provider to use the default credential chain. Or, you specify the container credential provider. For more information about the default locations used, see the link:sdkref/latest/guide/standardized-credentials.html#credentialProviderChain[Credential provider chain,type="documentation"] in the {aws} SDKs and Tools Reference Guide. +. The SDK uses the environment variables to connect to the EKS Pod Identity Agent and retrieve the credentials. ++ +NOTE: If your workloads currently use credentials that are earlier in the chain of credentials, those credentials will continue to be used even if you configure an EKS Pod Identity association for the same workload. + + +[.topic] +[[pod-id-agent-setup,pod-id-agent-setup.title]] +=== Set up the Amazon EKS Pod Identity Agent + +[abstract] +-- +Learn how to set up the EKS Pod Identity Agent for your cluster. +-- + +Amazon EKS Pod Identity associations provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. + +Amazon EKS Pod Identity provides credentials to your workloads with an additional _EKS Auth_ API and an agent pod that runs on each node. + +[TIP] +==== +You do not need to install the EKS Pod Identity Agent on EKS Auto Mode Clusters. This capability is built into EKS Auto Mode. +==== + + +[[pod-id-agent-considerations,pod-id-agent-considerations.title]] +==== Considerations + +* By default, the EKS Pod Identity Agent listens on an `IPv4` and `IPv6` address for pods to request credentials. The agent uses the loopback (localhost) IP address `169.254.170.23` for `IPv4` and the localhost IP address `[fd00:ec2::23]` for `IPv6`. +* If you disable `IPv6` addresses, or otherwise prevent localhost `IPv6` IP addresses, the agent can't start. To start the agent on nodes that can't use `IPv6`, follow the steps in <> to disable the `IPv6` configuration. + + +[[pod-id-agent-add-on-create,pod-id-agent-add-on-create.title]] +==== Creating the Amazon EKS Pod Identity Agent + +[[pod-id-agent-prereqs,pod-id-agent-prereqs.title]] +===== Agent prerequisites + +* An existing Amazon EKS cluster. To deploy one, see <>. The cluster version and platform version must be the same or later than the versions listed in <>. +* The node role has permissions for the agent to do the `AssumeRoleForPodIdentity` action in the EKS Auth API. You can use the <> or add a custom policy similar to the following: ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "eks-auth:AssumeRoleForPodIdentity" + ], + "Resource": "*" + } + ] +} +---- ++ +This action can be limited by tags to restrict which roles can be assumed by pods that use the agent. +* The nodes can reach and download images from Amazon ECR. The container image for the add-on is in the registries listed in <>. ++ +Note that you can change the image location and provide `imagePullSecrets` for EKS add-ons in the *Optional configuration settings* in the {aws-management-console}, and in the `--configuration-values` in the {aws} CLI. +* The nodes can reach the Amazon EKS Auth API. For private clusters, the `eks-auth` endpoint in {aws} PrivateLink is required. + + +===== Setup agent with {aws} console +. 
Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. In the left navigation pane, select *Clusters*, and then select the name of the cluster that you want to configure the EKS Pod Identity Agent add-on for. +. Choose the *Add-ons* tab. +. Choose *Get more add-ons*. +. Select the box in the top right of the add-on box for EKS Pod Identity Agent and then choose *Next*. +. On the *Configure selected add-ons settings* page, select any version in the *Version* dropdown list. +. (Optional) Expand *Optional configuration settings* to enter additional configuration. For example, you can provide an alternative container image location and `ImagePullSecrets`. The [.noloc]`JSON Schema` with accepted keys is shown in *Add-on configuration schema*. ++ +Enter the configuration keys and values in *Configuration values*. +. Choose *Next*. +. Confirm that the EKS Pod Identity Agent pods are running on your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get pods -n kube-system | grep 'eks-pod-identity-agent' +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +eks-pod-identity-agent-gmqp7 1/1 Running 1 (24h ago) 24h +eks-pod-identity-agent-prnsh 1/1 Running 1 (24h ago) 24h +---- ++ +You can now use EKS Pod Identity associations in your cluster. For more information, see <>. + + +===== Setup agent with {aws} CLI +. Run the following {aws} CLI command. Replace `my-cluster` with the name of your cluster. ++ +[source,shell,subs="verbatim,attributes"] +---- +aws eks create-addon --cluster-name my-cluster --addon-name eks-pod-identity-agent --addon-version v1.0.0-eksbuild.1 +---- ++ +NOTE: The EKS Pod Identity Agent doesn't use the `service-account-role-arn` for _IAM roles for service accounts_. You must provide the EKS Pod Identity Agent with permissions in the node role. +. Confirm that the EKS Pod Identity Agent pods are running on your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get pods -n kube-system | grep 'eks-pod-identity-agent' +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +eks-pod-identity-agent-gmqp7 1/1 Running 1 (24h ago) 24h +eks-pod-identity-agent-prnsh 1/1 Running 1 (24h ago) 24h +---- ++ +You can now use EKS Pod Identity associations in your cluster. For more information, see <>. + + +[.topic] +[[pod-id-association,pod-id-association.title]] +=== Assign an [.noloc]`IAM` role to a [.noloc]`Kubernetes` service account + +[abstract] +-- +Learn how to configure a Kubernetes service account to assume an {aws} IAM role with Amazon EKS Pod Identity for securely accessing {aws} services from your pods. +-- + +This topic covers how to configure a [.noloc]`Kubernetes` service account to assume an {aws} Identity and Access Management (IAM) role with EKS Pod Identity. Any [.noloc]`Pods` that are configured to use the service account can then access any {aws} service that the role has permissions to access. + +To create an EKS Pod Identity association, there is only a single step; you create the association in EKS through the {aws-management-console}, {aws} CLI, {aws} SDKs, {aws} CloudFormation and other tools. There isn't any data or metadata about the associations inside the cluster in any [.noloc]`Kubernetes` objects and you don't add any annotations to the service accounts. + + + +* An existing cluster. If you don't have one, you can create one by following one of the guides in <>. 
+* The IAM principal that is creating the association must have `iam:PassRole`. +* The latest version of the {aws} CLI installed and configured on your device or {aws} CloudShell. You can check your current version with `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the {aws} Command Line Interface User Guide. The {aws} CLI version installed in the {aws} CloudShell may also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the {aws} CloudShell User Guide. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* An existing `kubectl` `config` file that contains your cluster configuration. To create a `kubectl` `config` file, see <>. + + +[[pod-id-association-create,pod-id-association-create.title]] +==== Create a Pod Identity association ({aws} Console) + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. In the left navigation pane, select *Clusters*, and then select the name of the cluster that you want to configure the EKS Pod Identity Agent add-on for. +. Choose the *Access* tab. +. In the *Pod Identity associations*, choose *Create*. +. For the *IAM role*, select the IAM role with the permissions that you want the workload to have. ++ +NOTE: The list only contains roles that have the following trust policy which allows EKS Pod Identity to use them. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowEksAuthToAssumeRoleForPodIdentity", + "Effect": "Allow", + "Principal": { + "Service": "pods.eks.amazonaws.com" + }, + "Action": [ + "sts:AssumeRole", + "sts:TagSession" + ] + } + ] +} +---- ++ +`sts:AssumeRole` -- EKS Pod Identity uses `AssumeRole` to assume the IAM role before passing the temporary credentials to your pods. ++ +`sts:TagSession` -- EKS Pod Identity uses `TagSession` to include _session tags_ in the requests to {aws} STS. ++ +You can use these tags in the _condition keys_ in the trust policy to restrict which service accounts, namespaces, and clusters can use this role. ++ +For a list of Amazon EKS condition keys, see link:service-authorization/latest/reference/list_amazonelastickubernetesservice.html#amazonelastickubernetesservice-policy-keys[Conditions defined by Amazon Elastic Kubernetes Service,type="documentation"] in the _Service Authorization Reference_. To learn which actions and resources you can use a condition key with, see link:service-authorization/latest/reference/list_amazonelastickubernetesservice.html#amazonelastickubernetesservice-actions-as-permissions[Actions defined by Amazon Elastic Kubernetes Service,type="documentation"]. +. 
For the *[.noloc]`Kubernetes` namespace*, select the [.noloc]`Kubernetes` namespace that contains the service account and workload. Optionally, you can specify a namespace by name that doesn't exist in the cluster. +. For the *[.noloc]`Kubernetes` service account*, select the [.noloc]`Kubernetes` service account to use. The manifest for your [.noloc]`Kubernetes` workload must specify this service account. Optionally, you can specify a service account by name that doesn't exist in the cluster. +. (Optional) For the *Tags*, choose *Add tag* to add metadata in a key and value pair. These tags are applied to the association and can be used in IAM policies. ++ +You can repeat this step to add multiple tags. +. Choose *Create*. + + +==== Create a Pod Identity association ({aws} CLI) +. If you want to associate an existing IAM policy to your IAM role, skip to the next step. ++ +Create an IAM policy. You can create your own policy, or copy an {aws} managed policy that already grants some of the permissions that you need and customize it to your specific requirements. For more information, see link:IAM/latest/UserGuide/access_policies_create.html[Creating IAM policies,type="documentation"] in the _IAM User Guide_. ++ +.. Create a file that includes the permissions for the {aws} services that you want your [.noloc]`Pods` to access. For a list of all actions for all {aws} services, see the link:service-authorization/latest/reference/[Service Authorization Reference,type="documentation"]. ++ +You can run the following command to create an example policy file that allows read-only access to an Amazon S3 bucket. You can optionally store configuration information or a bootstrap script in this bucket, and the containers in your [.noloc]`Pod` can read the file from the bucket and load it into your application. If you want to create this example policy, copy the following contents to your device. Replace [.replaceable]`my-pod-secrets-bucket` with your bucket name and run the command. ++ +[source,json,subs="verbatim,attributes"] +---- +cat >my-policy.json <my-service-account.yaml <trust-relationship.json <> + +[.topic] +[[pod-id-configure-pods,pod-id-configure-pods.title]] +=== Configure [.noloc]`pods` to access {aws} services with service accounts + +[abstract] +-- +Learn how to configure Pods to use a Kubernetes service account with an associated IAM role for accessing {aws} services on Amazon EKS. +-- + +If a [.noloc]`Pod` needs to access {aws} services, then you must configure it to use a [.noloc]`Kubernetes` service account. The service account must be associated to an {aws} Identity and Access Management (IAM) role that has permissions to access the {aws} services. + + + +* An existing cluster. If you don't have one, you can create one using one of the guides in <>. +* An existing [.noloc]`Kubernetes` service account and an EKS Pod Identity association that associates the service account with an IAM role. The role must have an associated IAM policy that contains the permissions that you want your [.noloc]`Pods` to have to use {aws} services. For more information about how to create the service account and role, and configure them, see <>. +* The latest version of the {aws} CLI installed and configured on your device or {aws} CloudShell. You can check your current version with `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. 
To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the {aws} Command Line Interface User Guide. The {aws} CLI version installed in the {aws} CloudShell may also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the {aws} CloudShell User Guide. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* An existing `kubectl` `config` file that contains your cluster configuration. To create a `kubectl` `config` file, see <>. +. Use the following command to create a deployment manifest that you can deploy a [.noloc]`Pod` to confirm configuration with. Replace the [.replaceable]`example values` with your own values. ++ +[source,yaml,subs="verbatim,attributes"] +---- +cat >my-deployment.yaml <>, the [.noloc]`Pod` still has access to these credentials. For more information, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[Restrict access to the instance profile assigned to the worker node]. ++ +If your [.noloc]`Pods` can't interact with the services as you expected, complete the following steps to confirm that everything is properly configured. ++ +.. Confirm that your [.noloc]`Pods` use an {aws} SDK version that supports assuming an IAM role through an EKS Pod Identity association. For more information, see <>. +.. Confirm that the deployment is using the service account. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe deployment my-app | grep "Service Account" +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +Service Account: my-service-account +---- + + +[.topic] +[[pod-id-abac,pod-id-abac.title]] +=== Grant [.noloc]`pods` access to {aws} resources based on tags + +[abstract] +-- +Learn how to use Amazon EKS Pod Identity to attach tags for cluster, namespace, and service account to temporary credentials, enabling attribute-based access control (ABAC) for EKS pods to {aws} resources based on matching tags. +-- + +EKS Pod Identity attaches tags to the temporary credentials to each pod with attributes such as cluster name, namespace, service account name. These role session tags enable administrators to author a single role that can work across service accounts by allowing access to {aws} resources based on matching tags. By adding support for role session tags, customers can enforce tighter security boundaries between clusters, and workloads within clusters, while reusing the same IAM roles and IAM policies. + +For example, the following policy allows the `s3:GetObject` action if the object is tagged with the name of the EKS cluster. 
+
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:ListBucket"
+            ],
+            "Resource": "*"
+        },
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:GetObject",
+                "s3:GetObjectTagging"
+            ],
+            "Resource": "*",
+            "Condition": {
+                "StringEquals": {
+                    "s3:ExistingObjectTag/eks-cluster-name": "${aws:PrincipalTag/eks-cluster-name}"
+                }
+            }
+        }
+    ]
+}
+----
+
+
+[[pod-id-abac-tags,pod-id-abac-tags.title]]
+==== List of session tags added by EKS Pod Identity
+
+The following list contains all of the keys for tags that are added to the `AssumeRole` request made by Amazon EKS. To use these tags in policies, use `${aws:PrincipalTag/` followed by the key, for example `${aws:PrincipalTag/kubernetes-namespace}`.
+
+
+
+* `eks-cluster-arn`
+* `eks-cluster-name`
+* `kubernetes-namespace`
+* `kubernetes-service-account`
+* `kubernetes-pod-name`
+* `kubernetes-pod-uid`
+
+
+[[pod-id-abac-chaining,pod-id-abac-chaining.title]]
+==== Cross-account tags
+
+All of the session tags that are added by EKS Pod Identity are _transitive_; the tag keys and values are passed to any `AssumeRole` actions that your workloads use to switch roles into another account. You can use these tags in policies in other accounts to limit access in cross-account scenarios. For more information, see link:IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining[Chaining roles with session tags,type="documentation"] in the _IAM User Guide_.
+
+[[pod-id-abac-custom-tags,pod-id-abac-custom-tags.title]]
+==== Custom tags
+
+EKS Pod Identity can't add additional custom tags to the `AssumeRole` action that it performs. However, tags that you apply to the IAM role are always available through the same format: `${aws:PrincipalTag/` followed by the key, for example `${aws:PrincipalTag/MyCustomTag}`.
+
+[NOTE]
+====
+
+Tags added to the session through the `sts:AssumeRole` request take precedence in the case of conflict. For example, say that:
+
+
+
+* Amazon EKS adds a key `eks-cluster-name` and value `my-cluster` to the session when EKS assumes the customer role and
+* You add an `eks-cluster-name` tag to the IAM role with the value `my-own-cluster`.
+
+In this case, the former takes precedence and the value for the `eks-cluster-name` tag will be `my-cluster`.
+
+====
+
+[.topic]
+[[pod-id-minimum-sdk,pod-id-minimum-sdk.title]]
+=== Use pod identity with the {aws} SDK
+
+[[pod-id-using-creds,pod-id-using-creds.title]]
+==== Using EKS Pod Identity credentials
+
+To use the credentials from an EKS Pod Identity association, your code can use any {aws} SDK to create a client for an {aws} service, and by default the SDK searches in a chain of locations for {aws} Identity and Access Management credentials to use. The EKS Pod Identity credentials will be used if you don't specify a credential provider when you create the client or otherwise initialize the SDK.
+
+This works because EKS Pod Identities have been added to the _Container credential provider_ which is searched in a step in the default credential chain. If your workloads currently use credentials that are earlier in the chain of credentials, those credentials will continue to be used even if you configure an EKS Pod Identity association for the same workload.
+
+For more information about how EKS Pod Identities work, see <>.
+
+When using <>, the containers in your [.noloc]`Pods` must use an {aws} SDK version that supports assuming an IAM role from the EKS Pod Identity Agent. 
Make sure that you're using the following versions, or later, for your {aws} SDK:
+
+
+
+* Java (Version 2) – https://github.com/aws/aws-sdk-java-v2/releases/tag/2.21.30[2.21.30]
+* Java – https://github.com/aws/aws-sdk-java/releases/tag/1.12.746[1.12.746]
+* Go v1 – https://github.com/aws/aws-sdk-go/releases/tag/v1.47.11[v1.47.11]
+* Go v2 – https://github.com/aws/aws-sdk-go-v2/releases/tag/release-2023-11-14[release-2023-11-14]
+* Python (Boto3) – https://github.com/boto/boto3/releases/tag/1.34.41[1.34.41]
+* Python (botocore) – https://github.com/boto/botocore/releases/tag/1.34.41[1.34.41]
+* {aws} CLI – https://github.com/aws/aws-cli/releases/tag/1.30.0[1.30.0]
++
+{aws} CLI – https://github.com/aws/aws-cli/releases/tag/2.15.0[2.15.0]
+* JavaScript v2 – https://github.com/aws/aws-sdk-js/releases/tag/v2.1550.0[2.1550.0]
+* JavaScript v3 – https://github.com/aws/aws-sdk-js-v3/releases/tag/v3.458.0[v3.458.0]
+* Kotlin – https://github.com/awslabs/aws-sdk-kotlin/releases/tag/v1.0.1[v1.0.1]
+* Ruby – https://github.com/aws/aws-sdk-ruby/blob/version-3/gems/aws-sdk-core/CHANGELOG.md#31880-2023-11-22[3.188.0]
+* Rust – https://github.com/awslabs/aws-sdk-rust/releases/tag/release-2024-03-13[release-2024-03-13]
+* {cpp} – https://github.com/aws/aws-sdk-cpp/releases/tag/1.11.263[1.11.263]
+* .NET – https://github.com/aws/aws-sdk-net/releases/tag/3.7.734.0[3.7.734.0]
+* PowerShell – https://www.powershellgallery.com/packages/{aws}.Tools.Common/4.1.502[4.1.502]
+* PHP – https://github.com/aws/aws-sdk-php/releases/tag/3.287.1[3.287.1]
+
+To ensure that you're using a supported SDK, follow the installation instructions for your preferred SDK at link:tools/[Tools to Build on {aws},type="marketing"] when you build your containers.
+
+For a list of add-ons that support EKS Pod Identity, see <>.
+
+[.topic]
+[[pod-id-agent-config-ipv6,pod-id-agent-config-ipv6.title]]
+=== Disable `IPv6` in the EKS Pod Identity Agent
+
+[[pod-id-console,pod-id-console.title]]
+==== {aws-management-console}
+. To disable `IPv6` in the EKS Pod Identity Agent, add the following configuration to the *Optional configuration settings* of the EKS Add-on.
++
+.. Open the link:eks/home#/clusters[Amazon EKS console,type="console"].
+.. In the left navigation pane, select *Clusters*, and then select the name of the cluster that you want to configure the add-on for.
+.. Choose the *Add-ons* tab.
+.. Select the box in the top right of the EKS Pod Identity Agent add-on box and then choose *Edit*.
+.. On the *Configure EKS Pod Identity Agent* page:
++
+... Select the *Version* that you'd like to use. We recommend that you keep the same version as the previous step, and update the version and configuration in separate actions.
+... Expand the *Optional configuration settings*.
+... Enter the JSON key `"agent":` and value of a nested JSON object with a key `"additionalArgs":` in *Configuration values*. The resulting text must be a valid JSON object. If this key and value are the only data in the text box, surround the key and value with curly braces `{ }`. The following example binds the agent to the `IPv4` address only:
++
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "agent": {
+        "additionalArgs": {
+            "-b": "169.254.170.23"
+        }
+    }
+}
+----
++
+This configuration sets the `IPv4` address to be the only address used by the agent.
+.. To apply the new configuration by replacing the EKS Pod Identity Agent pods, choose *Save changes*. 
++ +Amazon EKS applies changes to the EKS Add-ons by using a _rollout_ of the [.noloc]`Kubernetes` `DaemonSet` for EKS Pod Identity Agent. You can track the status of the rollout in the *Update history* of the add-on in the {aws-management-console} and with `kubectl rollout status daemonset/eks-pod-identity-agent --namespace kube-system`. ++ +`kubectl rollout` has the following commands: ++ +[source,shell,subs="verbatim,attributes"] +---- +$ kubectl rollout + +history -- View rollout history +pause -- Mark the provided resource as paused +restart -- Restart a resource +resume -- Resume a paused resource +status -- Show the status of the rollout +undo -- Undo a previous rollout +---- ++ +If the rollout takes too long, Amazon EKS will undo the rollout, and a message with the type of *Addon Update* and a status of *Failed* will be added to the *Update history* of the add-on. To investigate any issues, start from the history of the rollout, and run `kubectl logs` on a EKS Pod Identity Agent pod to see the logs of EKS Pod Identity Agent. +. If the new entry in the *Update history* has a status of *Successful*, then the rollout has completed and the add-on is using the new configuration in all of the EKS Pod Identity Agent pods. + +[[pod-id-cli,pod-id-cli.title]] +==== {aws} CLI +. To disable `IPv6` in the EKS Pod Identity Agent, add the following configuration to the *configuration values* of the EKS Add-on. ++ +Run the following {aws} CLI command. Replace `my-cluster` with the name of your cluster and the IAM role ARN with the role that you are using. ++ +[source,shell,subs="verbatim,attributes"] +---- +aws eks update-addon --cluster-name my-cluster --addon-name eks-pod-identity-agent \ + --resolve-conflicts PRESERVE --configuration-values '{"agent":{"additionalArgs": { "-b": "169.254.170.23"}}}' +---- ++ +This configuration sets the `IPv4` address to be the only address used by the agent. ++ +Amazon EKS applies changes to the EKS Add-ons by using a _rollout_ of the [.noloc]`Kubernetes` DaemonSet for EKS Pod Identity Agent. You can track the status of the rollout in the *Update history* of the add-on in the {aws-management-console} and with `kubectl rollout status daemonset/eks-pod-identity-agent --namespace kube-system`. ++ +`kubectl rollout` has the following commands: ++ +[source,shell,subs="verbatim,attributes"] +---- +kubectl rollout + +history -- View rollout history +pause -- Mark the provided resource as paused +restart -- Restart a resource +resume -- Resume a paused resource +status -- Show the status of the rollout +undo -- Undo a previous rollout +---- ++ +If the rollout takes too long, Amazon EKS will undo the rollout, and a message with the type of *Addon Update* and a status of *Failed* will be added to the *Update history* of the add-on. To investigate any issues, start from the history of the rollout, and run `kubectl logs` on a EKS Pod Identity Agent pod to see the logs of EKS Pod Identity Agent. + + +[.topic] +[[pod-id-role,pod-id-role.title]] +=== Create [.noloc]`IAM` role with trust policy required by [.noloc]`EKS Pod Identity` + +[abstract] +-- +Learn how to configure the IAM trust policy for Amazon EKS Pod Identity to allow Kubernetes pods to assume IAM roles and access {aws} resources securely using Amazon EKS condition keys. 
+-- + +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowEksAuthToAssumeRoleForPodIdentity", + "Effect": "Allow", + "Principal": { + "Service": "pods.eks.amazonaws.com" + }, + "Action": [ + "sts:AssumeRole", + "sts:TagSession" + ] + } + ] +} +---- + +*`sts:AssumeRole`*:: +EKS Pod Identity uses `AssumeRole` to assume the IAM role before passing the temporary credentials to your pods. + + +*`sts:TagSession`*:: +EKS Pod Identity uses `TagSession` to include _session tags_ in the requests to {aws} STS. ++ +You can use these tags in the _condition keys_ in the trust policy to restrict which service accounts, namespaces, and clusters can use this role. ++ +For a list of Amazon EKS condition keys, see link:service-authorization/latest/reference/list_amazonelastickubernetesservice.html#amazonelastickubernetesservice-policy-keys[Conditions defined by Amazon Elastic Kubernetes Service,type="documentation"] in the _Service Authorization Reference_. To learn which actions and resources you can use a condition key with, see link:service-authorization/latest/reference/list_amazonelastickubernetesservice.html#amazonelastickubernetesservice-actions-as-permissions[Actions defined by Amazon Elastic Kubernetes Service,type="documentation"]. + + +[.topic] +[[iam-roles-for-service-accounts,iam-roles-for-service-accounts.title]] +== IAM roles for service accounts + +[abstract] +-- +Learn how applications in your [.noloc]`Pods` can access {aws} services. +-- + +Applications in a [.noloc]`Pod's` containers can use an {aws} SDK or the {aws} CLI to make API requests to {aws} services using {aws} Identity and Access Management (IAM) permissions. Applications must sign their {aws} API requests with {aws} credentials. IAM roles for service accounts provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your {aws} credentials to the containers or using the Amazon EC2 instance's role, you associate an IAM role with a [.noloc]`Kubernetes` service account and configure your [.noloc]`Pods` to use the service account. You can't use IAM roles for service accounts with <>. + +IAM roles for service accounts provide the following benefits: + +* *Least privilege* + – You can scope IAM permissions to a service account, and only [.noloc]`Pods` that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as `kiam` or `kube2iam`. +* *Credential isolation* + – A [.noloc]`Pod's` containers can only retrieve credentials for the IAM role that's associated with the service account that the container uses. A container never has access to credentials that are used by other containers in other [.noloc]`Pods`. When using IAM roles for service accounts, the [.noloc]`Pod's` containers also have the permissions assigned to the <>, unless you block [.noloc]`Pod` access to the link:AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html[Amazon EC2 Instance Metadata Service (IMDS),type="documentation"]. For more information, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[Restrict access to the instance profile assigned to the worker node]. +* *Auditability* + – Access and event logging is available through {aws} CloudTrail to help ensure retrospective auditing. 
+
+Enable IAM roles for service accounts by completing the following procedures:
+
+. <> – You only complete this procedure once for each cluster.
++
+[NOTE]
+====
+If you enabled the EKS VPC endpoint, the EKS OIDC service endpoint can't be accessed from inside that VPC. Consequently, your operations such as creating an OIDC provider with `eksctl` in the VPC will not work and will result in a timeout when attempting to request `https://oidc.eks.[.replaceable]``region``.amazonaws.com`. An example error message follows:
+
+[source,bash,subs="verbatim,attributes"]
+----
+server cant find oidc.eks.region.amazonaws.com: NXDOMAIN
+----
+
+To complete this step, you can run the command outside the VPC, for example in {aws} CloudShell or on a computer connected to the internet. Alternatively, you can create a split-horizon conditional resolver in the VPC, such as Route 53 Resolver, to use a different resolver for the OIDC issuer URL and not use the VPC DNS for it. For an example of conditional forwarding in [.noloc]`CoreDNS`, see the https://github.com/aws/containers-roadmap/issues/2038[Amazon EKS feature request] on [.noloc]`GitHub`.
+====
+
+. <> – Complete this procedure for each unique set of permissions that you want an application to have.
+
+. <> – Complete this procedure for each [.noloc]`Pod` that needs access to {aws} services.
+
+. <> – Confirm that the workload uses an {aws} SDK of a supported version and that the workload uses the default credential chain.
+
+
+[[irsa-oidc-background,irsa-oidc-background.title]]
+=== IAM, [.noloc]`Kubernetes`, and [.noloc]`OpenID Connect` ([.noloc]`OIDC`) background information
+
+In 2014, {aws} Identity and Access Management added support for federated identities using [.noloc]`OpenID Connect` ([.noloc]`OIDC`). This feature allows you to authenticate {aws} API calls with supported identity providers and receive a valid [.noloc]`OIDC` [.noloc]`JSON` web token ([.noloc]`JWT`). You can pass this token to the {aws} STS `AssumeRoleWithWebIdentity` API operation and receive IAM temporary role credentials. You can use these credentials to interact with any {aws} service, including Amazon S3 and DynamoDB.
+
+Each JWT is signed by a signing key pair. The keys are served on the OIDC provider managed by Amazon EKS, and the private key rotates every 7 days. Amazon EKS keeps the public keys until they expire. If you connect external OIDC clients, be aware that you need to refresh the signing keys before the public key expires. Learn how to <>.
+
+[.noloc]`Kubernetes` has long used service accounts as its own internal identity system. [.noloc]`Pods` can authenticate with the [.noloc]`Kubernetes` API server using an auto-mounted token (which was a non-[.noloc]`OIDC` [.noloc]`JWT`) that only the [.noloc]`Kubernetes` API server could validate. These legacy service account tokens don't expire, and rotating the signing key is a difficult process. In [.noloc]`Kubernetes` version `1.12`, support was added for a new `ProjectedServiceAccountToken` feature. This feature is an [.noloc]`OIDC` [.noloc]`JSON` web token that also contains the service account identity and supports a configurable audience.
+
+Amazon EKS hosts a public [.noloc]`OIDC` discovery endpoint for each cluster that contains the signing keys for the `ProjectedServiceAccountToken` [.noloc]`JSON` web tokens so external systems, such as IAM, can validate and accept the [.noloc]`OIDC` tokens that are issued by [.noloc]`Kubernetes`. 
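+
+For example, you can view this discovery endpoint yourself. The following commands are a minimal sketch that assumes a cluster named [.replaceable]`my-cluster` and that `curl` is available; the `/.well-known/openid-configuration` path is the standard [.noloc]`OpenID Connect` discovery location, and the `jwks_uri` field in the response points to the public signing keys.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Retrieve the cluster's OIDC issuer URL.
+ISSUER_URL=$(aws eks describe-cluster --name my-cluster \
+  --query "cluster.identity.oidc.issuer" --output text)
+
+# Fetch the public OIDC discovery document for the cluster.
+curl -s "${ISSUER_URL}/.well-known/openid-configuration"
+----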
+ +[.topic] +[[enable-iam-roles-for-service-accounts,enable-iam-roles-for-service-accounts.title]] +=== Create an IAM [.noloc]`OIDC` provider for your cluster + +[abstract] +-- +Learn how to create an {aws} Identity and Access Management [.noloc]`OpenID Connect` provider for your cluster. +-- + +Your cluster has an https://openid.net/connect/[OpenID Connect] ([.noloc]`OIDC`) issuer URL associated with it. To use {aws} Identity and Access Management (IAM) roles for service accounts, an IAM [.noloc]`OIDC` provider must exist for your cluster's [.noloc]`OIDC` issuer URL. + + + +* An existing Amazon EKS cluster. To deploy one, see <>. +* Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. The {aws} CLI version that is installed in {aws} CloudShell might also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the _{aws} CloudShell User Guide_. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* An existing `kubectl` `config` file that contains your cluster configuration. To create a `kubectl` `config` file, see <>. + +You can create an IAM [.noloc]`OIDC` provider for your cluster using `eksctl` or the {aws-management-console}. + +==== Create OIDC provider (eksctl) + +. Version `{eksctl-min-version}` or later of the `eksctl` command line tool installed on your device or {aws} CloudShell. To install or update `eksctl`, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. ++ +. Determine the [.noloc]`OIDC` issuer ID for your cluster. ++ +Retrieve your cluster's [.noloc]`OIDC` issuer ID and store it in a variable. Replace [.replaceable]`my-cluster` with your own value. ++ +[source,bash,subs="verbatim,attributes"] +---- +cluster_name=my-cluster +---- +[source,bash,subs="verbatim,attributes"] +---- +oidc_id=$(aws eks describe-cluster --name $cluster_name --query "cluster.identity.oidc.issuer" --output text | cut -d '/' -f 5) +---- +[source,bash,subs="verbatim,attributes"] +---- +echo $oidc_id +---- +. Determine whether an IAM [.noloc]`OIDC` provider with your cluster's issuer ID is already in your account. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam list-open-id-connect-providers | grep $oidc_id | cut -d "/" -f4 +---- ++ +If output is returned, then you already have an IAM [.noloc]`OIDC` provider for your cluster and you can skip the next step. 
If no output is returned, then you must create an IAM [.noloc]`OIDC` provider for your cluster. +. Create an IAM [.noloc]`OIDC` identity provider for your cluster with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl utils associate-iam-oidc-provider --cluster $cluster_name --approve +---- ++ +NOTE: If you enabled the EKS VPC endpoint, the EKS OIDC service endpoint couldn't be accessed from inside that VPC. Consequently, your operations such as creating an OIDC provider with `eksctl` in the VPC will not work and will result in a timeout when attempting to request `https://oidc.eks.[.replaceable]``region``.amazonaws.com`. An example error message follows: + +[source,bash,subs="verbatim,attributes"] +---- +** server cant find oidc.eks.region.amazonaws.com: NXDOMAIN +---- + +To complete this step, you can run the command outside the VPC, for example in {aws} CloudShell or on a computer connected to the internet. Alternatively, you can create a split-horizon conditional resolver in the VPC, such as Route 53 Resolver to use a different resolver for the OIDC Issuer URL and not use the VPC DNS for it. For an example of conditional forwarding in [.noloc]`CoreDNS`, see the https://github.com/aws/containers-roadmap/issues/2038[Amazon EKS feature request] on [.noloc]`GitHub`. + + +==== Create OIDC provider ({aws} Console) + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. In the left pane, select *Clusters*, and then select the name of your cluster on the *Clusters* page. +. In the *Details* section on the *Overview* tab, note the value of the *OpenID Connect provider URL*. +. Open the IAM console at https://console.aws.amazon.com/iam/. +. In the left navigation pane, choose *Identity Providers* under *Access management*. If a *Provider* is listed that matches the URL for your cluster, then you already have a provider for your cluster. If a provider isn't listed that matches the URL for your cluster, then you must create one. +. To create a provider, choose *Add provider*. +. For *Provider type*, select *[.noloc]`OpenID Connect`*. +. For *Provider URL*, enter the [.noloc]`OIDC` provider URL for your cluster. +. For *Audience*, enter `sts.amazonaws.com`. +. (Optional) Add any tags, for example a tag to identify which cluster is for this provider. +. Choose *Add provider*. + + +Next step: +<> + +[.topic] +[[associate-service-account-role,associate-service-account-role.title]] +=== Assign [.noloc]`IAM` roles to [.noloc]`Kubernetes` service accounts + +[abstract] +-- +Discover how to configure a Kubernetes service account to assume an IAM role, enabling Pods to securely access {aws} services with granular permissions. +-- + +This topic covers how to configure a [.noloc]`Kubernetes` service account to assume an {aws} Identity and Access Management (IAM) role. Any [.noloc]`Pods` that are configured to use the service account can then access any {aws} service that the role has permissions to access. + +==== Prerequisites + +* An existing cluster. If you don't have one, you can create one by following one of the guides in <>. +* An existing IAM [.noloc]`OpenID Connect` ([.noloc]`OIDC`) provider for your cluster. To learn if you already have one or how to create one, see <>. +* Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version | cut -d / -f2 | cut -d ' ' -f1`. 
Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. The {aws} CLI version that is installed in {aws} CloudShell might also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the _{aws} CloudShell User Guide_. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* An existing `kubectl` `config` file that contains your cluster configuration. To create a `kubectl` `config` file, see <>. + + +[[irsa-associate-role-procedure,irsa-associate-role-procedure.title]] +==== Step 1: Create IAM Policy + +If you want to associate an existing IAM policy to your IAM role, skip to the next step. + + +. Create an IAM policy. You can create your own policy, or copy an {aws} managed policy that already grants some of the permissions that you need and customize it to your specific requirements. For more information, see link:IAM/latest/UserGuide/access_policies_create.html[Creating IAM policies,type="documentation"] in the _IAM User Guide_. ++ +. Create a file that includes the permissions for the {aws} services that you want your [.noloc]`Pods` to access. For a list of all actions for all {aws} services, see the link:service-authorization/latest/reference/[Service Authorization Reference,type="documentation"]. ++ +You can run the following command to create an example policy file that allows read-only access to an Amazon S3 bucket. You can optionally store configuration information or a bootstrap script in this bucket, and the containers in your [.noloc]`Pod` can read the file from the bucket and load it into your application. If you want to create this example policy, copy the following contents to your device. Replace [.replaceable]`my-pod-secrets-bucket` with your bucket name and run the command. ++ +[source,json,subs="verbatim,attributes"] +---- +cat >my-policy.json <my-service-account.yaml <> for more information. ++ +[source,json,subs="verbatim,attributes"] +---- +cat >trust-relationship.json <>. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl annotate serviceaccount -n $namespace $service_account eks.amazonaws.com/role-arn={arn-aws}iam::$account_id:role/my-role +---- +. (Optional) <>. {aws} recommends using a regional {aws} STS endpoint instead of the global endpoint. This reduces latency, provides built-in redundancy, and increases session token validity. + + +[[irsa-confirm-role-configuration,irsa-confirm-role-configuration.title]] +==== Step 3: Confirm configuration +. Confirm that the IAM role's trust policy is configured correctly. 
++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam get-role --role-name my-role --query Role.AssumeRolePolicyDocument +---- ++ +An example output is as follows. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "{arn-aws}iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub": "system:serviceaccount:default:my-service-account", + "oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud": "sts.amazonaws.com" + } + } + } + ] +} +---- +. Confirm that the policy that you attached to your role in a previous step is attached to the role. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam list-attached-role-policies --role-name my-role --query AttachedPolicies[].PolicyArn --output text +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +{arn-aws}iam::111122223333:policy/my-policy +---- +. Set a variable to store the Amazon Resource Name (ARN) of the policy that you want to use. Replace [.replaceable]`my-policy` with the name of the policy that you want to confirm permissions for. ++ +[source,bash,subs="verbatim,attributes"] +---- +export policy_arn={arn-aws}iam::111122223333:policy/my-policy +---- +. View the default version of the policy. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam get-policy --policy-arn $policy_arn +---- ++ +An example output is as follows. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Policy": { + "PolicyName": "my-policy", + "PolicyId": "EXAMPLEBIOWGLDEXAMPLE", + "Arn": "{arn-aws}iam::111122223333:policy/my-policy", + "Path": "/", + "DefaultVersionId": "v1", + [...] + } +} +---- +. View the policy contents to make sure that the policy includes all the permissions that your [.noloc]`Pod` needs. If necessary, replace [.replaceable]`1` in the following command with the version that's returned in the previous output. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam get-policy-version --policy-arn $policy_arn --version-id v1 +---- ++ +An example output is as follows. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": "{arn-aws}s3:::my-pod-secrets-bucket" + } + ] +} +---- ++ +If you created the example policy in a previous step, then your output is the same. If you created a different policy, then the [.replaceable]`example` content is different. +. Confirm that the [.noloc]`Kubernetes` service account is annotated with the role. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe serviceaccount my-service-account -n default +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +Name: my-service-account +Namespace: default +Annotations: eks.amazonaws.com/role-arn: {arn-aws}iam::111122223333:role/my-role +Image pull secrets: +Mountable secrets: my-service-account-token-qqjfl +Tokens: my-service-account-token-qqjfl +[...] 
+---- + + +==== Next steps + +* <> + +[.topic] +[[pod-configuration,pod-configuration.title]] +=== Configure [.noloc]`Pods` to use a [.noloc]`Kubernetes` service account + +[abstract] +-- +Learn how to configure your [.noloc]`Pods` to use a [.noloc]`Kubernetes` service account that you allowed to assume an {aws} Identity and Access Management role. +-- + +If a [.noloc]`Pod` needs to access {aws} services, then you must configure it to use a [.noloc]`Kubernetes` service account. The service account must be associated to an {aws} Identity and Access Management (IAM) role that has permissions to access the {aws} services. + + + +* An existing cluster. If you don't have one, you can create one using one of the guides in <>. +* An existing IAM [.noloc]`OpenID Connect` ([.noloc]`OIDC`) provider for your cluster. To learn if you already have one or how to create one, see <>. +* An existing [.noloc]`Kubernetes` service account that's associated with an IAM role. The service account must be annotated with the Amazon Resource Name (ARN) of the IAM role. The role must have an associated IAM policy that contains the permissions that you want your [.noloc]`Pods` to have to use {aws} services. For more information about how to create the service account and role, and configure them, see <>. +* Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. The {aws} CLI version that is installed in {aws} CloudShell might also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the _{aws} CloudShell User Guide_. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* An existing `kubectl` `config` file that contains your cluster configuration. To create a `kubectl` `config` file, see <>. +. Use the following command to create a deployment manifest that you can deploy a [.noloc]`Pod` to confirm configuration with. Replace the [.replaceable]`example values` with your own values. ++ +[source,yaml,subs="verbatim,attributes"] +---- +cat >my-deployment.yaml <>. +.. Confirm that the [.noloc]`Pod` has a web identity token file mount. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe pod my-app-6f4dfff6cb-76cv9 | grep AWS_WEB_IDENTITY_TOKEN_FILE: +---- ++ +An example output is as follows. 
++ +[source,bash,subs="verbatim,attributes"] +---- +AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token +---- ++ +The `kubelet` requests and stores the token on behalf of the [.noloc]`Pod`. By default, the `kubelet` refreshes the token if the token is older than 80 percent of its total time to live or older than 24 hours. You can modify the expiration duration for any account other than the default service account by using the settings in your [.noloc]`Pod` spec. For more information, see https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#serviceaccount-token-volume-projection[Service Account Token Volume Projection] in the [.noloc]`Kubernetes` documentation. ++ +The https://github.com/aws/amazon-eks-pod-identity-webhook#amazon-eks-pod-identity-webhook[Amazon EKS Pod Identity Webhook] on the cluster watches for [.noloc]`Pods` that use a service account with the following annotation: ++ +[source,bash,subs="verbatim,attributes"] +---- +eks.amazonaws.com/role-arn: {arn-aws}iam::111122223333:role/my-role +---- ++ +The webhook applies the previous environment variables to those [.noloc]`Pods`. Your cluster doesn't need to use the webhook to configure the environment variables and token file mounts. You can manually configure [.noloc]`Pods` to have these environment variables. The <> look for these environment variables first in the credential chain provider. The role credentials are used for [.noloc]`Pods` that meet this criteria. +. Confirm that your [.noloc]`Pods` can interact with the {aws} services using the permissions that you assigned in the IAM policy attached to your role. ++ +NOTE: When a [.noloc]`Pod` uses {aws} credentials from an IAM role that's associated with a service account, the {aws} CLI or other SDKs in the containers for that [.noloc]`Pod` use the credentials that are provided by that role. If you don't restrict access to the credentials that are provided to the <>, the [.noloc]`Pod` still has access to these credentials. For more information, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[Restrict access to the instance profile assigned to the worker node]. ++ +If your [.noloc]`Pods` can't interact with the services as you expected, complete the following steps to confirm that everything is properly configured. ++ +.. Confirm that your [.noloc]`Pods` use an {aws} SDK version that supports assuming an IAM role through an [.noloc]`OpenID Connect` web identity token file. For more information, see <>. +.. Confirm that the deployment is using the service account. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe deployment my-app | grep "Service Account" +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +Service Account: my-service-account +---- +.. If your [.noloc]`Pods` still can't access services, review the <> that are described in <> to confirm that your role and service account are configured properly. + + +[.topic] +[[configure-sts-endpoint,configure-sts-endpoint.title]] +=== Configure the {aws} Security Token Service endpoint for a service account + +If you're using a [.noloc]`Kubernetes` service account with <>, then you can configure the type of {aws} Security Token Service endpoint that's used by the service account if your cluster and platform version are the same or later than those listed in the following table. 
If your [.noloc]`Kubernetes` or platform version are earlier than those listed in the table, then your service accounts can only use the global endpoint. + +[cols="1,1,1", options="header"] +|=== +|Kubernetes version +|Platform version +|Default endpoint type + + +|`1.31` +|`eks.4` +|Regional + +|`1.30` +|`eks.2` +|Regional + +|`1.29` +|`eks.1` +|Regional + +|`1.28` +|`eks.1` +|Regional + +|`1.27` +|`eks.1` +|Regional + +|`1.26` +|`eks.1` +|Regional + +|`1.25` +|`eks.1` +|Regional + +|`1.24` +|`eks.2` +|Regional + +|`1.23` +|`eks.1` +|Regional +|=== + +{aws} recommends using the regional {aws} STS endpoints instead of the global endpoint. This reduces latency, provides built-in redundancy, and increases session token validity. The {aws} Security Token Service must be active in the {aws} Region where the [.noloc]`Pod` is running. Moreover, your application must have built-in redundancy for a different {aws} Region in the event of a failure of the service in the {aws} Region. For more information, see link:IAM/latest/UserGuide/id_credentials_temp_enable-regions.html[Managing {aws} STS in an {aws} Region,type="documentation"] in the IAM User Guide. + + + +* An existing cluster. If you don't have one, you can create one using one of the guides in <>. +* An existing IAM OIDC provider for your cluster. For more information, see <>. +* An existing [.noloc]`Kubernetes` service account configured for use with the <> feature. + +The following examples all use the aws-node [.noloc]`Kubernetes` service account used by the <>. You can replace the [.replaceable]`example values` with your own service accounts, [.noloc]`Pods`, namespaces, and other resources. + +. Select a [.noloc]`Pod` that uses a service account that you want to change the endpoint for. Determine which {aws} Region that the [.noloc]`Pod` runs in. Replace [.replaceable]`aws-node-6mfgv` with your [.noloc]`Pod` name and [.replaceable]`kube-system` with your [.noloc]`Pod's` namespace. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe pod aws-node-6mfgv -n kube-system |grep Node: +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +ip-192-168-79-166.us-west-2/192.168.79.166 +---- ++ +In the previous output, the [.noloc]`Pod` is running on a node in the us-west-2 {aws} Region. +. Determine the endpoint type that the [.noloc]`Pod's` service account is using. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe pod aws-node-6mfgv -n kube-system |grep AWS_STS_REGIONAL_ENDPOINTS +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +AWS_STS_REGIONAL_ENDPOINTS: regional +---- ++ +If the current endpoint is global, then `global` is returned in the output. If no output is returned, then the default endpoint type is in use and has not been overridden. +. If your cluster or platform version are the same or later than those listed in the table, then you can change the endpoint type used by your service account from the default type to a different type with one of the following commands. Replace [.replaceable]`aws-node` with the name of your service account and [.replaceable]`kube-system` with the namespace for your service account. 
++
+** If your default or current endpoint type is global and you want to change it to regional:
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl annotate serviceaccount -n kube-system aws-node eks.amazonaws.com/sts-regional-endpoints=true
+----
+// Not using [.noloc]`Pods'` because the ' character seems to mess up the processing.
++
+If you're using <> to generate pre-signed S3 URLs in your application running in Pods' containers, the format of the URL for regional endpoints is similar to the following example:
++
+[source,none,subs="verbatim,attributes"]
+----
+https://bucket.s3.us-west-2.amazonaws.com/path?...&X-Amz-Credential=your-access-key-id/date/us-west-2/s3/aws4_request&...
+----
+** If your default or current endpoint type is regional and you want to change it to global:
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl annotate serviceaccount -n kube-system aws-node eks.amazonaws.com/sts-regional-endpoints=false
+----
++
+If your application is explicitly making requests to {aws} STS global endpoints and you don't override the default behavior of using regional endpoints in Amazon EKS clusters, then requests will fail with an error. For more information, see <>.
+// Not using [.noloc]`Pods'` because the ' character seems to mess up the processing.
++
+If you're using <> to generate pre-signed S3 URLs in your application running in Pods' containers, the format of the URL for global endpoints is similar to the following example:
++
+[source,none,subs="verbatim,attributes"]
+----
+https://bucket.s3.amazonaws.com/path?...&X-Amz-Credential=your-access-key-id/date/us-west-2/s3/aws4_request&...
+----
++
+If you have automation that expects the pre-signed URL in a certain format, or if your application or downstream dependencies that use pre-signed URLs have expectations for the {aws} Region targeted, then make the necessary changes to use the appropriate {aws} STS endpoint.
+. Delete and re-create any existing [.noloc]`Pods` that are associated with the service account to apply the credential environment variables. The mutating webhook doesn't apply them to [.noloc]`Pods` that are already running. You can replace [.replaceable]`kube-system` and [.replaceable]`-l k8s-app=aws-node` with the information for the [.noloc]`Pods` that you set your annotation for.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl delete pods -n kube-system -l k8s-app=aws-node
+----
+. Confirm that all of the [.noloc]`Pods` restarted.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get pods -n kube-system -l k8s-app=aws-node
+----
+. View the environment variables for one of the [.noloc]`Pods`. Verify that the `AWS_STS_REGIONAL_ENDPOINTS` value is what you set it to in a previous step.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl describe pod aws-node-kzbtr -n kube-system |grep AWS_STS_REGIONAL_ENDPOINTS
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+AWS_STS_REGIONAL_ENDPOINTS=regional
+----
+
+
+[.topic]
+[[cross-account-access,cross-account-access.title]]
+=== Authenticate to another account with IRSA
+
+[abstract]
+--
+Learn how to configure cross-account IAM permissions for Amazon EKS clusters by creating an identity provider from another account's cluster or using chained AssumeRole operations, enabling secure access to {aws} resources across multiple accounts.
+--
+
+You can configure cross-account IAM permissions either by creating an identity provider from another account's cluster or by using chained `AssumeRole` operations. In the following examples, _Account A_ owns an Amazon EKS cluster that supports IAM roles for service accounts. [.noloc]`Pods` that are running on that cluster must assume IAM permissions from _Account B_.
+
+.Create an identity provider from another account's cluster
+====
+In this example, Account A provides Account B with the OpenID Connect (OIDC) issuer URL from their cluster. Account B follows the instructions in <> and <> using the OIDC issuer URL from Account A's cluster. Then, a cluster administrator annotates the service account in Account A's cluster to use the role from Account B ([.replaceable]`444455556666`).
+
+[source,yaml,subs="verbatim,attributes"]
+----
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  annotations:
+    eks.amazonaws.com/role-arn: {arn-aws}iam::444455556666:role/account-b-role
+----
+
+====
+
+.Use chained `AssumeRole` operations
+====
+In this example, Account B creates an IAM policy with the permissions to give to [.noloc]`Pods` in Account A's cluster. Account B ([.replaceable]`444455556666`) attaches that policy to an IAM role with a trust relationship that allows `AssumeRole` permissions to Account A ([.replaceable]`111122223333`).
+
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "{aws}": "{arn-aws}iam::111122223333:root"
+            },
+            "Action": "sts:AssumeRole",
+            "Condition": {}
+        }
+    ]
+}
+----
+
+Account A creates a role with a trust policy that gets credentials from the identity provider created with the cluster's OIDC issuer address.
+
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "Federated": "{arn-aws}iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE"
+            },
+            "Action": "sts:AssumeRoleWithWebIdentity"
+        }
+    ]
+}
+----
+
+Account A attaches a policy to that role with the following permissions to assume the role that Account B created.
+
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": "sts:AssumeRole",
+            "Resource": "{arn-aws}iam::444455556666:role/account-b-role"
+        }
+    ]
+}
+----
+
+The application code for [.noloc]`Pods` to assume Account B's role uses two profiles: `account_b_role` and `account_a_role`. The `account_b_role` profile uses the `account_a_role` profile as its source. For the {aws} CLI, the `~/.aws/config` file is similar to the following.
+
+[source,none,subs="verbatim,attributes"]
+----
+[profile account_b_role]
+source_profile = account_a_role
+role_arn={arn-aws}iam::444455556666:role/account-b-role
+
+[profile account_a_role]
+web_identity_token_file = /var/run/secrets/eks.amazonaws.com/serviceaccount/token
+role_arn={arn-aws}iam::111122223333:role/account-a-role
+----
+
+To specify chained profiles for other {aws} SDKs, consult the documentation for the SDK that you're using. For more information, see link:developer/tools/[Tools to Build on {aws},type="marketing"].
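+
+To confirm that the chained profiles resolve as expected, one option is to call `aws sts get-caller-identity` from a shell in one of the [.noloc]`Pod's` containers. This is a minimal check, not part of the required setup; it assumes that the {aws} CLI is available in the container image and uses the profile names from the preceding `~/.aws/config` example.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Should return an assumed-role ARN for account-a-role in Account A (111122223333).
+aws sts get-caller-identity --profile account_a_role
+
+# Should return an assumed-role ARN for account-b-role in Account B (444455556666).
+aws sts get-caller-identity --profile account_b_role
+----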
+
+====
+
+[.topic]
+[[iam-roles-for-service-accounts-minimum-sdk,iam-roles-for-service-accounts-minimum-sdk.title]]
+=== Use IRSA with the {aws} SDK
+
+.Using the credentials
+To use the credentials from IAM roles for service accounts, your code can use any {aws} SDK to create a client for an {aws} service. By default, the SDK searches a chain of locations for {aws} Identity and Access Management credentials to use. The IAM roles for service accounts credentials are used if you don't specify a credential provider when you create the client or otherwise initialize the SDK.
+
+This works because IAM roles for service accounts have been added as a step in the default credential chain. If your workloads currently use credentials that are earlier in the chain of credentials, those credentials will continue to be used even if you configure IAM roles for service accounts for the same workload.
+
+The SDK automatically exchanges the service account [.noloc]`OIDC` token for temporary credentials from {aws} Security Token Service by using the `AssumeRoleWithWebIdentity` action. Amazon EKS and this SDK action continue to rotate the temporary credentials by renewing them before they expire.
+
+When using <>, the containers in your [.noloc]`Pods` must use an {aws} SDK version that supports assuming an IAM role through an [.noloc]`OpenID Connect` web identity token file. Make sure that you're using the following versions, or later, for your {aws} SDK:
+
+* Java (Version 2) – https://github.com/aws/aws-sdk-java-v2/releases/tag/2.10.11[2.10.11]
+* Java – https://github.com/aws/aws-sdk-java/releases/tag/1.11.704[1.11.704]
+* Go – https://github.com/aws/aws-sdk-go/releases/tag/v1.23.13[1.23.13]
+* Python (Boto3) – https://github.com/boto/boto3/releases/tag/1.9.220[1.9.220]
+* Python (botocore) – https://github.com/boto/botocore/releases/tag/1.12.200[1.12.200]
+* {aws} CLI – https://github.com/aws/aws-cli/releases/tag/1.16.232[1.16.232]
+* Node – https://github.com/aws/aws-sdk-js/releases/tag/v2.525.0[2.525.0] and https://github.com/aws/aws-sdk-js-v3/releases/tag/v3.27.0[3.27.0]
+* Ruby – https://github.com/aws/aws-sdk-ruby/blob/version-3/gems/aws-sdk-core/CHANGELOG.md#3580-2019-07-01[3.58.0]
+* {cpp} – https://github.com/aws/aws-sdk-cpp/releases/tag/1.7.174[1.7.174]
+* .NET – https://github.com/aws/aws-sdk-net/releases/tag/3.3.659.1[3.3.659.1] – You must also include `AWSSDK.SecurityToken`.
+* PHP – https://github.com/aws/aws-sdk-php/releases/tag/3.110.7[3.110.7]
+
+Many popular [.noloc]`Kubernetes` add-ons, such as the https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler[Cluster Autoscaler], the <>, and the <>, support IAM roles for service accounts.
+
+To ensure that you're using a supported SDK, follow the installation instructions for your preferred SDK at link:tools/[Tools to Build on {aws},type="marketing"] when you build your containers.
+
+[.topic]
+[[irsa-fetch-keys,irsa-fetch-keys.title]]
+=== Fetch signing keys to validate [.noloc]`OIDC` tokens
+
+[abstract]
+--
+Discover how to fetch the OIDC public signing keys (JSON Web Key Set) required to validate the ProjectedServiceAccountToken for Amazon EKS clusters, enabling external systems to authenticate with IAM roles for Kubernetes service accounts.
+--
+
+[.noloc]`Kubernetes` issues a `ProjectedServiceAccountToken` to each [.noloc]`Kubernetes` [.noloc]`Service Account`. This token is an [.noloc]`OIDC` token, which in turn is a type of [.noloc]`JSON Web Token (JWT)`.
Amazon EKS hosts a public [.noloc]`OIDC` endpoint for each cluster that contains the signing keys for the token so external systems can validate it. + +To validate a `ProjectedServiceAccountToken`, you need to fetch the [.noloc]`OIDC` public signing keys, also called the [.noloc]`JSON Web Key Set (JWKS)`. Use these keys in your application to validate the token. For example, you can use the https://pyjwt.readthedocs.io/en/latest/[PyJWT Python library] to validate tokens using these keys. For more information on the `ProjectedServiceAccountToken`, see <>. + +==== Prerequisites + +* An existing {aws} Identity and Access Management (IAM) [.noloc]`OpenID Connect` ([.noloc]`OIDC`) provider for your cluster. To determine whether you already have one, or to create one, see <>. +* *{aws} CLI* -- A command line tool for working with {aws} services, including Amazon EKS. For more information, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] in the {aws} Command Line Interface User Guide. After installing the {aws} CLI, we recommend that you also configure it. For more information, see link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the {aws} Command Line Interface User Guide. + +==== Procedure + +. Retrieve the [.noloc]`OIDC` URL for your Amazon EKS cluster using the {aws} CLI. ++ +[source,bash,subs="verbatim,attributes"] +---- +$ aws eks describe-cluster --name my-cluster --query 'cluster.identity.oidc.issuer' +"https://oidc.eks.us-west-2.amazonaws.com/id/8EBDXXXX00BAE" +---- +. Retrieve the public signing key using [.noloc]`curl`, or a similar tool. The result is a https://www.rfc-editor.org/rfc/rfc7517#section-5[JSON Web Key Set (JWKS)]. ++ +IMPORTANT: Amazon EKS throttles calls to the [.noloc]`OIDC` endpoint. You should cache the public signing key. Respect the `cache-control` header included in the response. ++ +IMPORTANT: Amazon EKS rotates the [.noloc]`OIDC` signing key every seven days. ++ +[source,bash,subs="verbatim,attributes"] +---- +$ curl https://oidc.eks.us-west-2.amazonaws.com/id/8EBDXXXX00BAE/keys +{"keys":[{"kty":"RSA","kid":"2284XXXX4a40","use":"sig","alg":"RS256","n":"wklbXXXXMVfQ","e":"AQAB"}]} +---- diff --git a/latest/ug/manage-access/cluster-auth.adoc b/latest/ug/manage-access/cluster-auth.adoc new file mode 100644 index 00000000..af75691b --- /dev/null +++ b/latest/ug/manage-access/cluster-auth.adoc @@ -0,0 +1,83 @@ +//!!NODE_ROOT +include::../attributes.txt[] +[.topic] +[[cluster-auth,cluster-auth.title]] += Learn how access control works in Amazon EKS +:doctype: book +:sectnums: +:toc: left +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . +:info_doctype: chapter +:info_title: Learn how access control works in Amazon EKS +:info_titleabbrev: Manage access +:info_abstract: Learn how to manage access to your EKS cluster. First, this includes granting \ + developers or external services access to Kubernetes. Second, this includes granting \ + Kubernetes workloads access to {aws} services. + +[abstract] +-- +Learn how to manage access to your EKS cluster. First, this includes granting developers or external services access to Kubernetes. Second, this includes granting Kubernetes workloads access to {aws} services. +-- + +Learn how to manage access to your Amazon EKS cluster. 
Using Amazon EKS requires knowledge of how both [.noloc]`Kubernetes` and {aws} Identity and Access Management ({aws} IAM) handle access control.
+
+*This section includes:*
+
+*xref:grant-k8s-access[Grant IAM users and roles access to Kubernetes APIs,linkend=grant-k8s-access]* -- Learn how to enable applications or users to authenticate to the [.noloc]`Kubernetes` API. You can use access entries, the aws-auth ConfigMap, or an external OIDC provider.
+
+*<>* -- Learn how to configure the {aws-management-console} to communicate with your Amazon EKS cluster. Use the console to view [.noloc]`Kubernetes` resources in the cluster, such as namespaces, nodes, and [.noloc]`Pods`.
+
+*<>* -- Learn how to configure kubectl to communicate with your Amazon EKS cluster. Use the {aws} CLI to create a kubeconfig file.
+
+*xref:service-accounts[Grant Kubernetes workloads access to {aws} using Kubernetes Service Accounts,linkend=service-accounts]* -- Learn how to associate a [.noloc]`Kubernetes` service account with {aws} IAM Roles. You can use Pod Identity or IAM Roles for Service Accounts (IRSA).
+
+== Common Tasks
+
+* Grant developers access to the [.noloc]`Kubernetes` API. View [.noloc]`Kubernetes` resources in the {aws-management-console}.
++
+** Solution: <> to associate [.noloc]`Kubernetes` RBAC permissions with {aws} IAM Users or Roles.
+* Configure kubectl to talk to an Amazon EKS cluster using {aws} Credentials.
++
+** Solution: Use the {aws} CLI to <>.
+* Use an external identity provider, such as Ping Identity, to authenticate users to the [.noloc]`Kubernetes` API.
++
+** Solution: <>.
+* Grant workloads on your [.noloc]`Kubernetes` cluster the ability to call {aws} APIs.
++
+** Solution: <> to associate an {aws} IAM Role with a [.noloc]`Kubernetes` Service Account.
+
+== Background
+
+* https://kubernetes.io/docs/concepts/security/service-accounts/[Learn how Kubernetes Service Accounts work.]
+* https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Review the Kubernetes Role Based Access Control (RBAC) Model]
+* For more information about managing access to {aws} resources, see the link:IAM/latest/UserGuide/intro-structure.html[{aws} IAM User Guide,type="documentation"]. Alternatively, take a free https://explore.skillbuilder.aws/learn/course/external/view/elearning/120/introduction-to-aws-identity-and-access-management-iam[introductory training on using {aws} IAM].
+
+== Considerations for EKS Auto Mode
+
+EKS Auto Mode integrates with EKS Pod Identity and EKS access entries.
+
+* EKS Auto Mode uses access entries to grant the EKS control plane Kubernetes permissions. For example, the access policies enable EKS Auto Mode to read information about network endpoints and services.
+** You cannot disable access entries on an EKS Auto Mode cluster.
+** You can optionally enable the `aws-auth` `ConfigMap`.
+** The access entries for EKS Auto Mode are automatically configured. You can view these access entries, but you cannot modify them (see the example after this list).
+** If you use a NodeClass to create a custom Node IAM Role, you need to create an access entry for the role using the AmazonEKSAutoNodePolicy access policy.
+* If you want to grant workloads permissions for {aws} services, use EKS Pod Identity.
+** You do not need to install the Pod Identity agent on EKS Auto Mode clusters.
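+
+The following is a minimal sketch of how you can inspect these automatically configured access entries with the {aws} CLI. Replace [.replaceable]`my-cluster` with the name of your cluster; the role name in the second command is a placeholder for one of the principal ARNs returned by the first command.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# List every access entry on the cluster, including the entries that EKS Auto Mode manages.
+aws eks list-access-entries --cluster-name my-cluster
+
+# Describe a single access entry to see its type, username, and Kubernetes groups.
+aws eks describe-access-entry --cluster-name my-cluster --principal-arn {arn-aws}iam::111122223333:role/my-node-role
+----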
+ +include::k8s-access/grant-k8s-access.adoc[leveloffset=+1] + + +include::view-kubernetes-resources.adoc[leveloffset=+1] + + +include::create-kubeconfig.adoc[leveloffset=+1] + + +include::aws-access/service-accounts.adoc[leveloffset=+1] diff --git a/latest/ug/manage-access/create-kubeconfig.adoc b/latest/ug/manage-access/create-kubeconfig.adoc new file mode 100644 index 00000000..7c480d36 --- /dev/null +++ b/latest/ug/manage-access/create-kubeconfig.adoc @@ -0,0 +1,66 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[create-kubeconfig,create-kubeconfig.title]] += Connect [.noloc]`kubectl` to an EKS cluster by creating a [.noloc]`kubeconfig` file +:info_doctype: section +:info_title: Connect kubectl to an EKS cluster by creating a kubeconfig file +:info_titleabbrev: Access cluster with kubectl +:info_abstract: Learn how to create or update a kubeconfig file for authenticating with your Amazon EKS cluster using kubectl. Follow prerequisites for required tools and permissions. + +[abstract] +-- +Learn how to create or update a kubeconfig file for authenticating with your Amazon EKS cluster using kubectl. Follow prerequisites for required tools and permissions. +-- + +In this topic, you create a `kubeconfig` file for your cluster (or update an existing one). + +The `kubectl` command-line tool uses configuration information in `kubeconfig` files to communicate with the API server of a cluster. For more information, see https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/[Organizing Cluster Access Using kubeconfig Files] in the [.noloc]`Kubernetes` documentation. + +Amazon EKS uses the `aws eks get-token` command with `kubectl` for cluster authentication. By default, the {aws} CLI uses the same credentials that are returned with the following command: + +[source,bash,subs="verbatim,attributes"] +---- +aws sts get-caller-identity +---- + +* An existing Amazon EKS cluster. To deploy one, see <>. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. The {aws} CLI version that is installed in {aws} CloudShell might also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the _{aws} CloudShell User Guide_. +* An IAM user or role with permission to use the `eks:DescribeCluster` API action for the cluster that you specify. For more information, see <>. If you use an identity from your own [.noloc]`OpenID Connect` provider to access your cluster, then see https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-kubectl[Using kubectl] in the [.noloc]`Kubernetes` documentation to create or update your `kube config` file. 
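+
+Before you create the `kubeconfig` file, you can optionally check that the {aws} CLI can generate a cluster authentication token with the same `aws eks get-token` command that `kubectl` uses. This is a quick sketch of that check; [.replaceable]`my-cluster` and [.replaceable]`region-code` are placeholders for your own values.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# kubectl invokes this command through the exec credential plugin that
+# `aws eks update-kubeconfig` writes into the kubeconfig file.
+aws eks get-token --cluster-name my-cluster --region region-code
+----
+
+The token is generated from your {aws} credentials, so an error here usually means that the {aws} CLI isn't configured with valid credentials.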
+ + +[[create-kubeconfig-automatically,create-kubeconfig-automatically.title]] +== Create `kubeconfig` file automatically + +* Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. The {aws} CLI version that is installed in {aws} CloudShell might also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the _{aws} CloudShell User Guide_. +* Permission to use the `eks:DescribeCluster` API action for the cluster that you specify. For more information, see <>. +. Create or update a `kubeconfig` file for your cluster. Replace [.replaceable]`region-code` with the {aws} Region that your cluster is in and replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-kubeconfig --region region-code --name my-cluster +---- ++ +By default, the resulting configuration file is created at the default `kubeconfig` path (`.kube`) in your home directory or merged with an existing `config` file at that location. You can specify another path with the `--kubeconfig` option. ++ +You can specify an IAM role ARN with the `--role-arn` option to use for authentication when you issue `kubectl` commands. Otherwise, the link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] in your default {aws} CLI or SDK credential chain is used. You can view your default {aws} CLI or SDK identity by running the `aws sts get-caller-identity` command. ++ +For all available options, run the `aws eks update-kubeconfig help` command or see link:cli/latest/reference/eks/update-kubeconfig.html[update-kubeconfig,type="documentation"] in the _{aws} CLI Command Reference_. +. Test your configuration. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get svc +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +svc/kubernetes ClusterIP 10.100.0.1 443/TCP 1m +---- ++ +If you receive any authorization or resource type errors, see <> in the troubleshooting topic. diff --git a/latest/ug/manage-access/k8s-access/access-entries.adoc b/latest/ug/manage-access/k8s-access/access-entries.adoc new file mode 100644 index 00000000..7cf97db0 --- /dev/null +++ b/latest/ug/manage-access/k8s-access/access-entries.adoc @@ -0,0 +1,319 @@ +//!!NODE_ROOT
+
+
+[.topic]
+[[access-entries,access-entries.title]]
+= Grant [.noloc]`IAM` users access to [.noloc]`Kubernetes` with EKS access entries
+:info_doctype: section
+
+include::../../attributes.txt[]
+
+include::access-policies.adoc[leveloffset=+1]
+
+include::migrating-access-entries.adoc[leveloffset=+1]
+
+include::access-policy-reference.adoc[leveloffset=+1]
+
+[abstract]
+--
+Learn how to manage access entries for IAM principals to your Amazon EKS cluster, including creating, updating, and deleting access entries for fine-grained authentication and authorization.
+--
+
+*What are EKS access entries?*
+
+EKS access entries are the best way to grant users access to the Kubernetes API. For example, you can use access entries to grant developers access to use kubectl.
+
+Fundamentally, an EKS access entry associates a set of Kubernetes permissions with an IAM identity, such as an IAM role. For example, a developer may assume an IAM role and use it to authenticate to an EKS cluster.
+
+You can attach Kubernetes permissions to access entries in two ways:
+
+* Use an access policy. Access policies are pre-defined Kubernetes permissions templates maintained by {aws}. For more information, see <>.
+* Reference a Kubernetes group. If you associate an IAM Identity with a Kubernetes group, you can create Kubernetes resources that grant the group permissions. For more information, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization] in the Kubernetes documentation.
+
+*Advantages*
+
+Amazon EKS cluster access management enables you to control authentication and authorization for your Kubernetes clusters directly through Amazon EKS APIs. This feature simplifies access management by eliminating the need to switch between {aws} and Kubernetes APIs when managing user permissions. Using access entries and access policies, you can define granular permissions for {aws} IAM principals, including the ability to modify or revoke cluster-admin permissions from the cluster creator.
+
+The feature integrates with infrastructure as code (IaC) tools like {aws} CloudFormation, Terraform, and {aws} CDK, allowing you to define access configurations during cluster creation. If misconfigurations occur, you can restore cluster access through the Amazon EKS API without requiring direct Kubernetes API access. This centralized approach reduces operational overhead and improves security by leveraging existing {aws} IAM capabilities such as CloudTrail audit logging and multi-factor authentication.
+
+== Get Started
+
+. Determine the IAM Identity and Access policy you want to use.
+** <>
+. Enable EKS Access Entries on your cluster. Confirm you have a supported platform version.
+** <>
+. Create an access entry that associates an IAM Identity with Kubernetes permissions.
+** <>
+. Authenticate to the cluster using the IAM identity.
+** <>
+** <>
+
+== Legacy cluster access configuration
+
+When you enable EKS access entries on clusters created before this feature was introduced (clusters with initial platform versions earlier than those specified in Platform Version Requirements), EKS automatically creates an access entry that reflects pre-existing permissions.
+This access entry shows:
+
+* The IAM identity that originally created the cluster
+* The administrative permissions granted to that identity during cluster creation
+
+NOTE: Previously, this administrative access was granted automatically and couldn't be modified.
With EKS access entries enabled, you can now view and delete this legacy access configuration. + + +[.topic] +[[setting-up-access-entries,setting-up-access-entries.title]] +== Change authentication mode to use access entries + +To begin using access entries, you must change the authentication mode of the cluster to either the `API_AND_CONFIG_MAP` or `API` modes. This adds the API for access entries. + +[[access-entries-setup-console,access-entries-setup-console.title]] +=== {aws} Console + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose the name of the cluster that you want to create an access entry in. +. Choose the *Access* tab. +. The *Authentication mode* shows the current authentication mode of the cluster. If the mode says [.noloc]`EKS API`, you can already add access entries and you can skip the remaining steps. +. Choose *Manage access*. +. For *Cluster authentication mode*, select a mode with the [.noloc]`EKS API`. Note that you can't change the authentication mode back to a mode that removes the [.noloc]`EKS API` and access entries. +. Choose *Save changes*. Amazon EKS begins to update the cluster, the status of the cluster changes to [.noloc]`Updating`, and the change is recorded in the *Update history* tab. +. Wait for the status of the cluster to return to [.noloc]`Active`. When the cluster is [.noloc]`Active`, you can follow the steps in <> to add access to the cluster for IAM principals. + +[[access-setup-cli,access-setup-cli.title]] +=== {aws} CLI + +. Install the {aws} CLI, as described in link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] in the _{aws} Command Line Interface User Guide_. +. Run the following command. Replace [.replaceable]`my-cluster` with the name of your cluster. If you want to disable the `ConfigMap` method permanently, replace `API_AND_CONFIG_MAP` with `API`. ++ +Amazon EKS begins to update the cluster, the status of the cluster changes to [.noloc]`UPDATING`, and the change is recorded in the [command]*aws eks list-updates*. ++ +[source,bash] +---- +aws eks update-cluster-config --name my-cluster --access-config authenticationMode=API_AND_CONFIG_MAP +---- +. Wait for the status of the cluster to return to [.noloc]`Active`. When the cluster is [.noloc]`Active`, you can follow the steps in <> to add access to the cluster for IAM principals. + + +=== Required platform version + +To use _access entries_, the cluster must have a platform version that is the same or later than the version listed in the following table, or a [.noloc]`Kubernetes` version that is later than the versions listed in the table. If your Kubernetes version is not listed, all platform versions support access entries. + +[cols="1,1", options="header"] +|=== +|Kubernetes version +|Platform version + + +|`1.30` +|`eks.2` + +|`1.29` +|`eks.1` + +|`1.28` +|`eks.6` + +|`1.27` +|`eks.10` + +|`1.26` +|`eks.11` + +|`1.25` +|`eks.12` + +|`1.24` +|`eks.15` + +|`1.23` +|`eks.17` +|=== + +For more information, see <>. + + +[.topic] +[[creating-access-entries,creating-access-entries.title]] +== Create access entries + + +Before creating access entries, consider the following: + +* A properly set authentication mode. See <>. +* An _access entry_ includes the Amazon Resource Name (ARN) of one, and only one, existing IAM principal. An IAM principal can't be included in more than one access entry. 
Additional considerations for the ARN that you specify: ++ +** IAM best practices recommend accessing your cluster using IAM _roles_ that have short-term credentials, rather than IAM _users_ that have long-term credentials. For more information, see link:IAM/latest/UserGuide/best-practices.html#bp-users-federation-idp[Require human users to use federation with an identity provider to access {aws} using temporary credentials,type="documentation"] in the _IAM User Guide_. +** If the ARN is for an IAM role, it _can_ include a path. ARNs in `aws-auth` `ConfigMap` entries, _can't_ include a path. For example, your ARN can be `{arn-aws}iam::<111122223333>:role/` or `{arn-aws}iam::<111122223333>:role/`. +** If the type of the access entry is anything other than `STANDARD` (see next consideration about types), the ARN must be in the same {aws} account that your cluster is in. If the type is `STANDARD`, the ARN can be in the same, or different, {aws} account than the account that your cluster is in. +** You can't change the IAM principal after the access entry is created. +** If you ever delete the IAM principal with this ARN, the access entry isn't automatically deleted. We recommend that you delete the access entry with an ARN for an IAM principal that you delete. If you don't delete the access entry and ever recreate the IAM principal, even if it has the same ARN, the access entry won't work. This is because even though the ARN is the same for the recreated IAM principal, the `roleID` or `userID` (you can see this with the `aws sts get-caller-identity` {aws} CLI command) is different for the recreated IAM principal than it was for the original IAM principal. Even though you don't see the IAM principal's `roleID` or `userID` for an access entry, Amazon EKS stores it with the access entry. +* Each access entry has a _type_. You can specify `EC2_LINUX` (for an IAM role used with Linux or Bottlerocket self-managed nodes), `EC2_Windows` (for an IAM role used with Windows self-managed nodes), `FARGATE_LINUX` (for an IAM role used with {aws} Fargate (Fargate)), `HYBRID_LINUX` (for an IAM role used with hybrid nodes) or `STANDARD` as a type. If you don't specify a type, Amazon EKS automatically sets the type to `STANDARD`. It's unnecessary to create an access entry for an IAM role that's used for a managed node group or a Fargate profile. EKS will create access entries (if enabled), or update the auth config map (if access entries are unavailable). ++ +You can't change the type after the access entry is created. +* If the type of the access entry is `STANDARD`, you can specify a _username_ for the access entry. If you don't specify a value for username, Amazon EKS sets one of the following values for you, depending on the type of the access entry and whether the IAM principal that you specified is an IAM role or IAM user. Unless you have a specific reason for specifying your own username, we recommend that don't specify one and let Amazon EKS auto-generate it for you. If you specify your own username: ++ +** It can't start with `system:`, `eks:`, `aws:`, `amazon:`, or `iam:`. +** If the username is for an IAM role, we recommend that you add `{{SessionName}}` to the end of your username. If you add `{{SessionName}}` to your username, the username must include a colon _before_ {{SessionName}}. When this role is assumed, the name of the session specified when assuming the role is automatically passed to the cluster and will appear in CloudTrail logs. 
For example, you can't have a username of `john{{SessionName}}`. The username would have to be `:john{{SessionName}}` or `jo:hn{{SessionName}}`. The colon only has to be before `{{SessionName}}`. The username generated by Amazon EKS in the following table includes an ARN. Since an ARN includes colons, it meets this requirement. The colon isn't required if you don't include `{{SessionName}}` in your username. Note that the special character "@" is replaced with "-" in the session name. ++ +[cols="1,1,1", options="header"] +|=== +|IAM principal type +|Type +|Username value that Amazon EKS automatically sets + + +|User +|`STANDARD` +|The ARN of the user. Example: `{arn-aws}iam::<111122223333>:user/` + +|Role +|`STANDARD` +|The STS ARN of the role when it's assumed. Amazon EKS appends `{{SessionName}}` to the role. + +Example: `{arn-aws}sts::<111122223333>:assumed-role//{{SessionName}}` + +If the ARN of the role that you specified contained a path, Amazon EKS removes it in the generated username. + +|Role +|`EC2_LINUX` or `EC2_Windows` +|`system:node:{{EC2PrivateDNSName}}` + +|Role +|`FARGATE_LINUX` +|`system:node:{{SessionName}}` + +|Role +|`HYBRID_LINUX` +|`system:node:{{SessionName}}` +|=== ++ +You can change the username after the access entry is created. +* If an access entry's type is `STANDARD`, and you want to use [.noloc]`Kubernetes` RBAC authorization, you can add one or more _group names_ to the access entry. After you create an access entry you can add and remove group names. For the IAM principal to have access to [.noloc]`Kubernetes` objects on your cluster, you must create and manage [.noloc]`Kubernetes` role-based authorization (RBAC) objects. Create [.noloc]`Kubernetes` `RoleBinding` or `ClusterRoleBinding` objects on your cluster that specify the group name as a `subject` for `kind: Group`. [.noloc]`Kubernetes` authorizes the IAM principal access to any cluster objects that you've specified in a [.noloc]`Kubernetes` `Role` or `ClusterRole` object that you've also specified in your binding's `roleRef`. If you specify group names, we recommend that you're familiar with the [.noloc]`Kubernetes` role-based authorization (RBAC) objects. For more information, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization] in the [.noloc]`Kubernetes` documentation. ++ +IMPORTANT: Amazon EKS doesn't confirm that any [.noloc]`Kubernetes` RBAC objects that exist on your cluster include any of the group names that you specify. For example, if you create an access entry for group that currently doesn't exist, EKS will create the group instead of returning an error. ++ +Instead of, or in addition to, [.noloc]`Kubernetes` authorizing the IAM principal access to [.noloc]`Kubernetes` objects on your cluster, you can associate Amazon EKS _access policies_ to an access entry. Amazon EKS authorizes IAM principals to access [.noloc]`Kubernetes` objects on your cluster with the permissions in the access policy. You can scope an access policy's permissions to [.noloc]`Kubernetes` namespaces that you specify. Use of access policies don't require you to manage [.noloc]`Kubernetes` RBAC objects. For more information, see <>. +* If you create an access entry with type `EC2_LINUX` or `EC2_Windows`, the IAM principal creating the access entry must have the `iam:PassRole` permission. For more information, see link:IAM/latest/UserGuide/id_roles_use_passrole.html[Granting a user permissions to pass a role to an {aws} service,type="documentation"] in the _IAM User Guide_. 
+* Similar to standard link:IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency[IAM behavior,type="documentation"], access entry creation and updates are eventually consistent, and may take several seconds to be effective after the initial API call returns successfully. You must design your applications to account for these potential delays. We recommend that you don't include access entry creates or updates in the critical, high- availability code paths of your application. Instead, make changes in a separate initialization or setup routine that you run less frequently. Also, be sure to verify that the changes have been propagated before production workflows depend on them. +* Access entries do not support link:IAM/latest/UserGuide/using-service-linked-roles.html[service linked roles,type="documentation"]. You cannot create access entries where the principal ARN is a service linked role. You can identify service linked roles by their ARN, which is in the format `{arn-aws}iam::*:role/aws-service-role/*`. + +You can create an access entry using the {aws-management-console} or the {aws} CLI. + + +[[access-create-console,access-create-console.title]] +=== {aws-management-console} +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose the name of the cluster that you want to create an access entry in. +. Choose the *Access* tab. +. Choose *Create access entry*. +. For *IAM principal*, select an existing IAM role or user. IAM best practices recommend accessing your cluster using IAM _roles_ that have short-term credentials, rather than IAM _users_ that have long-term credentials. For more information, see link:IAM/latest/UserGuide/best-practices.html#bp-users-federation-idp[Require human users to use federation with an identity provider to access {aws} using temporary credentials,type="documentation"] in the _IAM User Guide_. +. For *Type*, if the access entry is for the node role used for self-managed Amazon EC2 nodes, select *EC2 Linux* or *EC2 Windows*. Otherwise, accept the default (*Standard*). +. If the *Type* you chose is *Standard* and you want to specify a *Username*, enter the username. +. If the *Type* you chose is *Standard* and you want to use [.noloc]`Kubernetes` RBAC authorization for the IAM principal, specify one or more names for *Groups*. If you don't specify any group names and want to use Amazon EKS authorization, you can associate an access policy in a later step, or after the access entry is created. +. (Optional) For *Tags*, assign labels to the access entry. For example, to make it easier to find all resources with the same tag. +. Choose *Next*. +. On the *Add access policy* page, if the type you chose was *Standard* and you want Amazon EKS to authorize the IAM principal to have permissions to the [.noloc]`Kubernetes` objects on your cluster, complete the following steps. Otherwise, choose *Next*. ++ +.. For *Policy name*, choose an access policy. You can't view the permissions of the access policies, but they include similar permissions to those in the [.noloc]`Kubernetes` user-facing `ClusterRole` objects. For more information, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles[User-facing roles] in the [.noloc]`Kubernetes` documentation. +.. Choose one of the following options: ++ +*** *Cluster* – Choose this option if you want Amazon EKS to authorize the IAM principal to have the permissions in the access policy for all [.noloc]`Kubernetes` objects on your cluster. 
+
+*** *[.noloc]`Kubernetes` namespace* – Choose this option if you want Amazon EKS to authorize the IAM principal to have the permissions in the access policy for all [.noloc]`Kubernetes` objects in a specific [.noloc]`Kubernetes` namespace on your cluster. For *Namespace*, enter the name of the [.noloc]`Kubernetes` namespace on your cluster. If you want to add additional namespaces, choose *Add new namespace* and enter the namespace name.
+.. If you want to add additional policies, choose *Add policy*. You can scope each policy differently, but you can add each policy only once.
+.. Choose *Next*.
+. Review the configuration for your access entry. If anything looks incorrect, choose *Previous* to go back through the steps and correct the error. If the configuration is correct, choose *Create*.
+
+[[access-create-cli,access-create-cli.title]]
+=== {aws} CLI
+
+. Install the {aws} CLI, as described in link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] in the {aws} Command Line Interface User Guide.
+. Create an access entry. You can use any of the following examples to create access entries:
++
+** Create an access entry for a self-managed Amazon EC2 Linux node group. Replace [.replaceable]`my-cluster` with the name of your cluster, [.replaceable]`111122223333` with your {aws} account ID, and [.replaceable]`EKS-my-cluster-self-managed-ng-1` with the name of your link:eks/latest/userguide/create-node-role.html[node IAM role,type="documentation"]. If your node group is a Windows node group, then replace [.replaceable]`EC2_LINUX` with `EC2_Windows`.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks create-access-entry --cluster-name my-cluster --principal-arn {arn-aws}iam::111122223333:role/EKS-my-cluster-self-managed-ng-1 --type EC2_LINUX
+----
++
+You can't use the `--kubernetes-groups` option when you specify a type other than `STANDARD`. You can't associate an access policy to this access entry, because its type is a value other than `STANDARD`.
+** Create an access entry that allows an IAM role that isn't used for an Amazon EC2 self-managed node group to access your cluster, with [.noloc]`Kubernetes` authorizing its access. Replace [.replaceable]`my-cluster` with the name of your cluster, [.replaceable]`111122223333` with your {aws} account ID, and [.replaceable]`my-role` with the name of your IAM role. Replace [.replaceable]`Viewers` with the name of a group that you've specified in a [.noloc]`Kubernetes` `RoleBinding` or `ClusterRoleBinding` object on your cluster.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks create-access-entry --cluster-name my-cluster --principal-arn {arn-aws}iam::111122223333:role/my-role --type STANDARD --username Viewers --kubernetes-groups Viewers
+----
+** Create an access entry that allows an IAM user to authenticate to your cluster. This example is provided because this is possible, though IAM best practices recommend accessing your cluster using IAM _roles_ that have short-term credentials, rather than IAM _users_ that have long-term credentials. For more information, see link:IAM/latest/UserGuide/best-practices.html#bp-users-federation-idp[Require human users to use federation with an identity provider to access {aws} using temporary credentials,type="documentation"] in the _IAM User Guide_.
++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks create-access-entry --cluster-name my-cluster --principal-arn {arn-aws}iam::111122223333:user/my-user --type STANDARD --username my-user +---- ++ +If you want this user to have more access to your cluster than the permissions in the [.noloc]`Kubernetes` API discovery roles, then you need to associate an access policy to the access entry, since the `--kubernetes-groups` option isn't used. For more information, see <> and https://kubernetes.io/docs/reference/access-authn-authz/rbac/#discovery-roles[API discovery roles] in the [.noloc]`Kubernetes` documentation. + + +[.topic] +[[updating-access-entries,updating-access-entries.title]] +== Update access entries + +You can update an access entry using the {aws-management-console} or the {aws} CLI. + + +[[access-update-console,access-update-console.title]] +=== {aws-management-console} +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose the name of the cluster that you want to create an access entry in. +. Choose the *Access* tab. +. Choose the access entry that you want to update. +. Choose *Edit*. +. For *Username*, you can change the existing value. +. For *Groups*, you can remove existing group names or add new group names. If the following groups names exist, don't remove them: *system:nodes* or *system:bootstrappers*. Removing these groups can cause your cluster to function improperly. If you don't specify any group names and want to use Amazon EKS authorization, associate an xref:access-policies[access policy,linkend=access-policies] in a later step. +. For *Tags*, you can assign labels to the access entry. For example, to make it easier to find all resources with the same tag. You can also remove existing tags. +. Choose *Save changes*. +. If you want to associate an access policy to the entry, see <>. + +[[access-update-cli,access-update-cli.title]] +=== {aws} CLI +. Install the {aws} CLI, as described in link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] in the {aws} Command Line Interface User Guide. +. To update an access entry +Replace [.replaceable]`my-cluster` with the name of your cluster, [.replaceable]`111122223333` with your {aws} account ID, and [.replaceable]`EKS-my-cluster-my-namespace-Viewers` with the name of an IAM role. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-access-entry --cluster-name my-cluster --principal-arn {arn-aws}iam::111122223333:role/EKS-my-cluster-my-namespace-Viewers --kubernetes-groups Viewers +---- ++ +You can't use the `--kubernetes-groups` option if the type of the access entry is a value other than `STANDARD`. You also can't associate an access policy to an access entry with a type other than `STANDARD`. + + +[.topic] +[[deleting-access-entries,deleting-access-entries.title]] +== Delete access entries + +If you discover that you deleted an access entry in error, you can always recreate it. If the access entry that you're deleting is associated to any access policies, the associations are automatically deleted. You don't have to disassociate access policies from an access entry before deleting the access entry. + +You can delete an access entry using the {aws-management-console} or the {aws} CLI. + + +[[access-delete-console,access-delete-console.title]] +=== {aws-management-console} +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. 
Choose the name of the cluster that you want to delete an access entry from.
+. Choose the *Access* tab.
+. In the *Access entries* list, choose the access entry that you want to delete.
+. Choose *Delete*.
+. In the confirmation dialog box, choose *Delete*.
+
+[[access-delete-cli,access-delete-cli.title]]
+=== {aws} CLI
+. Install the {aws} CLI, as described in link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] in the {aws} Command Line Interface User Guide.
+. Delete the access entry. Replace [.replaceable]`my-cluster` with the name of your cluster, [.replaceable]`111122223333` with your {aws} account ID, and [.replaceable]`my-role` with the name of the IAM role that you no longer want to have access to your cluster.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks delete-access-entry --cluster-name my-cluster --principal-arn {arn-aws}iam::111122223333:role/my-role
+----
+
+
+
diff --git a/latest/ug/manage-access/k8s-access/access-policies.adoc b/latest/ug/manage-access/k8s-access/access-policies.adoc
new file mode 100644
index 00000000..3fad2dfd
--- /dev/null
+++ b/latest/ug/manage-access/k8s-access/access-policies.adoc
@@ -0,0 +1,156 @@
+//!!NODE_ROOT
+ +[.topic] +[[access-policies,access-policies.title]] += Associate access policies with access entries +:info_doctype: section + +include::../../attributes.txt[] + +[abstract] +-- +Learn how to associate and disassociate Amazon EKS access policies to and from access entries to grant Kubernetes permissions to IAM principals. +-- + +You can assign one or more access policies to _access entries_ of _type_ `STANDARD`. Amazon EKS automatically grants the other types of access entries the permissions required to function properly in your cluster. Amazon EKS access policies include [.noloc]`Kubernetes` permissions, not IAM permissions. Before associating an access policy to an access entry, make sure that you're familiar with the [.noloc]`Kubernetes` permissions included in each access policy. For more information, see <>. If none of the access policies meet your requirements, then don't associate an access policy to an access entry. Instead, specify one or more _group names_ for the access entry and create and manage [.noloc]`Kubernetes` role-based access control objects. For more information, see <>. + + + +* An existing access entry. To create one, see <>. +* An {aws} Identity and Access Management role or user with the following permissions: `ListAccessEntries`, `DescribeAccessEntry`, `UpdateAccessEntry`, `ListAccessPolicies`, `AssociateAccessPolicy`, and `DisassociateAccessPolicy`. For more information, see link:service-authorization/latest/reference/list_amazonelastickubernetesservice.html#amazonelastickubernetesservice-actions-as-permissions[Actions defined by Amazon Elastic Kubernetes Service,type="documentation"] in the _Service Authorization Reference_. + +Before associating access policies with access entries, consider the following requirements: + + + +* You can associate multiple access policies to each access entry, but you can only associate each policy to an access entry once. If you associate multiple access policies, the access entry's IAM principal has all permissions included in all associated access policies. +* You can scope an access policy to all resources on a cluster or by specifying the name of one or more [.noloc]`Kubernetes` namespaces. You can use wildcard characters for a namespace name. For example, if you want to scope an access policy to all namespaces that start with `dev-`, you can specify `dev-*` as a namespace name. Make sure that the namespaces exist on your cluster and that your spelling matches the actual namespace name on the cluster. Amazon EKS doesn't confirm the spelling or existence of the namespaces on your cluster. +* You can change the _access scope_ for an access policy after you associate it to an access entry. If you've scoped the access policy to [.noloc]`Kubernetes` namespaces, you can add and remove namespaces for the association, as necessary. +* If you associate an access policy to an access entry that also has _group names_ specified, then the IAM principal has all the permissions in all associated access policies. It also has all the permissions in any [.noloc]`Kubernetes` `Role` or `ClusterRole` object that is specified in any [.noloc]`Kubernetes` `Role` and `RoleBinding` objects that specify the group names. +* If you run the `kubectl auth can-i --list` command, you won't see any [.noloc]`Kubernetes` permissions assigned by access policies associated with an access entry for the IAM principal you're using when you run the command. 
The command only shows [.noloc]`Kubernetes` permissions if you've granted them in [.noloc]`Kubernetes` `Role` or `ClusterRole` objects that you've bound to the group names or username that you specified for an access entry. +* If you impersonate a [.noloc]`Kubernetes` user or group when interacting with [.noloc]`Kubernetes` objects on your cluster, such as using the `kubectl` command with `--as [.replaceable]``username``` or `--as-group [.replaceable]``group-name```, you're forcing the use of [.noloc]`Kubernetes` RBAC authorization. As a result, the IAM principal has no permissions assigned by any access policies associated to the access entry. The only [.noloc]`Kubernetes` permissions that the user or group that the IAM principal is impersonating has are the [.noloc]`Kubernetes` permissions that you've granted them in [.noloc]`Kubernetes` `Role` or `ClusterRole` objects that you've bound to the group names or user name. For your IAM principal to have the permissions in associated access policies, don't impersonate a [.noloc]`Kubernetes` user or group. The IAM principal will still also have any permissions that you've granted them in the [.noloc]`Kubernetes` `Role` or `ClusterRole` objects that you've bound to the group names or user name that you specified for the access entry. For more information, see https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation[User impersonation] in the [.noloc]`Kubernetes` documentation. + +You can associate an access policy to an access entry using the {aws-management-console} or the {aws} CLI. + + +[[access-associate-console,access-associate-console.title]] +== {aws-management-console} +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose the name of the cluster that has an access entry that you want to associate an access policy to. +. Choose the *Access* tab. +. If the type of the access entry is *Standard*, you can associate or disassociate Amazon EKS *access policies*. If the type of your access entry is anything other than *Standard*, then this option isn't available. +. Choose *Associate access policy*. +. For *Policy name*, select the policy with the permissions you want the IAM principal to have. To view the permissions included in each policy, see <>. +. For *Access scope*, choose an access scope. If you choose *Cluster*, the permissions in the access policy are granted to the IAM principal for resources in all [.noloc]`Kubernetes` namespaces. If you choose *[.noloc]`Kubernetes` namespace*, you can then choose *Add new namespace*. In the *Namespace* field that appears, you can enter the name of a [.noloc]`Kubernetes` namespace on your cluster. If you want the IAM principal to have the permissions across multiple namespaces, then you can enter multiple namespaces. +. Choose *Add access policy*. + +[[access-associate-cli,access-associate-cli.title]] +== {aws} CLI +. Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. 
To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. The {aws} CLI version that is installed in {aws} CloudShell might also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the _{aws} CloudShell User Guide_. ++ +. View the available access policies. ++ +[source,bash] +---- +aws eks list-access-policies --output table +---- ++ +An example output is as follows. ++ +[source,bash] +---- +--------------------------------------------------------------------------------------------------------- +| ListAccessPolicies | ++-------------------------------------------------------------------------------------------------------+ +|| accessPolicies || +|+---------------------------------------------------------------------+-------------------------------+| +|| arn | name || +|+---------------------------------------------------------------------+-------------------------------+| +|| {arn-aws}eks::aws:cluster-access-policy/AmazonEKSAdminPolicy | AmazonEKSAdminPolicy || +|| {arn-aws}eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy | AmazonEKSClusterAdminPolicy || +|| {arn-aws}eks::aws:cluster-access-policy/AmazonEKSEditPolicy | AmazonEKSEditPolicy || +|| {arn-aws}eks::aws:cluster-access-policy/AmazonEKSViewPolicy | AmazonEKSViewPolicy || +|+---------------------------------------------------------------------+-------------------------------+| + +---- ++ +To view the permissions included in each policy, see <>. +. View your existing access entries. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks list-access-entries --cluster-name my-cluster +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +{ + "accessEntries": [ + "{arn-aws}iam::111122223333:role/my-role", + "{arn-aws}iam::111122223333:user/my-user" + ] +} +---- +. Associate an access policy to an access entry. The following example associates the `AmazonEKSViewPolicy` access policy to an access entry. Whenever the [.replaceable]`my-role` IAM role attempts to access [.noloc]`Kubernetes` objects on the cluster, Amazon EKS will authorize the role to use the permissions in the policy to access [.noloc]`Kubernetes` objects in the [.replaceable]`my-namespace1` and [.replaceable]`my-namespace2` [.noloc]`Kubernetes` namespaces only. Replace [.replaceable]`my-cluster` with the name of your cluster, [.replaceable]`111122223333` with your {aws} account ID, and [.replaceable]`my-role` with the name of the IAM role that you want Amazon EKS to authorize access to [.noloc]`Kubernetes` cluster objects for. 
++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks associate-access-policy --cluster-name my-cluster --principal-arn {arn-aws}iam::111122223333:role/my-role \ + --access-scope type=namespace,namespaces=my-namespace1,my-namespace2 --policy-arn {arn-aws}eks::aws:cluster-access-policy/AmazonEKSViewPolicy +---- ++ +If you want the IAM principal to have the permissions cluster-wide, replace `type=namespace,namespaces=[.replaceable]``my-namespace1``,[.replaceable]``my-namespace2``` with `type=cluster`. If you want to associate multiple access policies to the access entry, run the command multiple times, each with a unique access policy. Each associated access policy has its own scope. ++ +NOTE: If you later want to change the scope of an associated access policy, run the previous command again with the new scope. For example, if you wanted to remove [.replaceable]`my-namespace2`, you'd run the command again using `type=namespace,namespaces=[.replaceable]``my-namespace1``` only. If you wanted to change the scope from `namespace` to `cluster`, you'd run the command again using `type=cluster`, removing `type=namespace,namespaces=[.replaceable]``my-namespace1``,[.replaceable]``my-namespace2```. +. Determine which access policies are associated to an access entry. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks list-associated-access-policies --cluster-name my-cluster --principal-arn {arn-aws}iam::111122223333:role/my-role +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +{ + "clusterName": "my-cluster", + "principalArn": "{arn-aws}iam::111122223333", + "associatedAccessPolicies": [ + { + "policyArn": "{arn-aws}eks::aws:cluster-access-policy/AmazonEKSViewPolicy", + "accessScope": { + "type": "cluster", + "namespaces": [] + }, + "associatedAt": "2023-04-17T15:25:21.675000-04:00", + "modifiedAt": "2023-04-17T15:25:21.675000-04:00" + }, + { + "policyArn": "{arn-aws}eks::aws:cluster-access-policy/AmazonEKSAdminPolicy", + "accessScope": { + "type": "namespace", + "namespaces": [ + "my-namespace1", + "my-namespace2" + ] + }, + "associatedAt": "2023-04-17T15:02:06.511000-04:00", + "modifiedAt": "2023-04-17T15:02:06.511000-04:00" + } + ] +} +---- ++ +In the previous example, the IAM principal for this access entry has view permissions across all namespaces on the cluster, and administrator permissions to two [.noloc]`Kubernetes` namespaces. +. Disassociate an access policy from an access entry. In this example, the `AmazonEKSAdminPolicy` policy is disassociated from an access entry. The IAM principal retains the permissions in the `AmazonEKSViewPolicy` access policy for objects in the [.replaceable]`my-namespace1` and [.replaceable]`my-namespace2` namespaces however, because that access policy is not disassociated from the access entry. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks disassociate-access-policy --cluster-name my-cluster --principal-arn {arn-aws}iam::111122223333:role/my-role \ + --policy-arn {arn-aws}eks::aws:cluster-access-policy/AmazonEKSAdminPolicy +---- + +To list available access policies, see <>. \ No newline at end of file diff --git a/latest/ug/manage-access/k8s-access/access-policy-reference.adoc b/latest/ug/manage-access/k8s-access/access-policy-reference.adoc new file mode 100644 index 00000000..833184b6 --- /dev/null +++ b/latest/ug/manage-access/k8s-access/access-policy-reference.adoc @@ -0,0 +1,585 @@ + +//!!NODE_ROOT
+[.topic] +[[access-policy-permissions,access-policy-permissions.title]] += Review access policy permissions +:info_doctype: section + +include::../../attributes.txt[] + +Access policies include `rules` that contain [.noloc]`Kubernetes` `verbs` (permissions) and `resources`. Access policies don't include IAM permissions or resources. Similar to [.noloc]`Kubernetes` `Role` and `ClusterRole` objects, access policies only include `allow` `rules`. You can't modify the contents of an access policy. You can't create your own access policies. If the permissions in the access policies don't meet your needs, then create [.noloc]`Kubernetes` RBAC objects and specify _group names_ for your access entries. For more information, see <>. The permissions contained in access policies are similar to the permissions in the [.noloc]`Kubernetes` user-facing cluster roles. For more information, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles[User-facing roles] in the [.noloc]`Kubernetes` documentation. + +Choose any access policy to see its contents. Each row of each table in each access policy is a separate rule. + + +[[access-policy-permissions-amazoneksadminpolicy,access-policy-permissions-amazoneksadminpolicy.title]] +== AmazonEKSAdminPolicy + + +This access policy includes permissions that grant an IAM principal most permissions to resources. When associated to an access entry, its access scope is typically one or more [.noloc]`Kubernetes` namespaces. If you want an IAM principal to have administrator access to all resources on your cluster, associate the <> access policy to your access entry instead. + +*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSAdminPolicy` + +[cols="1,1,1", options="header"] +|=== +|Kubernetes API groups +|Kubernetes resources +|Kubernetes verbs (permissions) + + +|`apps` +|`daemonsets`, `deployments`, `deployments/rollback`, `deployments/scale`, `replicasets`, `replicasets/scale`, `statefulsets`, `statefulsets/scale` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`apps` +|`controllerrevisions`, `daemonsets`, `daemonsets/status`, `deployments`, `deployments/scale`, `deployments/status`, `replicasets`, `replicasets/scale`, `replicasets/status`, `statefulsets`, `statefulsets/scale`, `statefulsets/status` +|`get`, `list`, `watch` + +|`authorization.k8s.io` +|`localsubjectaccessreviews` +|`create` + +|`autoscaling` +|`horizontalpodautoscalers` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`autoscaling` +|`horizontalpodautoscalers`, `horizontalpodautoscalers/status` +|`get`, `list`, `watch` + +|`batch` +|`cronjobs`, `jobs` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`batch` +|`cronjobs`, `cronjobs/status`, `jobs`, `jobs/status` +|`get`, `list`, `watch` + +|`discovery.k8s.io` +|`endpointslices` +|`get`, `list`, `watch` + +|`extensions` +|`daemonsets`, `deployments`, `deployments/rollback`, `deployments/scale`, `ingresses`, `networkpolicies`, `replicasets`, `replicasets/scale`, `replicationcontrollers/scale` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`extensions` +|`daemonsets`, `daemonsets/status`, `deployments`, `deployments/scale`, `deployments/status`, `ingresses`, `ingresses/status`, `networkpolicies`, `replicasets`, `replicasets/scale`, `replicasets/status`, `replicationcontrollers/scale` +|`get`, `list`, `watch` + +|`networking.k8s.io` +|`ingresses`, `ingresses/status`, `networkpolicies` +|`get`, `list`, `watch` + +|`networking.k8s.io` +|`ingresses`, 
`networkpolicies` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`policy` +|`poddisruptionbudgets` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`policy` +|`poddisruptionbudgets`, `poddisruptionbudgets/status` +|`get`, `list`, `watch` + +|`rbac.authorization.k8s.io` +|`rolebindings`, `roles` +|`create`, `delete`, `deletecollection`, `get`, `list`, `patch`, `update`, `watch` + +| +|`configmaps`, `endpoints`, `persistentvolumeclaims`, `persistentvolumeclaims/status`, `pods`, `replicationcontrollers`, `replicationcontrollers/scale`, `serviceaccounts`, `services`, `services/status` +|`get`,``list``, `watch` + +| +|``pods/attach``, `pods/exec`, `pods/portforward`, `pods/proxy`, `secrets`, `services/proxy` +|`get`, `list`, `watch` + +| +|`configmaps`, `events`, `persistentvolumeclaims`, `replicationcontrollers`, `replicationcontrollers/scale`, `secrets`, `serviceaccounts`, `services`, `services/proxy` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +| +|`pods`, `pods/attach`, `pods/exec`, `pods/portforward`, `pods/proxy` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +| +|`serviceaccounts` +|`impersonate` + +| +|`bindings`, `events`, `limitranges`, `namespaces/status`, `pods/log`, `pods/status`, `replicationcontrollers/status`, `resourcequotas`, `resourcequotas/status` +|`get`, `list`, `watch` + +| +|`namespaces` +|`get`,``list``, `watch` +|=== + + +[[access-policy-permissions-amazoneksclusteradminpolicy,access-policy-permissions-amazoneksclusteradminpolicy.title]] +== AmazonEKSClusterAdminPolicy + + +This access policy includes permissions that grant an IAM principal administrator access to a cluster. When associated to an access entry, its access scope is typically the cluster, rather than a [.noloc]`Kubernetes` namespace. If you want an IAM principal to have a more limited administrative scope, consider associating the <> access policy to your access entry instead. + +*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy` + +[cols="1,1,1,1", options="header"] +|=== +|Kubernetes API groups +|Kubernetes nonResourceURLs +|Kubernetes resources +|Kubernetes verbs (permissions) + + +|`{asterisk}` +| +|`{asterisk}` +|`{asterisk}` + +| +|`{asterisk}` +| +|`{asterisk}` +|=== + + +[[access-policy-permissions-amazoneksadminviewpolicy,access-policy-permissions-amazoneksadminviewpolicy.title]] +== AmazonEKSAdminViewPolicy + +This access policy includes permissions that grant an IAM principal access to list/view all resources in a cluster. Note this includes https://kubernetes.io/docs/concepts/configuration/secret/[Kubernetes Secrets.] + +*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSAdminViewPolicy` + +[cols="1,1,1", options="header"] +|=== +|Kubernetes API groups +|Kubernetes resources +|Kubernetes verbs (permissions) + + +|`{asterisk}` +|`{asterisk}` +|`get`, `list`, `watch` +|=== + + +[[access-policy-permissions-amazonekseditpolicy,access-policy-permissions-amazonekseditpolicy.title]] +== AmazonEKSEditPolicy + + +This access policy includes permissions that allow an IAM principal to edit most [.noloc]`Kubernetes` resources. 
+ +*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSEditPolicy` + +[cols="1,1,1", options="header"] +|=== +|Kubernetes API groups +|Kubernetes resources +|Kubernetes verbs (permissions) + + +|`apps` +|`daemonsets`, `deployments`, `deployments/rollback`, `deployments/scale`, `replicasets`, `replicasets/scale`, `statefulsets`, `statefulsets/scale` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`apps` +|`controllerrevisions`, `daemonsets`, `daemonsets/status`, `deployments`, `deployments/scale`, `deployments/status`, `replicasets`, `replicasets/scale`, `replicasets/status`, `statefulsets`, `statefulsets/scale`, `statefulsets/status` +|`get`, `list`, `watch` + +|`autoscaling` +|`horizontalpodautoscalers`, `horizontalpodautoscalers/status` +|`get`, `list`, `watch` + +|`autoscaling` +|`horizontalpodautoscalers` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`batch` +|`cronjobs`, `jobs` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`batch` +|`cronjobs`, `cronjobs/status`, `jobs`, `jobs/status` +|`get`, `list`, `watch` + +|`discovery.k8s.io` +|`endpointslices` +|`get`, `list`, `watch` + +|`extensions` +|`daemonsets`, `deployments`, `deployments/rollback`, `deployments/scale`, `ingresses`, `networkpolicies`, `replicasets`, `replicasets/scale`, `replicationcontrollers/scale` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`extensions` +|`daemonsets`, `daemonsets/status`, `deployments`, `deployments/scale`, `deployments/status`, `ingresses`, `ingresses/status`, `networkpolicies`, `replicasets`, `replicasets/scale`, `replicasets/status`, `replicationcontrollers/scale` +|`get`, `list`, `watch` + +|`networking.k8s.io` +|`ingresses`, `networkpolicies` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`networking.k8s.io` +|`ingresses`, `ingresses/status`, `networkpolicies` +|`get`, `list`, `watch` + +|`policy` +|`poddisruptionbudgets` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +|`policy` +|`poddisruptionbudgets`, `poddisruptionbudgets/status` +|`get`, `list`, `watch` + +| +|`namespaces` +|`get`, `list`, `watch` + +| +|``pods/attach``, `pods/exec`, `pods/portforward`, `pods/proxy`, `secrets`, `services/proxy` +|`get`, `list`, `watch` + +| +|`serviceaccounts` +|`impersonate` + +| +|`pods`, `pods/attach`, `pods/exec`, `pods/portforward`, `pods/proxy` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +| +|`configmaps`, `events`, `persistentvolumeclaims`, `replicationcontrollers`, `replicationcontrollers/scale`, `secrets`, `serviceaccounts`, `services`, `services/proxy` +|`create`, `delete`, `deletecollection`, `patch`, `update` + +| +|`configmaps`, `endpoints`, `persistentvolumeclaims`, `persistentvolumeclaims/status`, `pods`, `replicationcontrollers`, `replicationcontrollers/scale`, `serviceaccounts`, `services`, `services/status` +|`get`, `list`, `watch` + +| +|`bindings`, `events`, `limitranges`, `namespaces/status`, `pods/log`, `pods/status`, `replicationcontrollers/status`, `resourcequotas`, `resourcequotas/status` +|`get`, `list`, `watch` +|=== + + +[[access-policy-permissions-amazoneksviewpolicy.json,access-policy-permissions-amazoneksviewpolicy.json.title]] +== AmazonEKSViewPolicy + +This access policy includes permissions that allow an IAM principal to view most [.noloc]`Kubernetes` resources. 
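+
+If you want to confirm what an associated access policy actually allows, the affected principal can query its own effective permissions with `kubectl auth can-i`. The following is a minimal sketch, not part of the access policy definition; `default` is an example namespace, and the answers depend on which policy and access scope you associated with the access entry.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Run as the IAM principal for the access entry. With only a view policy
+# associated, expect "yes" for read verbs and "no" for write verbs.
+kubectl auth can-i list pods --namespace default
+kubectl auth can-i create deployments --namespace default
+----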
+
+*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSViewPolicy`
+
+[cols="1,1,1", options="header"]
+|===
+|Kubernetes API groups
+|Kubernetes resources
+|Kubernetes verbs (permissions)
+
+
+|`apps`
+|`controllerrevisions`, `daemonsets`, `daemonsets/status`, `deployments`, `deployments/scale`, `deployments/status`, `replicasets`, `replicasets/scale`, `replicasets/status`, `statefulsets`, `statefulsets/scale`, `statefulsets/status`
+|`get`, `list`, `watch`
+
+|`autoscaling`
+|`horizontalpodautoscalers`, `horizontalpodautoscalers/status`
+|`get`, `list`, `watch`
+
+|`batch`
+|`cronjobs`, `cronjobs/status`, `jobs`, `jobs/status`
+|`get`, `list`, `watch`
+
+|`discovery.k8s.io`
+|`endpointslices`
+|`get`, `list`, `watch`
+
+|`extensions`
+|`daemonsets`, `daemonsets/status`, `deployments`, `deployments/scale`, `deployments/status`, `ingresses`, `ingresses/status`, `networkpolicies`, `replicasets`, `replicasets/scale`, `replicasets/status`, `replicationcontrollers/scale`
+|`get`, `list`, `watch`
+
+|`networking.k8s.io`
+|`ingresses`, `ingresses/status`, `networkpolicies`
+|`get`, `list`, `watch`
+
+|`policy`
+|`poddisruptionbudgets`, `poddisruptionbudgets/status`
+|`get`, `list`, `watch`
+
+|
+|`configmaps`, `endpoints`, `persistentvolumeclaims`, `persistentvolumeclaims/status`, `pods`, `replicationcontrollers`, `replicationcontrollers/scale`, `serviceaccounts`, `services`, `services/status`
+|`get`, `list`, `watch`
+
+|
+|`bindings`, `events`, `limitranges`, `namespaces/status`, `pods/log`, `pods/status`, `replicationcontrollers/status`, `resourcequotas`, `resourcequotas/status`
+|`get`, `list`, `watch`
+
+|
+|`namespaces`
+|`get`, `list`, `watch`
+|===
+
+
+== AmazonEKSAutoNodePolicy
+
+*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSAutoNodePolicy`
+
+This policy includes permissions that allow Amazon EKS components to complete the following tasks:
+
+* `kube-proxy` – Monitor network endpoints and services, and manage related events. This enables cluster-wide network proxy functionality.
+* `ipamd` – Manage {aws} VPC networking resources and container network interfaces (CNI). This allows the IP address management daemon to handle pod networking.
+* `coredns` – Access service discovery resources like endpoints and services. This enables DNS resolution within the cluster.
+* `ebs-csi-driver` – Work with storage-related resources for Amazon EBS volumes. This allows dynamic provisioning and attachment of persistent volumes.
+* `neuron` – Monitor nodes and pods for {aws} Neuron devices. This enables management of {aws} Inferentia and Trainium accelerators.
+* `node-monitoring-agent` – Access node diagnostics and events. This enables cluster health monitoring and diagnostics collection.
+
+Each component uses a dedicated service account and is restricted to only the permissions required for its specific function.
+
+If you manually specify a node IAM role in a NodeClass, you need to create an access entry that associates the new node IAM role with this access policy.
+
+
+== AmazonEKSBlockStoragePolicy
+
+*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSBlockStoragePolicy`
+
+This policy includes permissions that allow Amazon EKS to manage leader election and coordination resources for storage operations:
+
+* `coordination.k8s.io` – Create and manage lease objects for leader election. This enables EKS storage components to coordinate their activities across the cluster through a leader election mechanism.
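+
+If you want to observe the leader election that these permissions support, you can list the `Lease` objects on a running cluster. This is a hedged example rather than part of the policy; the lease names, and the namespace they appear in, depend on which Auto Mode components are running in your cluster.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Each lease records which replica currently holds leadership for a component.
+kubectl get leases -n kube-system
+----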
+ +The policy is scoped to specific lease resources used by the EKS storage components to prevent conflicting access to other coordination resources in the cluster. + +Amazon EKS automatically creates an access entry with this access policy for the cluster IAM role when Auto Mode is enabled, ensuring that the necessary permissions are in place for the block storage capability to function properly. + + +== AmazonEKSLoadBalancingPolicy + +*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSLoadBalancingPolicy` + +This policy includes permissions that allow Amazon EKS to manage leader election resources for load balancing: + +* `coordination.k8s.io` – Create and manage lease objects for leader election. This enables EKS load balancing components to coordinate activities across multiple replicas by electing a leader. + +The policy is scoped specifically to load balancing lease resources to ensure proper coordination while preventing access to other lease resources in the cluster. + +Amazon EKS automatically creates an access entry with this access policy for the cluster IAM role when Auto Mode is enabled, ensuring that the necessary permissions are in place for the networking capability to function properly. + + +== AmazonEKSNetworkingPolicy + +*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSNetworkingPolicy` + + +This policy includes permissions that allow Amazon EKS to manage leader election resources for networking: + +* `coordination.k8s.io` – Create and manage lease objects for leader election. This enables EKS networking components to coordinate IP address allocation activities by electing a leader. + +The policy is scoped specifically to networking lease resources to ensure proper coordination while preventing access to other lease resources in the cluster. + +Amazon EKS automatically creates an access entry with this access policy for the cluster IAM role when Auto Mode is enabled, ensuring that the necessary permissions are in place for the networking capability to function properly. + + +== AmazonEKSComputePolicy + +*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSComputePolicy` + +This policy includes permissions that allow Amazon EKS to manage leader election resources for compute operations: + +* `coordination.k8s.io` – Create and manage lease objects for leader election. This enables EKS compute components to coordinate node scaling activities by electing a leader. + +The policy is scoped specifically to compute management lease resources while allowing basic read access (`get`, `watch`) to all lease resources in the cluster. + +Amazon EKS automatically creates an access entry with this access policy for the cluster IAM role when Auto Mode is enabled, ensuring that the necessary permissions are in place for the networking capability to function properly. + +== AmazonEKSBlockStorageClusterPolicy + +*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSBlockStorageClusterPolicy` + +This policy grants permissions necessary for the block storage capability of Amazon EKS Auto Mode. It enables efficient management of block storage resources within Amazon EKS clusters. The policy includes the following permissions: + +CSI Driver Management: + +- Create, read, update, and delete CSI drivers, specifically for block storage. + +Volume Management: + +- List, watch, create, update, patch, and delete persistent volumes. +- List, watch, and update persistent volume claims. +- Patch persistent volume claim statuses. 
+ +Node and Pod Interaction: + +- Read node and pod information. +- Manage events related to storage operations. + +Storage Classes and Attributes: + +- Read storage classes and CSI nodes. +- Read volume attribute classes. + +Volume Attachments: + +- List, watch, and modify volume attachments and their statuses. + +Snapshot Operations: + +- Manage volume snapshots, snapshot contents, and snapshot classes. +- Handle operations for volume group snapshots and related resources. + +This policy is designed to support comprehensive block storage management within Amazon EKS clusters running in Auto Mode. It combines permissions for various operations including provisioning, attaching, resizing, and snapshotting of block storage volumes. + +Amazon EKS automatically creates an access entry with this access policy for the cluster IAM role when Auto Mode is enabled, ensuring that the necessary permissions are in place for the block storage capability to function properly. + +== AmazonEKSComputeClusterPolicy + +*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSComputeClusterPolicy` + +This policy grants permissions necessary for the compute management capability of Amazon EKS Auto Mode. It enables efficient orchestration and scaling of compute resources within Amazon EKS clusters. The policy includes the following permissions: + +Node Management: + +- Create, read, update, delete, and manage status of NodePools and NodeClaims. +- Manage NodeClasses, including creation, modification, and deletion. + +Scheduling and Resource Management: + +- Read access to pods, nodes, persistent volumes, persistent volume claims, replication controllers, and namespaces. +- Read access to storage classes, CSI nodes, and volume attachments. +- List and watch deployments, daemon sets, replica sets, and stateful sets. +- Read pod disruption budgets. + +Event Handling: + +- Create, read, and manage cluster events. + +Node Deprovisioning and Pod Eviction: + +- Update, patch, and delete nodes. +- Create pod evictions and delete pods when necessary. + +Custom Resource Definition (CRD) Management: + +- Create new CRDs. +- Manage specific CRDs related to node management (NodeClasses, NodePools, NodeClaims, and NodeDiagnostics). + +This policy is designed to support comprehensive compute management within Amazon EKS clusters running in Auto Mode. It combines permissions for various operations including node provisioning, scheduling, scaling, and resource optimization. + +Amazon EKS automatically creates an access entry with this access policy for the cluster IAM role when Auto Mode is enabled, ensuring that the necessary permissions are in place for the compute management capability to function properly. + +== AmazonEKSLoadBalancingClusterPolicy + +*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSLoadBalancingClusterPolicy` + +This policy grants permissions necessary for the load balancing capability of Amazon EKS Auto Mode. It enables efficient management and configuration of load balancing resources within Amazon EKS clusters. The policy includes the following permissions: + +Event and Resource Management: + +- Create and patch events. +- Read access to pods, nodes, endpoints, and namespaces. +- Update pod statuses. + +Service and Ingress Management: + +- Full management of services and their statuses. +- Comprehensive control over ingresses and their statuses. +- Read access to endpoint slices and ingress classes. + +Target Group Bindings: + +- Create and modify target group bindings and their statuses. 
+- Read access to ingress class parameters.
+
+Custom Resource Definition (CRD) Management:
+
+- Create and read all CRDs.
+- Specific management of the `targetgroupbindings.eks.amazonaws.com` and `ingressclassparams.eks.amazonaws.com` CRDs.
+
+Webhook Configuration:
+
+- Create and read mutating and validating webhook configurations.
+- Manage the `eks-load-balancing-webhook` configuration.
+
+This policy is designed to support comprehensive load balancing management within Amazon EKS clusters running in Auto Mode. It combines permissions for various operations including service exposure, ingress routing, and integration with {aws} load balancing services.
+
+Amazon EKS automatically creates an access entry with this access policy for the cluster IAM role when Auto Mode is enabled, ensuring that the necessary permissions are in place for the load balancing capability to function properly.
+
+== AmazonEKSNetworkingClusterPolicy
+
+*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSNetworkingClusterPolicy`
+
+This policy grants permissions necessary for the networking capability of Amazon EKS Auto Mode. It enables efficient management and configuration of networking resources within Amazon EKS clusters. The policy includes the following permissions:
+
+Node and Pod Management:
+
+- Read access to NodeClasses and their statuses.
+- Read access to NodeClaims and their statuses.
+- Read access to pods.
+
+CNI Node Management:
+
+- Permissions for CNINodes and their statuses, including create, read, update, delete, and patch.
+
+Custom Resource Definition (CRD) Management:
+
+- Create and read all CRDs.
+- Specific management (update, patch, delete) of the `cninodes.eks.amazonaws.com` CRD.
+
+Event Management:
+
+- Create and patch events.
+
+This policy is designed to support comprehensive networking management within Amazon EKS clusters running in Auto Mode. It combines permissions for various operations including node networking configuration, CNI (Container Network Interface) management, and related custom resource handling.
+
+The policy allows the networking components to interact with node-related resources, manage CNI-specific node configurations, and handle custom resources critical for networking operations in the cluster.
+
+Amazon EKS automatically creates an access entry with this access policy for the cluster IAM role when Auto Mode is enabled, ensuring that the necessary permissions are in place for the networking capability to function properly.
+
+[[access-policy-permissions-amazonekshybridpolicy,access-policy-permissions-amazonekshybridpolicy.title]]
+== AmazonEKSHybridPolicy
+
+
+This access policy includes permissions that grant EKS access to the nodes of a cluster. When associated to an access entry, its access scope is typically the cluster, rather than a [.noloc]`Kubernetes` namespace. This policy is used by Amazon EKS hybrid nodes.
+
+*ARN* – `{arn-aws}eks::aws:cluster-access-policy/AmazonEKSHybridPolicy`
+
+[cols="1,1,1,1", options="header"]
+|===
+| Kubernetes API groups
+| Kubernetes nonResourceURLs
+| Kubernetes resources
+| Kubernetes verbs (permissions)
+
+
+|`{asterisk}`
+|
+|`nodes`
+|`list`
+|===
+
+
+[[access-policy-updates,access-policy-updates.title]]
+== Access policy updates
+
+View details about updates to access policies since they were introduced. For automatic alerts about changes to this page, subscribe to the RSS feed in <>.
+ +[cols="1,1,1", options="header"] +|=== +|Change +|Description +|Date + +|Add policies for Amazon EKS Hybrid +|Publish `AmazonEKSHybridPolicy` +|December 2, 2024 + +|Add policies for Amazon EKS Auto Mode +|These access policies give the Cluster IAM Role and Node IAM Role permission to call Kubernetes APIs. {aws} uses these to automate routine tasks for storage, compute, and networking resources. +|December 2, 2024 + +|Add `AmazonEKSAdminViewPolicy` +|Add a new policy for expanded view access, including resources like Secrets. +|April 23, 2024 + +|Access policies introduced. +|Amazon EKS introduced access policies. +|May 29, 2023 +|=== diff --git a/latest/ug/manage-access/k8s-access/auth-configmap.adoc b/latest/ug/manage-access/k8s-access/auth-configmap.adoc new file mode 100644 index 00000000..4ceeb3b3 --- /dev/null +++ b/latest/ug/manage-access/k8s-access/auth-configmap.adoc @@ -0,0 +1,372 @@ +//!!NODE_ROOT
+ +[.topic] +[[auth-configmap,auth-configmap.title]] += Grant [.noloc]`IAM` users access to [.noloc]`Kubernetes` with a [.noloc]`ConfigMap` +:info_doctype: section + +include::../../attributes.txt[] + + +[abstract] +-- +Learn how to manage IAM principal access to your Amazon EKS cluster using the aws-auth ConfigMap and Kubernetes RBAC, allowing authorized users and roles to interact with the cluster securely. +-- + +[IMPORTANT] +==== + +The `aws-auth ConfigMap` is deprecated. For the recommended method to manage access to [.noloc]`Kubernetes` APIs, see <>. + +==== + +Access to your cluster using link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principals,type="documentation"] is enabled by the https://github.com/kubernetes-sigs/aws-iam-authenticator#readme[{aws} IAM Authenticator for Kubernetes], which runs on the Amazon EKS control plane. The authenticator gets its configuration information from the `aws-auth` `ConfigMap`. For all `aws-auth` `ConfigMap` settings, see https://github.com/kubernetes-sigs/aws-iam-authenticator#full-configuration-format[Full Configuration Format] on [.noloc]`GitHub`. + +[[aws-auth-users,aws-auth-users.title]] +== Add IAM principals to your Amazon EKS cluster + +When you create an Amazon EKS cluster, the link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] that creates the cluster is automatically granted `system:masters` permissions in the cluster's role-based access control (RBAC) configuration in the Amazon EKS control plane. This principal doesn't appear in any visible configuration, so make sure to keep track of which principal originally created the cluster. To grant additional IAM principals the ability to interact with your cluster, edit the `aws-auth ConfigMap` within [.noloc]`Kubernetes` and create a [.noloc]`Kubernetes` `rolebinding` or `clusterrolebinding` with the name of a `group` that you specify in the `aws-auth ConfigMap`. + +[NOTE] +==== + +For more information about [.noloc]`Kubernetes` role-based access control (RBAC) configuration, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization] in the [.noloc]`Kubernetes` documentation. + +==== +. Determine which credentials `kubectl` is using to access your cluster. On your computer, you can see which credentials `kubectl` uses with the following command. Replace [.replaceable]`~/.kube/config` with the path to your `kubeconfig` file if you don't use the default path. ++ +[source,bash,subs="verbatim,attributes"] +---- +cat ~/.kube/config +---- ++ +An example output is as follows. ++ +[source,yaml,subs="verbatim,attributes"] +---- +[...] +contexts: +- context: + cluster: my-cluster.region-code.eksctl.io + user: admin@my-cluster.region-code.eksctl.io + name: admin@my-cluster.region-code.eksctl.io +current-context: admin@my-cluster.region-code.eksctl.io +[...] +---- ++ +In the previous example output, the credentials for a user named [.replaceable]`admin` are configured for a cluster named [.replaceable]`my-cluster`. If this is the user that created the cluster, then it already has access to your cluster. If it's not the user that created the cluster, then you need to complete the remaining steps to enable cluster access for other IAM principals. link:IAM/latest/UserGuide/id_users.html[IAM best practices,type="documentation"] recommend that you grant permissions to roles instead of users. 
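+
+Because the kubeconfig entries that Amazon EKS generates typically obtain tokens through the {aws} CLI, you can also confirm which IAM principal those credentials resolve to. This is a minimal sketch that assumes the exec credential plugin in your kubeconfig uses the same {aws} CLI profile as your shell.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Returns the account, user ID, and ARN of the IAM principal that your current credentials map to.
+aws sts get-caller-identity
+----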
You can see which other principals currently have access to your cluster with the following command: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe -n kube-system configmap/aws-auth +---- ++ +An example output is as follows. ++ +[source,subs="verbatim,attributes"] +.... +Name: aws-auth +Namespace: kube-system +Labels: +Annotations: + +Data +==== +mapRoles: +---- +- groups: + - system:bootstrappers + - system:nodes + rolearn: {arn-aws}iam::111122223333:role/my-node-role + username: system:node:{{EC2PrivateDNSName}} + + +BinaryData +==== + +Events: +.... ++ +The previous example is a default `aws-auth` `ConfigMap`. Only the node instance role has access to the cluster. +. Make sure that you have existing [.noloc]`Kubernetes` `roles` and `rolebindings` or `clusterroles` and `clusterrolebindings` that you can map IAM principals to. For more information about these resources, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization] in the [.noloc]`Kubernetes` documentation. ++ +.. View your existing [.noloc]`Kubernetes` `roles` or `clusterroles`. `Roles` are scoped to a `namespace`, but `clusterroles` are scoped to the cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get roles -A +---- ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get clusterroles +---- +.. View the details of any `role` or `clusterrole` returned in the previous output and confirm that it has the permissions (`rules`) that you want your IAM principals to have in your cluster. ++ +Replace [.replaceable]`role-name` with a `role` name returned in the output from the previous command. Replace [.replaceable]`kube-system` with the namespace of the `role`. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe role role-name -n kube-system +---- ++ +Replace [.replaceable]`cluster-role-name` with a `clusterrole` name returned in the output from the previous command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe clusterrole cluster-role-name +---- +.. View your existing [.noloc]`Kubernetes` `rolebindings` or `clusterrolebindings`. `Rolebindings` are scoped to a `namespace`, but `clusterrolebindings` are scoped to the cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get rolebindings -A +---- ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get clusterrolebindings +---- +.. View the details of any `rolebinding` or `clusterrolebinding` and confirm that it has a `role` or `clusterrole` from the previous step listed as a `roleRef` and a group name listed for `subjects`. ++ +Replace [.replaceable]`role-binding-name` with a `rolebinding` name returned in the output from the previous command. Replace [.replaceable]`kube-system` with the `namespace` of the `rolebinding`. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe rolebinding role-binding-name -n kube-system +---- ++ +An example output is as follows. 
++ +[source,yaml,subs="verbatim,attributes"] +---- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: eks-console-dashboard-restricted-access-role-binding + namespace: default +subjects: +- kind: Group + name: eks-console-dashboard-restricted-access-group + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: Role + name: eks-console-dashboard-restricted-access-role + apiGroup: rbac.authorization.k8s.io +---- ++ +Replace [.replaceable]`cluster-role-binding-name` with a `clusterrolebinding` name returned in the output from the previous command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe clusterrolebinding cluster-role-binding-name +---- ++ +An example output is as follows. ++ +[source,yaml,subs="verbatim,attributes"] +---- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: eks-console-dashboard-full-access-binding +subjects: +- kind: Group + name: eks-console-dashboard-full-access-group + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: eks-console-dashboard-full-access-clusterrole + apiGroup: rbac.authorization.k8s.io +---- ++ +. Edit the `aws-auth` `ConfigMap`. You can use a tool such as `eksctl` to update the `ConfigMap` or you can update it manually by editing it. ++ +IMPORTANT: We recommend using `eksctl`, or another tool, to edit the `ConfigMap`. For information about other tools you can use, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#use-tools-to-make-changes-to-the-aws-auth-configmap[Use tools to make changes to the aws-authConfigMap] in the Amazon EKS best practices guides. An improperly formatted `aws-auth` `ConfigMap` can cause you to lose access to your cluster. ++ +** View steps to xref:configmap-eksctl[edit configmap with eksctl]. +** View steps to xref:configmap-manual[edit configmap manually]. + +[[configmap-eksctl,configmap-eksctl.title]] +=== Edit Configmap with Eksctl + +. You need version `{eksctl-min-version}` or later of the `eksctl` command line tool installed on your device or {aws} CloudShell. To install or update `eksctl`, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. ++ +. View the current mappings in the `ConfigMap`. Replace [.replaceable]`my-cluster` with the name of your cluster. Replace [.replaceable]`region-code` with the {aws} Region that your cluster is in. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl get iamidentitymapping --cluster my-cluster --region=region-code +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +ARN USERNAME GROUPS ACCOUNT +{arn-aws}iam::111122223333:role/eksctl-my-cluster-my-nodegroup-NodeInstanceRole-1XLS7754U3ZPA system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes +---- +. Add a mapping for a role. Replace [.replaceable]`my-role` with your role name. Replace [.replaceable]`eks-console-dashboard-full-access-group` with the name of the group specified in your [.noloc]`Kubernetes` `RoleBinding` or `ClusterRoleBinding` object. Replace [.replaceable]`111122223333` with your account ID. You can replace [.replaceable]`admin` with any name you choose. 
++
+[source,bash,subs="verbatim,attributes"]
+----
+eksctl create iamidentitymapping --cluster my-cluster --region=region-code \
+    --arn {arn-aws}iam::111122223333:role/my-role --username admin --group eks-console-dashboard-full-access-group \
+    --no-duplicate-arns
+----
++
+IMPORTANT: The role ARN can't include a path such as `role/my-team/developers/my-role`. The format of the ARN must be `{arn-aws}iam::[.replaceable]``111122223333``:role/[.replaceable]``my-role```. In this example, `my-team/developers/` needs to be removed.
++
+An example output is as follows.
++
+[literal]
+----
+[...]
+2022-05-09 14:51:20 [ℹ] adding identity "{arn-aws}iam::111122223333:role/my-role" to auth ConfigMap
+----
+. Add a mapping for a user. link:IAM/latest/UserGuide/id_users.html[IAM best practices,type="documentation"] recommend that you grant permissions to roles instead of users. Replace [.replaceable]`my-user` with your user name. Replace [.replaceable]`eks-console-dashboard-restricted-access-group` with the name of the group specified in your [.noloc]`Kubernetes` `RoleBinding` or `ClusterRoleBinding` object. Replace [.replaceable]`111122223333` with your account ID. You can replace [.replaceable]`my-user` with any name you choose.
++
+[source,bash,subs="verbatim,attributes"]
+----
+eksctl create iamidentitymapping --cluster my-cluster --region=region-code \
+    --arn {arn-aws}iam::111122223333:user/my-user --username my-user --group eks-console-dashboard-restricted-access-group \
+    --no-duplicate-arns
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+[...]
+2022-05-09 14:53:48 [ℹ] adding identity "{arn-aws}iam::111122223333:user/my-user" to auth ConfigMap
+----
+. View the mappings in the `ConfigMap` again.
++
+[source,bash,subs="verbatim,attributes"]
+----
+eksctl get iamidentitymapping --cluster my-cluster --region=region-code
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+ARN                                                                                               USERNAME                             GROUPS                                          ACCOUNT
+{arn-aws}iam::111122223333:role/eksctl-my-cluster-my-nodegroup-NodeInstanceRole-1XLS7754U3ZPA      system:node:{{EC2PrivateDNSName}}    system:bootstrappers,system:nodes
+{arn-aws}iam::111122223333:role/my-role                                                           admin                                eks-console-dashboard-full-access-group
+{arn-aws}iam::111122223333:user/my-user                                                           my-user                              eks-console-dashboard-restricted-access-group
+----
+
+[[configmap-manual,configmap-manual.title]]
+=== Edit Configmap manually
+. Open the `ConfigMap` for editing.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl edit -n kube-system configmap/aws-auth
+----
++
+NOTE: If you receive an error stating "``Error from server (NotFound): configmaps "aws-auth" not found``", then use the procedure in <> to apply the stock `ConfigMap`.
+. Add your IAM principals to the `ConfigMap`. An IAM group isn't an IAM principal, so it can't be added to the `ConfigMap`.
++
+** *To add an IAM role (for example, for link:IAM/latest/UserGuide/id_roles_providers.html[federated users,type="documentation"]):* Add the role details to the `mapRoles` section of the `ConfigMap`, under `data`. Add this section if it does not already exist in the file. Each entry supports the following parameters:
++
+*** *rolearn*: The ARN of the IAM role to add. This value can't include a path. For example, you can't specify an ARN such as `{arn-aws}iam::[.replaceable]``111122223333``:role/my-team/developers/[.replaceable]``role-name```. The ARN needs to be `{arn-aws}iam::[.replaceable]``111122223333``:role/[.replaceable]``role-name``` instead.
+*** *username*: The user name within [.noloc]`Kubernetes` to map to the IAM role. +*** *groups*: The group or list of [.noloc]`Kubernetes` groups to map the role to. The group can be a default group, or a group specified in a `clusterrolebinding` or `rolebinding`. For more information, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#default-roles-and-role-bindings[Default roles and role bindings] in the [.noloc]`Kubernetes` documentation. +** *To add an IAM user:* + link:IAM/latest/UserGuide/id_users.html[IAM best practices,type="documentation"] recommend that you grant permissions to roles instead of users. Add the user details to the `mapUsers` section of the `ConfigMap`, under `data`. Add this section if it does not already exist in the file. Each entry supports the following parameters: ++ +*** *userarn*: The ARN of the IAM user to add. +*** *username*: The user name within [.noloc]`Kubernetes` to map to the IAM user. +*** *groups*: The group, or list of [.noloc]`Kubernetes` groups to map the user to. The group can be a default group, or a group specified in a `clusterrolebinding` or `rolebinding`. For more information, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/#default-roles-and-role-bindings[Default roles and role bindings] in the [.noloc]`Kubernetes` documentation. ++ +. For example, the following YAML block contains: +** A `mapRoles` section that maps the IAM node instance to [.noloc]`Kubernetes` groups so that nodes can register themselves with the cluster and the `my-console-viewer-role` IAM role that is mapped to a [.noloc]`Kubernetes` group that can view all [.noloc]`Kubernetes` resources for all clusters. For a list of the IAM and [.noloc]`Kubernetes` group permissions required for the `my-console-viewer-role` IAM role, see <>. +** A `mapUsers` section that maps the `admin` IAM user from the default {aws} account to the `system:masters` [.noloc]`Kubernetes` group and the `my-user` user from a different {aws} account that is mapped to a [.noloc]`Kubernetes` group that can view [.noloc]`Kubernetes` resources for a specific namespace. For a list of the IAM and [.noloc]`Kubernetes` group permissions required for the `my-user` IAM user, see <>. ++ +Add or remove lines as necessary and replace all [.replaceable]`example values` with your own values. ++ +[source,yaml,subs="verbatim,attributes"] +---- +# Please edit the object below. Lines beginning with a '#' will be ignored, +# and an empty file will abort the edit. If an error occurs while saving this file will be +# reopened with the relevant failures. +# +apiVersion: v1 +data: + mapRoles: | + - groups: + - system:bootstrappers + - system:nodes + rolearn: {arn-aws}iam::111122223333:role/my-role + username: system:node:{{EC2PrivateDNSName}} + - groups: + - eks-console-dashboard-full-access-group + rolearn: {arn-aws}iam::111122223333:role/my-console-viewer-role + username: my-console-viewer-role + mapUsers: | + - groups: + - system:masters + userarn: {arn-aws}iam::111122223333:user/admin + username: admin + - groups: + - eks-console-dashboard-restricted-access-group + userarn: {arn-aws}iam::444455556666:user/my-user + username: my-user +---- +. Save the file and exit your text editor. + + +[[aws-auth-configmap,aws-auth-configmap.title]] +== Apply the `aws-auth`   `ConfigMap` to your cluster + +The `aws-auth` `ConfigMap` is automatically created and applied to your cluster when you create a managed node group or when you create a node group using `eksctl`. 
It is initially created to allow nodes to join your cluster, but you also use this `ConfigMap` to add role-based access control (RBAC) access to IAM principals. If you've launched self-managed nodes and haven't applied the `aws-auth` `ConfigMap` to your cluster, you can do so with the following procedure.
+
+. Check to see if you've already applied the `aws-auth` `ConfigMap`.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl describe configmap -n kube-system aws-auth
+----
++
+If you receive an error stating "``Error from server (NotFound): configmaps "aws-auth" not found``", then proceed with the following steps to apply the stock `ConfigMap`.
+. Download, edit, and apply the {aws} authenticator configuration map.
++
+.. Download the configuration map.
++
+[source,bash,subs="verbatim,attributes"]
+----
+curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/aws-auth-cm.yaml
+----
+.. In the `aws-auth-cm.yaml` file, set the `rolearn` to the Amazon Resource Name (ARN) of the IAM role associated with your nodes. You can do this with a text editor, or by replacing [.replaceable]`my-node-instance-role` and running the following command:
++
+[source,bash,subs="verbatim,attributes"]
+----
+sed -i.bak -e 's|<ARN of instance role (not instance profile)>|my-node-instance-role|' aws-auth-cm.yaml
+----
++
+Don't modify any other lines in this file.
++
+IMPORTANT: The role ARN can't include a path such as `role/my-team/developers/my-role`. The format of the ARN must be `{arn-aws}iam::[.replaceable]``111122223333``:role/[.replaceable]``my-role```. In this example, `my-team/developers/` needs to be removed.
++
+You can inspect the {aws} CloudFormation stack outputs for your node groups and look for the following values:
++
+*** *InstanceRoleARN* – For node groups that were created with `eksctl`
+*** *NodeInstanceRole* – For node groups that were created with Amazon EKS vended {aws} CloudFormation templates in the {aws-management-console}
+.. Apply the configuration. This command may take a few minutes to finish.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl apply -f aws-auth-cm.yaml
+----
++
+NOTE: If you receive any authorization or resource type errors, see <> in the troubleshooting topic.
+. Watch the status of your nodes and wait for them to reach the `Ready` status.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get nodes --watch
+----
++
+Enter `Ctrl`+`C` to return to a shell prompt.
diff --git a/latest/ug/manage-access/k8s-access/authenticate-oidc-identity-provider.adoc b/latest/ug/manage-access/k8s-access/authenticate-oidc-identity-provider.adoc
new file mode 100644
index 00000000..6e9d3db7
--- /dev/null
+++ b/latest/ug/manage-access/k8s-access/authenticate-oidc-identity-provider.adoc
@@ -0,0 +1,193 @@
+//!!NODE_ROOT
+ +[.topic] +[[authenticate-oidc-identity-provider,authenticate-oidc-identity-provider.title]] += Grant users access to [.noloc]`Kubernetes` with an external [.noloc]`OIDC` provider +:info_doctype: section + +include::../../attributes.txt[] + +[abstract] +-- +Learn how to authenticate users for your Amazon EKS cluster using OpenID Connect (OIDC) identity providers to manage access and permissions with roles, bindings, and RBAC authorization. +-- + +Amazon EKS supports using [.noloc]`OpenID Connect` ([.noloc]`OIDC`) identity providers as a method to authenticate users to your cluster. [.noloc]`OIDC` identity providers can be used with, or as an alternative to {aws} Identity and Access Management (IAM). For more information about using IAM, see <>. After configuring authentication to your cluster, you can create [.noloc]`Kubernetes` `roles` and `clusterroles` to assign permissions to the roles, and then bind the roles to the identities using [.noloc]`Kubernetes` `rolebindings` and `clusterrolebindings`. For more information, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization] in the [.noloc]`Kubernetes` documentation. + + + +* You can associate one [.noloc]`OIDC` identity provider to your cluster. +* [.noloc]`Kubernetes` doesn't provide an [.noloc]`OIDC` identity provider. You can use an existing public [.noloc]`OIDC` identity provider, or you can run your own identity provider. For a list of certified providers, see https://openid.net/certification/[OpenID Certification] on the OpenID site. +* The issuer URL of the [.noloc]`OIDC` identity provider must be publicly accessible, so that Amazon EKS can discover the signing keys. Amazon EKS doesn't support [.noloc]`OIDC` identity providers with self-signed certificates. +* You can't disable IAM authentication to your cluster, because it's still required for joining nodes to a cluster. +* An Amazon EKS cluster must still be created by an {aws} link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"], rather than an [.noloc]`OIDC` identity provider user. This is because the cluster creator interacts with the Amazon EKS APIs, rather than the [.noloc]`Kubernetes` APIs. +* [.noloc]`OIDC` identity provider-authenticated users are listed in the cluster's audit log if CloudWatch logs are turned on for the control plane. For more information, see <>. +* You can't sign in to the {aws-management-console} with an account from an [.noloc]`OIDC` provider. You can only <> by signing into the {aws-management-console} with an {aws} Identity and Access Management account. + + +[[associate-oidc-identity-provider,associate-oidc-identity-provider.title]] +== Associate an [.noloc]`OIDC` identity provider + +Before you can associate an [.noloc]`OIDC` identity provider with your cluster, you need the following information from your provider: + + + +*Issuer URL*:: +The URL of the OIDC identity provider that allows the API server to discover public signing keys for verifying tokens. The URL must begin with `https://` and should correspond to the `iss` claim in the provider's OIDC ID tokens. In accordance with the OIDC standard, path components are allowed but query parameters are not. Typically the URL consists of only a host name, like `https://server.example.org` or `https://example.com`. This URL should point to the level below `.well-known/openid-configuration` and must be publicly accessible over the internet. 
+
+
+*Client ID (also known as _audience_)*::
+The ID for the client application that makes authentication requests to the OIDC identity provider.
+
+You can associate an identity provider using `eksctl` or the {aws-management-console}.
+
+
+[[identity-associate-eksctl,identity-associate-eksctl.title]]
+=== Associate an identity provider using eksctl
+
+. Create a file named [.replaceable]`associate-identity-provider.yaml` with the following contents. Replace the [.replaceable]`example values` with your own. The values in the `identityProviders` section are obtained from your [.noloc]`OIDC` identity provider. Values are only required for the `name`, `type`, `issuerUrl`, and `clientId` settings under `identityProviders`.
++
+[source,yaml,subs="verbatim,attributes"]
+----
+---
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+
+metadata:
+  name: my-cluster
+  region: your-region-code
+
+identityProviders:
+  - name: my-provider
+    type: oidc
+    issuerUrl: https://example.com
+    clientId: kubernetes
+    usernameClaim: email
+    usernamePrefix: my-username-prefix
+    groupsClaim: my-claim
+    groupsPrefix: my-groups-prefix
+    requiredClaims:
+      string: string
+    tags:
+      env: dev
+----
++
+IMPORTANT: Don't specify `system:`, or any portion of that string, for `groupsPrefix` or `usernamePrefix`.
+. Create the provider.
++
+[source,bash,subs="verbatim,attributes"]
+----
+eksctl associate identityprovider -f associate-identity-provider.yaml
+----
+. To use `kubectl` to work with your cluster and [.noloc]`OIDC` identity provider, see https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-kubectl[Using kubectl] in the [.noloc]`Kubernetes` documentation.
+
+[[identity-associate-console,identity-associate-console.title]]
+=== Associate an identity provider using the {aws} Console
+. Open the link:eks/home#/clusters[Amazon EKS console,type="console"].
+. Select your cluster, and then select the *Access* tab.
+. In the *[.noloc]`OIDC` Identity Providers* section, select *Associate Identity Provider*.
+. On the *Associate [.noloc]`OIDC` Identity Provider* page, enter or select the following options, and then select *Associate*.
++
+** For *Name*, enter a unique name for the provider.
+** For *Issuer URL*, enter the URL for your provider. This URL must be accessible over the internet.
+** For *Client ID*, enter the [.noloc]`OIDC` identity provider's client ID (also known as *audience*).
+** For *Username claim*, enter the claim to use as the username.
+** For *Groups claim*, enter the claim to use as the user's group.
+** (Optional) Select *Advanced options*, and then enter or select the following information.
++
+*** *Username prefix* – Enter a prefix to prepend to username claims. The prefix is prepended to username claims to prevent clashes with existing names. If you do not provide a value, and the username is a value other than `email`, the prefix defaults to the value for *Issuer URL*. You can use the value `-` to disable all prefixing. Don't specify `system:` or any portion of that string.
+*** *Groups prefix* – Enter a prefix to prepend to groups claims. The prefix is prepended to group claims to prevent clashes with existing names (such as `system:` groups). For example, the value `oidc:` creates group names like `oidc:engineering` and `oidc:infra`. Don't specify `system:` or any portion of that string.
+*** *Required claims* – Select *Add claim* and enter one or more key value pairs that describe required claims in the client ID token.
If set, each claim is verified to be present in the ID token with a matching value.
+.. To use `kubectl` to work with your cluster and [.noloc]`OIDC` identity provider, see https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-kubectl[Using kubectl] in the [.noloc]`Kubernetes` documentation.
+
+
+[[oidc-identity-provider-iam-policy,oidc-identity-provider-iam-policy.title]]
+== Example IAM policy
+
+If you want to prevent an [.noloc]`OIDC` identity provider from being associated with a cluster, create and associate the following IAM policy to the IAM accounts of your Amazon EKS administrators. For more information, see link:IAM/latest/UserGuide/access_policies_create.html[Creating IAM policies,type="documentation"] and link:IAM/latest/UserGuide/access_policies_manage-attach-detach.html#add-policies-console[Adding IAM identity permissions,type="documentation"] in the _IAM User Guide_ and link:service-authorization/latest/reference/list_amazonelasticcontainerserviceforkubernetes.html[Actions, resources, and condition keys for Amazon Elastic Kubernetes Service,type="documentation"] in the Service Authorization Reference.
+
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "denyOIDC",
+            "Effect": "Deny",
+            "Action": [
+                "eks:AssociateIdentityProviderConfig"
+            ],
+            "Resource": "{arn-aws}eks:us-west-2:111122223333:cluster/*"
+        },
+        {
+            "Sid": "eksAdmin",
+            "Effect": "Allow",
+            "Action": [
+                "eks:*"
+            ],
+            "Resource": "*"
+        }
+    ]
+}
+----
+
+The following example policy allows [.noloc]`OIDC` identity provider association if the `clientID` is `kubernetes` and the `issuerUrl` is `https://cognito-idp.us-west-2.amazonaws.com/*`.
+
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "AllowCognitoOnly",
+            "Effect": "Deny",
+            "Action": "eks:AssociateIdentityProviderConfig",
+            "Resource": "{arn-aws}eks:us-west-2:111122223333:cluster/my-instance",
+            "Condition": {
+                "StringNotLikeIfExists": {
+                    "eks:issuerUrl": "https://cognito-idp.us-west-2.amazonaws.com/*"
+                }
+            }
+        },
+        {
+            "Sid": "DenyOtherClients",
+            "Effect": "Deny",
+            "Action": "eks:AssociateIdentityProviderConfig",
+            "Resource": "{arn-aws}eks:us-west-2:111122223333:cluster/my-instance",
+            "Condition": {
+                "StringNotEquals": {
+                    "eks:clientId": "kubernetes"
+                }
+            }
+        },
+        {
+            "Sid": "AllowOthers",
+            "Effect": "Allow",
+            "Action": "eks:*",
+            "Resource": "*"
+        }
+    ]
+}
+----
+
+
+[[partner-validated-identity-providers,partner-validated-identity-providers.title]]
+== Partner validated [.noloc]`OIDC` identity providers
+
+Amazon EKS maintains relationships with a network of partners that offer support for compatible [.noloc]`OIDC` identity providers. Refer to the following partners' documentation for details on how to integrate the identity provider with Amazon EKS.
+
+[cols="1,1,1", options="header"]
+|===
+|Partner
+|Product
+|Documentation
+
+
+|PingIdentity
+|https://docs.pingidentity.com/r/en-us/pingoneforenterprise/p14e_landing[PingOne for Enterprise]
+|https://docs.pingidentity.com/r/en-us/solution-guides/htg_config_oidc_authn_aws_eks_custers[Installation instructions]
+|===
+
+Amazon EKS aims to give you a wide selection of options to cover all use cases. If you develop a commercially supported [.noloc]`OIDC` compatible identity provider that is not listed here, then contact our partner team at link:mailto:aws-container-partners@amazon.
+com] for more information. \ No newline at end of file diff --git a/latest/ug/manage-access/k8s-access/disassociate-oidc-identity-provider.adoc b/latest/ug/manage-access/k8s-access/disassociate-oidc-identity-provider.adoc new file mode 100644 index 00000000..c5ccae3a --- /dev/null +++ b/latest/ug/manage-access/k8s-access/disassociate-oidc-identity-provider.adoc @@ -0,0 +1,13 @@ +//!!NODE_ROOT
+ +[.topic] +[[disassociate-oidc-identity-provider,disassociate-oidc-identity-provider.title]] += Disassociate an [.noloc]`OIDC` identity provider from your cluster +:info_doctype: section + +include::../../attributes.txt[] + +If you disassociate an [.noloc]`OIDC` identity provider from your cluster, users included in the provider can no longer access the cluster. However, you can still access the cluster with link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principals,type="documentation"]. + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Select your cluster, and then select the *Access* tab. +. In the *[.noloc]`OIDC` Identity Providers* section, select *Disassociate*, enter the identity provider name, and then select *Disassociate*. \ No newline at end of file diff --git a/latest/ug/manage-access/k8s-access/grant-k8s-access.adoc b/latest/ug/manage-access/k8s-access/grant-k8s-access.adoc new file mode 100644 index 00000000..42781274 --- /dev/null +++ b/latest/ug/manage-access/k8s-access/grant-k8s-access.adoc @@ -0,0 +1,119 @@ +//!!NODE_ROOT
+ + +[.topic] +[[grant-k8s-access,grant-k8s-access.title]] += Grant [.noloc]`IAM` users and roles access to Kubernetes [.noloc]`APIs` +:info_doctype: section +:info_title: Grant IAM users and roles access to Kubernetes APIs +:info_titleabbrev: Grant access to Kubernetes APIs +:info_abstract: Learn how to grant access to Kubernetes APIs on Amazon EKS clusters using IAM roles, users, or OpenID Connect providers, and manage permissions with access entries or the aws-auth ConfigMap. + +include::../../attributes.txt[] + +[abstract] +-- +Learn how to grant access to Kubernetes APIs on Amazon EKS clusters using IAM roles, users, or OpenID Connect providers, and manage permissions with access entries or the aws-auth ConfigMap. +-- + +Your cluster has a [.noloc]`Kubernetes` API endpoint. `kubectl` uses this API. You can authenticate to this API using two types of identities: + + + +* *An {aws} Identity and Access Management (IAM) _principal_ (role or user)* + – This type requires authentication to IAM. Users can sign in to {aws} as an link:IAM/latest/UserGuide/introduction.html[IAM,type="documentation"] user or with a link:identity/federation/[federated identity,type="marketing"] by using credentials provided through an identity source. Users can only sign in with a federated identity if your administrator previously set up identity federation using IAM roles. When users access {aws} by using federation, they're indirectly link:IAM/latest/UserGuide/when-to-use-iam.html#security-iam-authentication-iamrole[assuming a role,type="documentation"]. When users use this type of identity, you: ++ +** Can assign them [.noloc]`Kubernetes` permissions so that they can work with [.noloc]`Kubernetes` objects on your cluster. For more information about how to assign permissions to your IAM principals so that they're able to access [.noloc]`Kubernetes` objects on your cluster, see <>. +** Can assign them IAM permissions so that they can work with your Amazon EKS cluster and its resources using the Amazon EKS API, {aws} CLI, {aws} CloudFormation, {aws-management-console}, or `eksctl`. For more information, see link:service-authorization/latest/reference/list_amazonelastickubernetesservice.html#amazonelastickubernetesservice-actions-as-permissions[Actions defined by Amazon Elastic Kubernetes Service,type="documentation"] in the Service Authorization Reference. +** Nodes join your cluster by assuming an IAM role. The ability to access your cluster using IAM principals is provided by the https://github.com/kubernetes-sigs/aws-iam-authenticator#readme[{aws} IAM Authenticator for Kubernetes], which runs on the Amazon EKS control plane. +* *A user in your own [.noloc]`OpenID Connect` ([.noloc]`OIDC`) provider* + – This type requires authentication to your https://openid.net/connect/[OIDC] provider. For more information about setting up your own [.noloc]`OIDC` provider with your Amazon EKS cluster, see <>. When users use this type of identity, you: ++ +** Can assign them [.noloc]`Kubernetes` permissions so that they can work with [.noloc]`Kubernetes` objects on your cluster. +** Can't assign them IAM permissions so that they can work with your Amazon EKS cluster and its resources using the Amazon EKS API, {aws} CLI, {aws} CloudFormation, {aws-management-console}, or `eksctl`. + +You can use both types of identities with your cluster. The IAM authentication method cannot be disabled. The OIDC authentication method is optional.
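+ +For example, after an IAM principal has been granted access to a cluster, that principal typically authenticates through the {aws} CLI and `kubectl`. The following commands are a minimal sketch of that flow; [.replaceable]`my-cluster` and [.replaceable]`region-code` are example values. + +[source,bash,subs="verbatim,attributes"] +---- +# Confirm which IAM principal your current credentials resolve to. +aws sts get-caller-identity + +# Add or update a kubeconfig entry for the cluster. kubectl then requests +# short-lived authentication tokens for this IAM principal on each call. +aws eks update-kubeconfig --region region-code --name my-cluster + +# Verify that the Kubernetes API accepts requests from this identity. +kubectl get svc +---- + +If the IAM principal hasn't been granted [.noloc]`Kubernetes` permissions yet, the last command typically returns an authorization error until you add permissions for it using one of the methods described in the next section.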
+ +[[authentication-modes,authentication-modes.title]] +== Associate IAM Identities with Kubernetes Permissions + +The https://github.com/kubernetes-sigs/aws-iam-authenticator#readme[{aws} IAM Authenticator for Kubernetes] is installed on your cluster's control plane. It enables link:IAM/latest/UserGuide/introduction.html[{aws} Identity and Access Management,type="documentation"] (IAM) principals (roles and users) that you allow to access [.noloc]`Kubernetes` resources on your cluster. You can allow IAM principals to access [.noloc]`Kubernetes` objects on your cluster using one of the following methods: + + + +* *Creating access entries* + – If your cluster is at or later than the platform version listed in the link:eks/latest/userguide/access-entries.html[Prerequisites,type="documentation"] section for your cluster's [.noloc]`Kubernetes` version, we recommend that you use this option. ++ +Use _access entries_ to manage the [.noloc]`Kubernetes` permissions of IAM principals from outside the cluster. You can add and manage access to the cluster by using the EKS API, {aws} Command Line Interface, {aws} SDKs, {aws} CloudFormation, and {aws-management-console}. This means you can manage users with the same tools that you created the cluster with. ++ +To get started, follow link:eks/latest/userguide/setting-up-access-entries.html[Change authentication mode to use access entries,type="documentation"], then link:eks/latest/userguide/migrating-access-entries.html[Migrating existing aws-auth ConfigMap entries to access entries,type="documentation"]. +* *Adding entries to the `aws-auth` `ConfigMap`* + – If your cluster's platform version is earlier than the version listed in the link:eks/latest/userguide/access-entries.html[Prerequisites,type="documentation"] section, then you must use this option. If your cluster's platform version is at or later than the platform version listed in the link:eks/latest/userguide/access-entries.html[Prerequisites,type="documentation"] section for your cluster's [.noloc]`Kubernetes` version, and you've added entries to the `ConfigMap`, then we recommend that you migrate those entries to access entries. You can't migrate entries that Amazon EKS added to the `ConfigMap` however, such as entries for IAM roles used with managed node groups or Fargate profiles. For more information, see <>. ++ +** If you have to use the `aws-auth` `ConfigMap` option, you can add entries to the `ConfigMap` using the `eksctl create iamidentitymapping` command. For more information, see https://eksctl.io/usage/iam-identity-mappings/[Manage IAM users and roles] in the `eksctl` documentation. + + +[[set-cam,set-cam.title]] +== Set Cluster Authentication Mode + +Each cluster has an _authentication mode_. The authentication mode determines which methods you can use to allow IAM principals to access [.noloc]`Kubernetes` objects on your cluster. There are three authentication modes. + +[IMPORTANT] +==== + +Once the access entry method is enabled, it cannot be disabled. + +If the `ConfigMap` method is not enabled during cluster creation, it cannot be enabled later. All clusters created before the introduction of access entries have the `ConfigMap` method enabled. + +If you are using hybrid nodes with your cluster, you must use the `API` or `API_AND_CONFIG_MAP` cluster authentication modes. + +==== + +*The `aws-auth` `ConfigMap` inside the cluster*:: +This is the original authentication mode for Amazon EKS clusters. 
The IAM principal that created the cluster is the initial user that can access the cluster by using `kubectl`. The initial user must add other users to the list in the `aws-auth` `ConfigMap` and assign permissions that affect the other users within the cluster. These other users can't manage or remove the initial user, as there isn't an entry in the `ConfigMap` to manage. + + +*Both the `ConfigMap` and access entries*:: +With this authentication mode, you can use both methods to add IAM principals to the cluster. Note that each method stores separate entries; for example, if you add an access entry from the {aws} CLI, the `aws-auth` `ConfigMap` is not updated. + + +*Access entries only*:: +With this authentication mode, you can use the EKS API, {aws} Command Line Interface, {aws} SDKs, {aws} CloudFormation, and {aws-management-console} to manage access to the cluster for IAM principals. ++ +Each access entry has a _type_ and you can use the combination of an _access scope_ to limit the principal to a specific namespace and an _access policy_ to set preconfigured reusable permissions policies. Alternatively, you can use the [.noloc]`STANDARD` type and [.noloc]`Kubernetes` [.noloc]`RBAC` groups to assign custom permissions. + + +[cols="1,1", options="header"] +|=== +|Authentication mode +|Methods + + +|`ConfigMap` only (`CONFIG_MAP`) +|`aws-auth` `ConfigMap` + +|EKS API and `ConfigMap` (`API_AND_CONFIG_MAP`) +|access entries in the EKS API, {aws} Command Line Interface, {aws} SDKs, {aws} CloudFormation, and {aws-management-console} and `aws-auth` `ConfigMap` + +|EKS API only (`API`) +|access entries in the EKS API, {aws} Command Line Interface, {aws} SDKs, {aws} CloudFormation, and {aws-management-console} +|=== + +[NOTE] +==== +Amazon EKS Auto Mode requires Access entries. +==== + +include::access-entries.adoc[leveloffset=+1] + +//include::access-policies.adoc[leveloffset=+1] + +//include::migrating-access-entries.adoc[leveloffset=+1] + +include::auth-configmap.adoc[leveloffset=+1] + +include::authenticate-oidc-identity-provider.adoc[leveloffset=+1] + +include::disassociate-oidc-identity-provider.adoc[leveloffset=+1] + +//include::access-policy-reference.adoc[leveloffset=+1] diff --git a/latest/ug/manage-access/k8s-access/migrating-access-entries.adoc b/latest/ug/manage-access/k8s-access/migrating-access-entries.adoc new file mode 100644 index 00000000..4c91b06f --- /dev/null +++ b/latest/ug/manage-access/k8s-access/migrating-access-entries.adoc @@ -0,0 +1,59 @@ + +//!!NODE_ROOT
+ +[.topic] +[[migrating-access-entries,migrating-access-entries.title]] += Migrating existing `aws-auth ConfigMap` entries to access entries +:info_doctype: section + +include::../../attributes.txt[] + +//GDC: problems with xrefs + +If you've added entries to the `aws-auth` `ConfigMap` on your cluster, we recommend that you create access entries for the existing entries in your `aws-auth` `ConfigMap`. After creating the access entries, you can remove the entries from your `ConfigMap`. You can't associate link:eks/latest/userguide/access-policies.html[access policies,type="documentation"] to entries in the `aws-auth` `ConfigMap`. If you want to associate access polices to your IAM principals, create access entries. + +[IMPORTANT] +==== + +Don't remove existing `aws-auth` `ConfigMap` entries that were created by Amazon EKS when you added a link:eks/latest/userguide/managed-node-groups.html[managed node group,type="documentation"] or a link:eks/latest/userguide/fargate-profile.html["Fargate profile",type="documentation"] to your cluster. If you remove entries that Amazon EKS created in the `ConfigMap`, your cluster won't function properly. You can however, remove any entries for link:eks/latest/userguide/worker.html["self-managed",type="documentation"] node groups after you've created access entries for them. + +==== + + +== Prerequisites [[migrating_access_entries_prereq]] + +* Familiarity with access entries and access policies. For more information, see <> and <>. +* An existing cluster with a platform version that is at or later than the versions listed in the Prerequisites of the <> topic. +* Version `{eksctl-min-version}` or later of the `eksctl` command line tool installed on your device or {aws} CloudShell. To install or update `eksctl`, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. +* [.noloc]`Kubernetes` permissions to modify the `aws-auth` `ConfigMap` in the `kube-system` namespace. +* An {aws} Identity and Access Management role or user with the following permissions: `CreateAccessEntry` and `ListAccessEntries`. For more information, see link:service-authorization/latest/reference/list_amazonelastickubernetesservice.html#amazonelastickubernetesservice-actions-as-permissions[Actions defined by Amazon Elastic Kubernetes Service,type="documentation"] in the Service Authorization Reference. + + +== `eksctl` [[migrating_access_entries_eksctl]] + +. View the existing entries in your `aws-auth ConfigMap`. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl get iamidentitymapping --cluster my-cluster +---- ++ +An example output is as follows. ++ +[source,subs="verbatim,attributes"] +---- +ARN USERNAME GROUPS ACCOUNT +{arn-aws}iam::111122223333:role/EKS-my-cluster-Admins Admins system:masters +{arn-aws}iam::111122223333:role/EKS-my-cluster-my-namespace-Viewers my-namespace-Viewers Viewers +{arn-aws}iam::111122223333:role/EKS-my-cluster-self-managed-ng-1 system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes +{arn-aws}iam::111122223333:user/my-user my-user +{arn-aws}iam::111122223333:role/EKS-my-cluster-fargateprofile1 system:node:{{SessionName}} system:bootstrappers,system:nodes,system:node-proxier +{arn-aws}iam::111122223333:role/EKS-my-cluster-managed-ng system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes +---- +. <> for any of the `ConfigMap` entries that you created returned in the previous output. 
When creating the access entries, make sure to specify the same values for `ARN`, `USERNAME`, `GROUPS`, and `ACCOUNT` returned in your output. In the example output, you would create access entries for all entries except the last two entries, since those entries were created by Amazon EKS for a Fargate profile and a managed node group. +. Delete the entries from the `ConfigMap` for any access entries that you created. If you don't delete the entry from the `ConfigMap`, the settings for the access entry for the IAM principal ARN override the `ConfigMap` entry. Replace [.replaceable]`111122223333` with your {aws} account ID and [.replaceable]`EKS-my-cluster-my-namespace-Viewers` with the name of the role in the entry in your `ConfigMap`. If the entry you're removing is for an IAM user, rather than an IAM role, replace `role` with `user` and [.replaceable]`EKS-my-cluster-my-namespace-Viewers` with the user name. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl delete iamidentitymapping --arn {arn-aws}iam::111122223333:role/EKS-my-cluster-my-namespace-Viewers --cluster my-cluster +---- \ No newline at end of file diff --git a/latest/ug/manage-access/view-kubernetes-resources.adoc b/latest/ug/manage-access/view-kubernetes-resources.adoc new file mode 100644 index 00000000..7a66764b --- /dev/null +++ b/latest/ug/manage-access/view-kubernetes-resources.adoc @@ -0,0 +1,236 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[view-kubernetes-resources,view-kubernetes-resources.title]] += View [.noloc]`Kubernetes` resources in the {aws-management-console} +:info_doctype: section +:info_title: View Kubernetes resources in the {aws-management-console} +:info_titleabbrev: Access cluster resources with console +:info_abstract: Learn how to view Kubernetes resources in the {aws-management-console}. + +[abstract] +-- +Learn how to view [.noloc]`Kubernetes` resources in the {aws-management-console}. +-- + +You can view the [.noloc]`Kubernetes` resources deployed to your cluster with the {aws-management-console}. You can't view [.noloc]`Kubernetes` resources with the {aws} CLI or https://eksctl.io/[eksctl]. To view [.noloc]`Kubernetes` resources using a command-line tool, use <>. + +[NOTE] +==== +To view the *Resources* tab and *Nodes* section on the *Compute* tab in the {aws-management-console}, the link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] that you're using must have specific IAM and [.noloc]`Kubernetes` permissions. For more information, see <>. +==== + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. In the *Clusters* list, select the cluster that contains the [.noloc]`Kubernetes` resources that you want to view. +. Select the *Resources* tab. +. Select a *Resource type* group that you want to view resources for, such as *Workloads*. You see a list of resource types in that group. +. Select a resource type, such as *Deployments*, in the *Workloads* group. You see a description of the resource type, a link to the [.noloc]`Kubernetes` documentation for more information about the resource type, and a list of resources of that type that are deployed on your cluster. If the list is empty, then there are no resources of that type deployed to your cluster. +. Select a resource to view more information about it. Try the following examples: ++ +** Select the *Workloads* group, select the *Deployments* resource type, and then select the *coredns* resource. When you select a resource, you are in *Structured view*, by default. For some resource types, you see a *Pods* section in *Structured view*. This section lists the [.noloc]`Pods` managed by the workload. You can select any [.noloc]`Pod` listed to view information about the [.noloc]`Pod`. Not all resource types display information in *Structured View*. If you select *Raw view* in the top right corner of the page for the resource, you see the complete JSON response from the [.noloc]`Kubernetes` API for the resource. +** Select the *Cluster* group and then select the *Nodes* resource type. You see a list of all nodes in your cluster. The nodes can be any <>. This is the same list that you see in the *Nodes* section when you select the *Compute* tab for your cluster. Select a node resource from the list. In *Structured view*, you also see a *Pods* section. This section shows you all [.noloc]`Pods` running on the node. + + +[[view-kubernetes-resources-permissions,view-kubernetes-resources-permissions.title]] +== Required permissions + +To view the *Resources* tab and *Nodes* section on the *Compute* tab in the {aws-management-console}, the link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] that you're using must have specific minimum IAM and [.noloc]`Kubernetes` permissions. Complete the following steps to assign the required permissions to your IAM principals. + +. 
Make sure that the `eks:AccessKubernetesApi`, and other necessary IAM permissions to view [.noloc]`Kubernetes` resources, are assigned to the IAM principal that you're using. For more information about how to edit permissions for an IAM principal, see link:IAM/latest/UserGuide/access_controlling.html#access_controlling-principals[Controlling access for principals,type="documentation"] in the IAM User Guide. For more information about how to edit permissions for a role, see link:IAM/latest/UserGuide/roles-managingrole-editing-console.html#roles-modify_permissions-policy[Modifying a role permissions policy (console),type="documentation"] in the IAM User Guide. ++ +The following example policy includes the necessary permissions for a principal to view [.noloc]`Kubernetes` resources for all clusters in your account. Replace [.replaceable]`111122223333` with your {aws} account ID. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "eks:ListFargateProfiles", + "eks:DescribeNodegroup", + "eks:ListNodegroups", + "eks:ListUpdates", + "eks:AccessKubernetesApi", + "eks:ListAddons", + "eks:DescribeCluster", + "eks:DescribeAddonVersions", + "eks:ListClusters", + "eks:ListIdentityProviderConfigs", + "iam:ListRoles" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "ssm:GetParameter", + "Resource": "{arn-aws}ssm:*:111122223333:parameter/*" + } + ] +} +---- ++ +To view nodes in <>, the <> should be able to impersonate the principal in the cluster. This allows the <> to map the principal to a [.noloc]`Kubernetes` user. +. Create a [.noloc]`Kubernetes` `rolebinding` or `clusterrolebinding` that is bound to a [.noloc]`Kubernetes` `role` or `clusterrole` that has the necessary permissions to view the [.noloc]`Kubernetes` resources. To learn more about [.noloc]`Kubernetes` roles and role bindings, see https://kubernetes.io/docs/reference/access-authn-authz/rbac/[Using RBAC Authorization] in the [.noloc]`Kubernetes` documentation. You can apply one of the following manifests to your cluster that create a `role` and `rolebinding` or a `clusterrole` and `clusterrolebinding` with the necessary [.noloc]`Kubernetes` permissions: ++ +View [.noloc]`Kubernetes` resources in all namespaces::: +** The group name in the file is `eks-console-dashboard-full-access-group`. Apply the manifest to your cluster with the following command: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml +---- + + +View [.noloc]`Kubernetes` resources in a specific namespace::: +** The namespace in this file is `default`. The group name in the file is `eks-console-dashboard-restricted-access-group`. Apply the manifest to your cluster with the following command: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml +---- ++ +If you need to change the [.noloc]`Kubernetes` group name, namespace, permissions, or any other configuration in the file, then download the file and edit it before applying it to your cluster: ++ +.. 
Download the file with one of the following commands: ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml +---- ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml +---- +.. Edit the file as necessary. +.. Apply the manifest to your cluster with one of the following commands: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f eks-console-full-access.yaml +---- ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f eks-console-restricted-access.yaml +---- +. Map the link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] to the [.noloc]`Kubernetes` user or group in the `aws-auth` `ConfigMap`. You can use a tool such as `eksctl` to update the `ConfigMap` or you can update it manually by editing it. ++ +IMPORTANT: We recommend using `eksctl`, or another tool, to edit the `ConfigMap`. For information about other tools you can use, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#use-tools-to-make-changes-to-the-aws-auth-configmap[Use tools to make changes to the aws-authConfigMap] in the Amazon EKS best practices guides. An improperly formatted `aws-auth` `ConfigMap` can cause you to lose access to your cluster. + + +=== Edit with eksctl + +. You need version `{eksctl-min-version}` or later of the `eksctl` command line tool installed on your device or {aws} CloudShell. To install or update `eksctl`, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. ++ +. View the current mappings in the `ConfigMap`. Replace [.replaceable]`my-cluster` with the name of your cluster. Replace [.replaceable]`region-code` with the {aws} Region that your cluster is in. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl get iamidentitymapping --cluster my-cluster --region=region-code +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +ARN USERNAME GROUPS ACCOUNT +{arn-aws}iam::111122223333:role/eksctl-my-cluster-my-nodegroup-NodeInstanceRole-1XLS7754U3ZPA system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes +---- +. Add a mapping for a role. This example assume that you attached the IAM permissions in the first step to a role named [.replaceable]`my-console-viewer-role`. Replace [.replaceable]`111122223333` with your account ID. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create iamidentitymapping \ + --cluster my-cluster \ + --region=region-code \ + --arn {arn-aws}iam::111122223333:role/my-console-viewer-role \ + --group eks-console-dashboard-full-access-group \ + --no-duplicate-arns +---- ++ +IMPORTANT: The role ARN can't include a path such as `role/my-team/developers/my-role`. The format of the ARN must be `{arn-aws}iam::[.replaceable]``111122223333``:role/[.replaceable]``my-role```. In this example, `my-team/developers/` needs to be removed. ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +[...] +2022-05-09 14:51:20 [ℹ] adding identity "{arn-aws}iam::111122223333:role/my-console-viewer-role" to auth ConfigMap +---- +. Add a mapping for a user. link:IAM/latest/UserGuide/id_users.html[IAM best practices,type="documentation"] recommend that you grant permissions to roles instead of users. 
This example assumes that you attached the IAM permissions in the first step to a user named [.replaceable]`my-user`. Replace [.replaceable]`111122223333` with your account ID. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create iamidentitymapping \ + --cluster my-cluster \ + --region=region-code \ + --arn {arn-aws}iam::111122223333:user/my-user \ + --group eks-console-dashboard-restricted-access-group \ + --no-duplicate-arns +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +[...] +2022-05-09 14:53:48 [ℹ] adding identity "{arn-aws}iam::111122223333:user/my-user" to auth ConfigMap +---- +. View the mappings in the `ConfigMap` again. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl get iamidentitymapping --cluster my-cluster --region=region-code +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +ARN USERNAME GROUPS ACCOUNT +{arn-aws}iam::111122223333:role/eksctl-my-cluster-my-nodegroup-NodeInstanceRole-1XLS7754U3ZPA system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes +{arn-aws}iam::111122223333:role/my-console-viewer-role eks-console-dashboard-full-access-group +{arn-aws}iam::111122223333:user/my-user eks-console-dashboard-restricted-access-group +---- + + +=== Edit ConfigMap manually + +For more information about adding users or roles to the `aws-auth` `ConfigMap`, see <>. + + +. Open the `aws-auth` `ConfigMap` for editing. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit -n kube-system configmap/aws-auth +---- +. Add the mappings to the `aws-auth` `ConfigMap`, but don't replace any of the existing mappings. The following example adds mappings between link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principals,type="documentation"] with permissions added in the first step and the [.noloc]`Kubernetes` groups created in the previous step: ++ +** The [.replaceable]`my-console-viewer-role` role and the `eks-console-dashboard-full-access-group`. +** The [.replaceable]`my-user` user and the `eks-console-dashboard-restricted-access-group`. ++ +These examples assume that you attached the IAM permissions in the first step to a role named [.replaceable]`my-console-viewer-role` and a user named [.replaceable]`my-user`. Replace [.replaceable]`111122223333` with your {aws} account ID. ++ +[source,yaml,subs="verbatim,attributes"] +---- +apiVersion: v1 +data: +  mapRoles: | +    - groups: +      - eks-console-dashboard-full-access-group +      rolearn: {arn-aws}iam::111122223333:role/my-console-viewer-role +      username: my-console-viewer-role +  mapUsers: | +    - groups: +      - eks-console-dashboard-restricted-access-group +      userarn: {arn-aws}iam::111122223333:user/my-user +      username: my-user +---- ++ +IMPORTANT: The role ARN can't include a path such as `role/my-team/developers/my-console-viewer-role`. The format of the ARN must be `{arn-aws}iam::[.replaceable]``111122223333``:role/[.replaceable]``my-console-viewer-role```. In this example, `my-team/developers/` needs to be removed. +. Save the file and exit your text editor. diff --git a/latest/ug/ml/capacity-blocks-mng.adoc b/latest/ug/ml/capacity-blocks-mng.adoc new file mode 100644 index 00000000..c8d664dc --- /dev/null +++ b/latest/ug/ml/capacity-blocks-mng.adoc @@ -0,0 +1,89 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[capacity-blocks-mng,capacity-blocks-mng.title]] += Create a managed node group with Capacity Blocks for ML +:info_titleabbrev: Reserve GPUs for MNG + +[abstract] +-- +Capacity Blocks for machine learning (ML) allow you to reserve highly sought-after GPU instances on a future date to support your short duration ML workloads. +-- + +Capacity Blocks for machine learning (ML) allow you to reserve GPU instances on a future date to support your short duration ML workloads. For more information, see link:AWSEC2/latest/UserGuide/ec2-capacity-blocks.html[Capacity Blocks for ML,type="documentation"] in the _Amazon EC2 User Guide for Linux Instances_. + +[[capacity-blocks-mng-considerations,capacity-blocks-mng-considerations.title]] +== Considerations + +[IMPORTANT] +==== + + +* Capacity Blocks are only available for certain Amazon EC2 instance types and {aws} Regions. For compatibility information, see link:AWSEC2/latest/UserGuide/capacity-blocks-using.html#capacity-blocks-prerequisites[Work with Capacity Blocks Prerequisites,type="documentation"] in the _Amazon EC2 User Guide for Linux Instances_. +* For more information, see link:autoscaling/ec2/userguide/launch-template-capacity-blocks.html[Use Capacity Blocks for machine learning workloads,type="documentation"] in the _Amazon EC2 Auto Scaling User Guide_. +* Managed node groups with Capacity Blocks can only be created with custom launch templates. +* When upgrading managed node groups with Capacity Blocks, make sure that the desired size of the node group is set to `0`. + +==== + +[[capacity-blocks-mng-procedure,capacity-blocks-mng-procedure.title]] +== Create a managed node group with Amazon EC2 Capacity Blocks + +You can use Capacity Blocks with Amazon EKS managed node groups for provisioning and scaling GPU-accelerated worker nodes. The {aws} CloudFormation template examples that follow don't cover every aspect needed in a production clusters. Typically, you'd also want a bootstrapping script to join the node to the cluster and specify an Amazon EKS accelerated AMI. For more information, see <>. + +. Create a launch template that's appropriate for your workloads and works with Amazon EKS managed node groups. For more information, see <>. ++ +In addition to the requirements in the above procedures, make sure that the `LaunchTemplateData` includes the following: ++ +** `InstanceMarketOptions` with `MarketType` set to `"capacity-block"` +** `CapacityReservationSpecification: CapacityReservationTarget` with `CapacityReservationId` set to the Capacity Block (for example: `cr-[.replaceable]``02168da1478b509e0``` ) +** `InstanceType` set to an instance type that supports Capacity Blocks (for example: [.replaceable]`p5.48xlarge`) ++ +The following is an excerpt of a CloudFormation template that creates a launch template targeting a Capacity Block. To create a custom AMI managed node group, you can also add `ImageId` and `UserData` parameters. ++ +[source,yaml,subs="verbatim,attributes"] +---- +NodeLaunchTemplate: + Type: "{aws}::EC2::LaunchTemplate" + Properties: + LaunchTemplateData: + InstanceMarketOptions: + MarketType: "capacity-block" + CapacityReservationSpecification: + CapacityReservationTarget: + CapacityReservationId: "cr-02168da1478b509e0" + InstanceType: p5.48xlarge +---- +. Use the launch template to create a managed node group. ++ +The following is an example create node group command for Capacity Blocks. Replace [.replaceable]`example-values` with ones applicable to your cluster. 
++ +When creating the Capacity Block managed node group, do the following: ++ +** Set the `capacity-type` to `"CAPACITY_BLOCK"`. If the capacity type isn't set to `"CAPACITY_BLOCK"` or any of the other required launch template values above are missing, then the create request will be rejected. +** When specifying `subnets` in the create request, make sure to only specify the subnet in the same Availability Zone as the capacity reservation. +** If you specify a non-zero `desiredSize` in the create request, Amazon EKS will honor that when creating the Auto Scaling group (ASG). However, if the create request is made before the capacity reservation is active, then the ASG won't be able to launch Amazon EC2 instances until it becomes active. As a result, ASG scaling activities will have launch errors. When the reservation becomes active, instance launches will succeed and the ASG will be scaled up to the `desiredSize` specified at create time. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks create-nodegroup \ + --cluster-name my-cluster \ + --nodegroup-name my-mng \ + --node-role node-role-arn \ + --region region-code \ + --subnets subnet-id \ + --scaling-config minSize=node-group-min-size,maxSize=node-group-max-size,desiredSize=node-group-desired-size \ + --ami-type "AL2023_x86_64_NVIDIA" \ + --capacity-type "CAPACITY_BLOCK" \ + --launch-template id="lt-id",version=1 +---- +. Make sure that the nodes join after scale up. Amazon EKS clusters using managed node groups with Capacity Blocks don't perform any validations that instances launched actually join and register with the cluster. +. If you set `desiredSize` to `0` at create time, then you have different options to scale up the node group when the capacity reservation becomes active: ++ +** Create a scheduled scaling policy for the ASG that aligns to the Capacity Block reservation start time. For more information, see link:autoscaling/ec2/userguide/ec2-auto-scaling-scheduled-scaling.html[Scheduled scaling for Amazon EC2 Auto Scaling,type="documentation"] in the _Amazon EC2 Auto Scaling User Guide_. +** Use the Amazon EKS console or `aws eks update-nodegroup-config` to update the scaling config and set the desired size of the node group. +** Use the [.noloc]`Kubernetes` Cluster Autoscaler. For more information, see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md[Cluster Autoscaler on {aws}]. +. The node group is now ready for workloads and [.noloc]`Pods` to be scheduled. +. In order for your [.noloc]`Pods` to be gracefully drained before the reservation ends, Amazon EKS uses a scheduled scaling policy to scale down the node group size to `0`. This scheduled scaling action is named `Amazon EKS Node Group Capacity Scaledown Before Reservation End`. We recommend not editing or deleting this action. ++ +Amazon EC2 starts shutting down the instances 30 minutes before the reservation end time. As a result, Amazon EKS will set up a scheduled scale down on the node group 40 minutes prior to the reservation end in order to safely and gracefully evict [.noloc]`Pods`. diff --git a/latest/ug/ml/capacity-blocks.adoc b/latest/ug/ml/capacity-blocks.adoc new file mode 100644 index 00000000..8f9ab3eb --- /dev/null +++ b/latest/ug/ml/capacity-blocks.adoc @@ -0,0 +1,103 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[capacity-blocks,capacity-blocks.title]] += Create self-managed nodes with Capacity Blocks for ML +:info_titleabbrev: Reserve GPUs for SMN + +[abstract] +-- +Capacity Blocks for machine learning (ML) allow you to reserve highly sought-after GPU instances on a future date to support your short duration ML workloads. +-- + +Capacity Blocks for machine learning (ML) allow you to reserve GPU instances on a future date to support your short duration ML workloads. For more information, see link:AWSEC2/latest/UserGuide/ec2-capacity-blocks.html[Capacity Blocks for ML,type="documentation"] in the _Amazon EC2 User Guide for Linux Instances_. + +[[capacity-blocks-considerations,capacity-blocks-considerations.title]] +== Considerations + +[IMPORTANT] +==== + + +* Capacity Blocks are only available for certain Amazon EC2 instance types and {aws} Regions. For compatibility information, see link:AWSEC2/latest/UserGuide/capacity-blocks-using.html#capacity-blocks-prerequisites[Work with Capacity Blocks Prerequisites,type="documentation"] in the _Amazon EC2 User Guide for Linux Instances_. +* Capacity Blocks currently cannot be used with [.noloc]`Karpenter`. +* If you create a self-managed node group prior to the capacity reservation becoming active, then set the desired capacity to `0`. +* To allow sufficient time to gracefully drain the node(s), we suggest that you schedule scaling to scale to zero more than 30 minutes before the Capacity Block reservation end time. +* In order for your [.noloc]`Pods` to be gracefully drained, we recommend that you set up {aws} Node Termination Handler as explained in the example steps. + +==== + +[[capacity-blocks-procedure,capacity-blocks-procedure.title]] +== Use Capacity Blocks with self-managed nodes + +You can use Capacity Blocks with Amazon EKS for provisioning and scaling your self-managed nodes. The following steps give a general example overview. The {aws} CloudFormation template examples don't cover every aspect needed in a production workload. Typically you'd also want a bootstrapping script to join the node to the cluster, specify an Amazon EKS accelerated AMI, and an appropriate instance profile for joining the cluster. For more information, see <>. + +. Create a launch template that's applicable to your workload. For more information, see link:autoscaling/ec2/userguide/launch-template-capacity-blocks.html[Use Capacity Blocks for machine learning workloads,type="documentation"] in the _Amazon EC2 Auto Scaling User Guide_. ++ +Make sure the `LaunchTemplateData` includes the following: ++ +** `InstanceMarketOptions` with `MarketType` set to `"capacity-block"` +** `CapacityReservationSpecification: CapacityReservationTarget` with `CapacityReservationId` set to the Capacity Block (for example: `cr-[.replaceable]``02168da1478b509e0``` ) +** `IamInstanceProfile` with the `Arn` set to the applicable [.replaceable]`iam-instance-profile-arn` +** `ImageId` set to the applicable [.replaceable]`image-id` +** `InstanceType` set to an instance type that supports Capacity Blocks (for example: [.replaceable]`p5.48xlarge`) +** `SecurityGroupIds` set to the applicable IDs (for example: [.replaceable]`sg-05b1d815d1EXAMPLE`) +** `UserData` set to the applicable [.replaceable]`user-data` for your self-managed node group ++ +The following is an excerpt of a CloudFormation template that creates a launch template targeting a Capacity Block. 
++ +[source,yaml,subs="verbatim,attributes"] +---- +NodeLaunchTemplate: + Type: "{aws}::EC2::LaunchTemplate" + Properties: + LaunchTemplateData: + InstanceMarketOptions: + MarketType: "capacity-block" + CapacityReservationSpecification: + CapacityReservationTarget: + CapacityReservationId: "cr-02168da1478b509e0" + IamInstanceProfile: + Arn: iam-instance-profile-arn + ImageId: image-id + InstanceType: p5.48xlarge + KeyName: key-name + SecurityGroupIds: + - sg-05b1d815d1EXAMPLE + UserData: user-data +---- +You must pass the subnet in the Availability Zone in which the reservation is made because Capacity Blocks are zonal. +. Use the launch template to create a self-managed node group. If you're doing this prior to the capacity reservation becoming active, then set the desired capacity to `0`. When creating the node group, make sure that you are only specifying the respective subnet for the Availability Zone in which the capacity is reserved. ++ +The following is a sample CloudFormation template that you can reference when creating one that is applicable to your workload. This example gets the `LaunchTemplateId` and `Version` of the `{aws}::EC2::LaunchTemplate` resource shown in the previous step. It also gets the values for `DesiredCapacity`, `MaxSize`, `MinSize`, and `VPCZoneIdentifier` that are declared elsewhere in the same template. ++ +[source,yaml,subs="verbatim,attributes"] +---- +NodeGroup: + Type: "{aws}::AutoScaling::AutoScalingGroup" + Properties: + DesiredCapacity: !Ref NodeAutoScalingGroupDesiredCapacity + LaunchTemplate: + LaunchTemplateId: !Ref NodeLaunchTemplate + Version: !GetAtt NodeLaunchTemplate.LatestVersionNumber + MaxSize: !Ref NodeAutoScalingGroupMaxSize + MinSize: !Ref NodeAutoScalingGroupMinSize + VPCZoneIdentifier: !Ref Subnets + Tags: + - Key: Name + PropagateAtLaunch: true + Value: !Sub ${ClusterName}-${NodeGroupName}-Node + - Key: !Sub kubernetes.io/cluster/${ClusterName} + PropagateAtLaunch: true + Value: owned +---- +. Once the node group is created successfully, make sure to record the `NodeInstanceRole` for the node group that was created. You need this in order to make sure that when the node group is scaled, the new nodes join the cluster and [.noloc]`Kubernetes` is able to recognize the nodes. For more information, see the {aws-management-console} instructions in <>. +. We recommend that you create a scheduled scaling policy for the Auto Scaling group that aligns to the Capacity Block reservation times. For more information, see link:autoscaling/ec2/userguide/ec2-auto-scaling-scheduled-scaling.html[Scheduled scaling for Amazon EC2 Auto Scaling,type="documentation"] in the _Amazon EC2 Auto Scaling User Guide_. ++ +You can use all of the instances you reserved until 30 minutes before the end time of the Capacity Block. Instances that are still running at that time will start terminating. To allow sufficient time to gracefully drain the node(s), we suggest that you schedule scaling to scale to zero more than 30 minutes before the Capacity Block reservation end time. ++ +If you want to instead scale up manually whenever the capacity reservation becomes `Active`, then you need to update the Auto Scaling group's desired capacity at the start time of the Capacity Block reservation. Then you would need to also scale down manually more than 30 minutes before the Capacity Block reservation end time. +. The node group is now ready for workloads and [.noloc]`Pods` to be scheduled. +. 
In order for your [.noloc]`Pods` to be gracefully drained, we recommend that you set up {aws} Node Termination Handler. This handler can watch for "ASG Scale-in" lifecycle events from Amazon EC2 Auto Scaling using EventBridge and allow the [.noloc]`Kubernetes` control plane to take the required action before the instance becomes unavailable. Otherwise, your [.noloc]`Pods` and [.noloc]`Kubernetes` objects will get stuck in a pending state. For more information, see https://github.com/aws/aws-node-termination-handler[{aws} Node Termination Handler] on GitHub. ++ +If you don't set up the Node Termination Handler, we recommend that you start draining your [.noloc]`Pods` manually before hitting the 30-minute window so that they have enough time to be gracefully drained. diff --git a/latest/ug/ml/inferentia-support.adoc b/latest/ug/ml/inferentia-support.adoc new file mode 100644 index 00000000..be85ead1 --- /dev/null +++ b/latest/ug/ml/inferentia-support.adoc @@ -0,0 +1,251 @@ +//!!NODE_ROOT
+include::../attributes.txt[] + +[.topic] +[[inferentia-support,inferentia-support.title]] += Use {aws} [.noloc]`Inferentia` instances with Amazon EKS for Machine Learning +:info_doctype: section +:info_title: Use {aws} Inferentia instances with your EKS cluster for Machine Learning +:info_titleabbrev: Prepare Inferentia clusters +:info_abstract: Learn how to create an Amazon EKS cluster with nodes running Amazon EC2 Inf1 instances for machine learning inference using {aws} Inferentia chips and deploy a TensorFlow Serving application. + +[abstract] +-- +Learn how to create an Amazon EKS cluster with nodes running Amazon EC2 Inf1 instances for machine learning inference using {aws} Inferentia chips and deploy a TensorFlow Serving application. +-- + +This topic describes how to create an Amazon EKS cluster with nodes running link:ec2/instance-types/inf1/[Amazon EC2 Inf1,type="marketing"] instances and (optionally) deploy a sample application. Amazon EC2 Inf1 instances are powered by link:machine-learning/inferentia/[{aws} Inferentia,type="marketing"] chips, which are custom built by {aws} to provide high performance and lowest cost inference in the cloud. Machine learning models are deployed to containers using link:machine-learning/neuron/[{aws} Neuron,type="marketing"], a specialized software development kit (SDK) consisting of a compiler, runtime, and profiling tools that optimize the machine learning inference performance of Inferentia chips. {aws} Neuron supports popular machine learning frameworks such as TensorFlow, PyTorch, and MXNet. + +[NOTE] +==== + +Neuron device logical IDs must be contiguous. If a [.noloc]`Pod` requesting multiple Neuron devices is scheduled on an `inf1.6xlarge` or `inf1.24xlarge` instance type (which have more than one Neuron device), that [.noloc]`Pod` will fail to start if the [.noloc]`Kubernetes` scheduler selects non-contiguous device IDs. For more information, see https://github.com/aws/aws-neuron-sdk/issues/110[Device logical IDs must be contiguous] on [.noloc]`GitHub`. + +==== + +[[inferentia-prerequisites,inferentia-prerequisites.title]] +== Prerequisites + +* Have `eksctl` installed on your computer. If you don't have it installed, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. +* Have `kubectl` installed on your computer. For more information, see <>. +* (Optional) Have `python3` installed on your computer. If you don't have it installed, then see https://www.python.org/downloads/[Python downloads] for installation instructions. + + +[[create-cluster-inferentia,create-cluster-inferentia.title]] +== Create a cluster + +. Create a cluster with Inf1 Amazon EC2 instance nodes. You can replace [.replaceable]`inf1.2xlarge` with any link:ec2/instance-types/inf1/[Inf1 instance type,type="marketing"]. The `eksctl` utility detects that you are launching a node group with an `Inf1` instance type and will start your nodes using one of the Amazon EKS optimized accelerated Amazon Linux AMIs. ++ +NOTE: You can't use <> with TensorFlow Serving. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create cluster \ + --name inferentia \ + --region region-code \ + --nodegroup-name ng-inf1 \ + --node-type inf1.2xlarge \ + --nodes 2 \ + --nodes-min 1 \ + --nodes-max 4 \ + --ssh-access \ + --ssh-public-key your-key \ + --with-oidc +---- ++ +NOTE: Note the value of the following line of the output. It's used in a later (optional) step. 
++ +[source,bash,subs="verbatim,attributes"] +---- +[9] adding identity "{arn-aws}iam::111122223333:role/eksctl-inferentia-nodegroup-ng-in-NodeInstanceRole-FI7HIYS3BS09" to auth ConfigMap +---- ++ +When launching a node group with `Inf1` instances, `eksctl` automatically installs the {aws} Neuron [.noloc]`Kubernetes` device plugin. This plugin advertises Neuron devices as a system resource to the [.noloc]`Kubernetes` scheduler, which can be requested by a container. In addition to the default Amazon EKS node IAM policies, the Amazon S3 read only access policy is added so that the sample application, covered in a later step, can load a trained model from Amazon S3. +. Make sure that all [.noloc]`Pods` have started correctly. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get pods -n kube-system +---- ++ +Abbreviated output: ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME READY STATUS RESTARTS AGE +[...] +neuron-device-plugin-daemonset-6djhp 1/1 Running 0 5m +neuron-device-plugin-daemonset-hwjsj 1/1 Running 0 5m +---- + + +[[deploy-tensorflow-serving-application,deploy-tensorflow-serving-application.title]] +== (Optional) Deploy a TensorFlow Serving application image + +A trained model must be compiled to an Inferentia target before it can be deployed on Inferentia instances. To continue, you will need a https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/tensorflow-neuron/index.html[Neuron optimized TensorFlow] model saved in Amazon S3. If you don't already have a SavedModel, please follow the tutorial for link:dlami/latest/devguide/tutorial-inferentia-tf-neuron.html[creating a Neuron compatible ResNet50 model,type="documentation"] and upload the resulting SavedModel to S3. ResNet-50 is a popular machine learning model used for image recognition tasks. For more information about compiling Neuron models, see link:dlami/latest/devguide/tutorial-inferentia.html[The {aws} Inferentia Chip With DLAMI,type="documentation"] in the {aws} Deep Learning AMIs Developer Guide. + +The sample deployment manifest manages a pre-built inference serving container for TensorFlow provided by {aws} Deep Learning Containers. Inside the container is the {aws} Neuron Runtime and the TensorFlow Serving application. A complete list of pre-built Deep Learning Containers optimized for Neuron is maintained on [.noloc]`GitHub` under https://github.com/aws/deep-learning-containers/blob/master/available_images.md#neuron-inference-containers[Available Images]. At start-up, the DLC will fetch your model from Amazon S3, launch Neuron TensorFlow Serving with the saved model, and wait for prediction requests. + +The number of Neuron devices allocated to your serving application can be adjusted by changing the `aws.amazon.com/neuron` resource in the deployment yaml. Please note that communication between TensorFlow Serving and the Neuron runtime happens over GRPC, which requires passing the `IPC_LOCK` capability to the container. + +. Add the `AmazonS3ReadOnlyAccess` IAM policy to the node instance role that was created in step 1 of <>. This is necessary so that the sample application can load a trained model from Amazon S3. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam attach-role-policy \ + --policy-arn {arn-aws}iam::aws:policy/AmazonS3ReadOnlyAccess \ + --role-name eksctl-inferentia-nodegroup-ng-in-NodeInstanceRole-FI7HIYS3BS09 +---- +. Create a file named `rn50_deployment.yaml` with the following contents. 
Update the region-code and model path to match your desired settings. The model name is for identification purposes when a client makes a request to the TensorFlow server. This example uses a model name to match a sample ResNet50 client script that will be used in a later step for sending prediction requests. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws ecr list-images --repository-name neuron-rtd --registry-id 790709498068 --region us-west-2 +---- ++ +[source,yaml,subs="verbatim,attributes"] +---- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: eks-neuron-test + labels: + app: eks-neuron-test + role: master +spec: + replicas: 2 + selector: + matchLabels: + app: eks-neuron-test + role: master + template: + metadata: + labels: + app: eks-neuron-test + role: master + spec: + containers: + - name: eks-neuron-test + image: 763104351884.dkr.ecr.us-east-1.amazonaws.com/tensorflow-inference-neuron:1.15.4-neuron-py37-ubuntu18.04 + command: + - /usr/local/bin/entrypoint.sh + args: + - --port=8500 + - --rest_api_port=9000 + - --model_name=resnet50_neuron + - --model_base_path=s3://${your-bucket-of-models}/resnet50_neuron/ + ports: + - containerPort: 8500 + - containerPort: 9000 + imagePullPolicy: IfNotPresent + env: + - name: AWS_REGION + value: "us-east-1" + - name: S3_USE_HTTPS + value: "1" + - name: S3_VERIFY_SSL + value: "0" + - name: S3_ENDPOINT + value: s3.us-east-1.amazonaws.com + - name: AWS_LOG_LEVEL + value: "3" + resources: + limits: + cpu: 4 + memory: 4Gi + aws.amazon.com/neuron: 1 + requests: + cpu: "1" + memory: 1Gi + securityContext: + capabilities: + add: + - IPC_LOCK +---- +. Deploy the model. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f rn50_deployment.yaml +---- +. Create a file named `rn50_service.yaml` with the following contents. The HTTP and gRPC ports are opened for accepting prediction requests. ++ +[source,yaml,subs="verbatim,attributes"] +---- +kind: Service +apiVersion: v1 +metadata: + name: eks-neuron-test + labels: + app: eks-neuron-test +spec: + type: ClusterIP + ports: + - name: http-tf-serving + port: 8500 + targetPort: 8500 + - name: grpc-tf-serving + port: 9000 + targetPort: 9000 + selector: + app: eks-neuron-test + role: master +---- +. Create a [.noloc]`Kubernetes` service for your TensorFlow model Serving application. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f rn50_service.yaml +---- + + +[[make-predictions-against-tensorflow-service,make-predictions-against-tensorflow-service.title]] +== (Optional) Make predictions against your TensorFlow Serving service +. To test locally, forward the gRPC port to the `eks-neuron-test` service. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl port-forward service/eks-neuron-test 8500:8500 & +---- +. Create a Python script called `tensorflow-model-server-infer.py` with the following content. This script runs inference via gRPC, which is service framework. 
+ +[source,python,subs="verbatim,attributes"] +---- +import numpy as np +import grpc +import tensorflow as tf +from tensorflow.keras.preprocessing import image +from tensorflow.keras.applications.resnet50 import preprocess_input +from tensorflow_serving.apis import predict_pb2 +from tensorflow_serving.apis import prediction_service_pb2_grpc +from tensorflow.keras.applications.resnet50 import decode_predictions + +if __name__ == '__main__': +    channel = grpc.insecure_channel('localhost:8500') +    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) +    img_file = tf.keras.utils.get_file( +        "./kitten_small.jpg", +        "https://raw.githubusercontent.com/awslabs/mxnet-model-server/master/docs/images/kitten_small.jpg") +    img = image.load_img(img_file, target_size=(224, 224)) +    img_array = preprocess_input(image.img_to_array(img)[None, ...]) +    request = predict_pb2.PredictRequest() +    request.model_spec.name = 'resnet50_neuron' +    request.inputs['input'].CopyFrom( +        tf.make_tensor_proto(img_array, shape=img_array.shape)) +    result = stub.Predict(request) +    prediction = tf.make_ndarray(result.outputs['output']) +    print(decode_predictions(prediction)) +---- +. Run the script to submit predictions to your service. ++ +[source,bash,subs="verbatim,attributes"] +---- +python3 tensorflow-model-server-infer.py +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +[[(u'n02123045', u'tabby', 0.68817204), (u'n02127052', u'lynx', 0.12701613), (u'n02123159', u'tiger_cat', 0.08736559), (u'n02124075', u'Egyptian_cat', 0.063844085), (u'n02128757', u'snow_leopard', 0.009240591)]] +---- diff --git a/latest/ug/ml/machine-learning-on-eks.adoc b/latest/ug/ml/machine-learning-on-eks.adoc new file mode 100644 index 00000000..3a1aaf86 --- /dev/null +++ b/latest/ug/ml/machine-learning-on-eks.adoc @@ -0,0 +1,68 @@ +//!!NODE_ROOT +include::../attributes.txt[] +[.topic] +[[machine-learning-on-eks,machine-learning-on-eks.title]] += Overview of Machine Learning on Amazon EKS +:doctype: book +:sectnums: +:toc: left +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . +:info_doctype: chapter +:info_title: Machine Learning on Amazon EKS Overview +:info_titleabbrev: Machine Learning on EKS +:keywords: Machine Learning, Amazon EKS, Artificial Intelligence +:info_abstract: Complete guide for running Machine Learning applications on Amazon EKS + +[abstract] +-- +Complete guide for running Machine Learning applications on Amazon EKS. This includes everything from provisioning infrastructure to choosing and deploying Machine Learning workloads on Amazon EKS. +-- + +[[ml-features,ml-features.title]] + +Machine Learning (ML) is an area of Artificial Intelligence (AI) where machines process large amounts of data to look for patterns and make connections between the data. This can expose new relationships and help predict outcomes that might not have been apparent otherwise. + +For large-scale ML projects, data centers must be able to store large amounts of data, process data quickly, and integrate data from many sources. The platforms running ML applications must be reliable and secure, but also offer resiliency to recover from data center outages and application failures. Amazon Elastic Kubernetes Service (EKS), running in the {aws} cloud, is particularly suited for ML workloads. + +The primary goal of this section of the EKS User Guide is to help you put together the hardware and software components to build platforms to run Machine Learning workloads in an EKS cluster.
+We start by explaining the features and services available to you in EKS and the {aws} cloud, then provide you with tutorials to help you work with ML platforms, frameworks, and models. + +== Advantages of Machine Learning on EKS and the {aws} cloud + +Amazon Elastic Kubernetes Service (EKS) is a powerful, managed Kubernetes platform that has become a cornerstone for deploying and managing AI/ML workloads in the cloud. With its ability to handle complex, resource-intensive tasks, Amazon EKS provides a scalable and flexible foundation for running AI/ML models, making it an ideal choice for organizations aiming to harness the full potential of machine learning. + +Key Advantages of AI/ML Platforms on Amazon EKS include: + +* *Scalability and Flexibility* +Amazon EKS enables organizations to scale AI/ML workloads seamlessly. Whether you're training large language models that require vast amounts of compute power or deploying inference pipelines that need to handle unpredictable traffic patterns, EKS scales up and down efficiently, optimizing resource use and cost. + +* *High Performance with GPUs and Neuron Instances* +Amazon EKS supports a wide range of compute options, including GPUs and {aws}} Neuron instances, which are essential for accelerating AI/ML workloads. This support allows for high-performance training and low-latency inference, ensuring that models run efficiently in production environments. + +* *Integration with AI/ML Tools* +Amazon EKS integrates seamlessly with popular AI/ML tools and frameworks like TensorFlow, PyTorch, and Ray, providing a familiar and robust ecosystem for data scientists and engineers. These integrations enable users to leverage existing tools while benefiting from the scalability and management capabilities of Kubernetes. + +* *Automation and Management* +Kubernetes on Amazon EKS automates many of the operational tasks associated with managing AI/ML workloads. Features like automatic scaling, rolling updates, and self-healing ensure that your applications remain highly available and resilient, reducing the overhead of manual intervention. + +* *Security and Compliance* +Running AI/ML workloads on Amazon EKS provides robust security features, including fine-grained IAM roles, encryption, and network policies, ensuring that sensitive data and models are protected. EKS also adheres to various compliance standards, making it suitable for enterprises with strict regulatory requirements. + +== Why Choose Amazon EKS for AI/ML? +Amazon EKS offers a comprehensive, managed environment that simplifies the deployment of AI/ML models while providing the performance, scalability, and security needed for production workloads. With its ability to integrate with a variety of AI/ML tools and its support for advanced compute resources, EKS empowers organizations to accelerate their AI/ML initiatives and deliver innovative solutions at scale. + +By choosing Amazon EKS, you gain access to a robust infrastructure that can handle the complexities of modern AI/ML workloads, allowing you to focus on innovation and value creation rather than managing underlying systems. Whether you are deploying simple models or complex AI systems, Amazon EKS provides the tools and capabilities needed to succeed in a competitive and rapidly evolving field. + +== Start using Machine Learning on EKS + +To begin planning for and using Machine Learning platforms and workloads on EKS on the {aws} cloud, proceed to the <> section. 
+ +include::ml-get-started.adoc[leveloffset=+1] + +include::ml-prepare-for-cluster.adoc[leveloffset=+1] + +include::ml-tutorials.adoc[leveloffset=+1] diff --git a/latest/ug/ml/ml-eks-optimized-ami.adoc b/latest/ug/ml/ml-eks-optimized-ami.adoc new file mode 100644 index 00000000..7e08f399 --- /dev/null +++ b/latest/ug/ml/ml-eks-optimized-ami.adoc @@ -0,0 +1,85 @@ +//!!NODE_ROOT
+[.topic] +[[ml-eks-optimized-ami,ml-eks-optimized-ami.title]] += Run GPU-accelerated containers (Linux on EC2) +:info_titleabbrev: Run Linux GPU AMIs + +include::../attributes.txt[] + +The Amazon EKS optimized accelerated Amazon Linux AMIs are built on top of the standard Amazon EKS optimized Amazon Linux AMIs. For details on these AMIs, see <>. +The following text describes how to enable {aws} Neuron-based workloads. + +.To enable {aws} Neuron (ML accelerator) based workloads +For details on training and inference workloads using [.noloc]`Neuron` in Amazon EKS, see the following references: + +* https://awsdocs-neuron.readthedocs-hosted.com/en/latest/containers/kubernetes-getting-started.html[Containers - Kubernetes - Getting Started] in the _{aws} [.noloc]`Neuron` Documentation_ +* https://github.com/aws-neuron/aws-neuron-eks-samples/blob/master/README.md#training[Training] in {aws} [.noloc]`Neuron` EKS Samples on GitHub +* <> + +The following procedure describes how to run a workload on a GPU based instance with the Amazon EKS optimized accelerated AMIs. + +. After your GPU nodes join your cluster, you must apply the https://github.com/NVIDIA/k8s-device-plugin[NVIDIA device plugin for Kubernetes] as a [.noloc]`DaemonSet` on your cluster. Replace [.replaceable]`vX.X.X` with your desired https://github.com/NVIDIA/k8s-device-plugin/releases[NVIDIA/k8s-device-plugin] version before running the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/vX.X.X/deployments/static/nvidia-device-plugin.yml +---- +. You can verify that your nodes have allocatable GPUs with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get nodes "-o=custom-columns=NAME:.metadata.name,GPU:.status.allocatable.nvidia\.com/gpu" +---- +. Create a file named `nvidia-smi.yaml` with the following contents. Replace [.replaceable]`tag` with your desired tag for https://hub.docker.com/r/nvidia/cuda/tags[nvidia/cuda]. This manifest launches an https://developer.nvidia.com/cuda-zone[NVIDIA CUDA] container that runs `nvidia-smi` on a node. ++ +[source,yaml,subs="verbatim,attributes"] +---- +apiVersion: v1 +kind: Pod +metadata: + name: nvidia-smi +spec: + restartPolicy: OnFailure + containers: + - name: nvidia-smi + image: nvidia/cuda:tag + args: + - "nvidia-smi" + resources: + limits: + nvidia.com/gpu: 1 +---- +. Apply the manifest with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f nvidia-smi.yaml +---- +. After the [.noloc]`Pod` has finished running, view its logs with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl logs nvidia-smi +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +Mon Aug 6 20:23:31 20XX ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI XXX.XX Driver Version: XXX.XX | +|-------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +|===============================+======================+======================| +| 0 Tesla V100-SXM2... 
On | 00000000:00:1C.0 Off | 0 | +| N/A 46C P0 47W / 300W | 0MiB / 16160MiB | 0% Default | ++-------------------------------+----------------------+----------------------+ ++-----------------------------------------------------------------------------+ +| Processes: GPU Memory | +| GPU PID Type Process name Usage | +|=============================================================================| +| No running processes found | ++-----------------------------------------------------------------------------+ +---- diff --git a/latest/ug/ml/ml-eks-windows-optimized-ami.adoc b/latest/ug/ml/ml-eks-windows-optimized-ami.adoc new file mode 100644 index 00000000..9dcf0425 --- /dev/null +++ b/latest/ug/ml/ml-eks-windows-optimized-ami.adoc @@ -0,0 +1,223 @@ +//!!NODE_ROOT
+[.topic] +[[ml-eks-windows-optimized-ami,ml-eks-windows-optimized-ami.title]] += Run GPU-accelerated containers (Windows on EC2 G-Series) +:info_titleabbrev: Run Windows GPU AMIs + +include::../attributes.txt[] + +[IMPORTANT] +==== +The https://github.com/TensorWorks/DirectX-Device-Plugins[Kubernetes Device Plugin for DirectX] by TensorWorks is a third-party tool that is not endorsed, supported, or maintained by {aws}. {aws} assumes no responsibility for the security, reliability, or performance of this plugin. +==== + +Learn how to run GPU-accelerated Windows container workloads on Amazon EKS (Elastic Kubernetes Service) using NVIDIA GPUs with the Kubernetes Device Plugin for DirectX by TensorWorks. For more information, see https://github.com/TensorWorks/DirectX-Device-Plugins[Kubernetes Device Plugin for DirectX]. + +There are two main approaches to setting up GPU-acceleration for your Windows containers: + +* **Option 1**: <> with the required GPU drivers pre-installed. +** Use this approach when you need a consistent, pre-configured environment ready to run GPU-accelerated Windows containers, and you're able to invest the additional effort to build and maintain the custom AMI. +* **Option 2**: Install the necessary GPU drivers on your EKS worker nodes after launching your instance. +** Use this approach when you want a simpler setup process and don't mind installing the GPU drivers on each new worker node. More suited to a development environment when you are evaluating or prototyping GPU-accelerated workloads. + +Both approaches can be leveraged using the steps detailed in this guide. + +== Considerations + +This guide provides steps to install and set up GPU-acceleration for your Windows containers using NVIDIA GPUs, NVIDIA GRID drivers, and the https://github.com/TensorWorks/DirectX-Device-Plugins[Kubernetes Device Plugin for DirectX] by TensorWorks. The steps have been tested and verified to provide GPU-acceleration for your Windows container workloads on Amazon EKS. See <> for more information on compatible drivers and device plugins. Before proceeding, note the following: + +* Only G-family instance types with link:AWSEC2/latest/UserGuide/install-nvidia-driver#nvidia-GRID-driver[NVIDIA GRID drivers,type="documentation"] have been tested and verified to work with this guide. While other instance types and driver combinations may also be capable of running GPU-accelerated Windows containers, they may require additional configuration steps not covered in this guide. +* Only DirectX-based workloads have been tested and verified to work with this guide. While other GPU APIs like OpenGL, Vulkan, and OpenCL may potentially be compatible to run GPU-accelerated Windows containers, they may require additional configuration steps not covered in this guide. +* There are some known limitations to be aware of before running GPU-accelerated Windows containers. Please see the <> section for more information. + + +[[ml-eks-windows-ami-prerequisites,ml-eks-windows-ami-prerequisites.title]] +== Prerequisites + +To enable GPU acceleration for your Windows containers on Amazon EKS, you'll need to prepare the following requirements before proceeding: + +* Launch an Amazon EKS cluster with Kubernetes v1.27 or newer. +* Provision Windows nodes with Windows Server 2022 or newer. +* Provision Windows nodes in the G-family of instance types, such as link:ec2/instance-types/g4/[G4,type="marketing"] or link:ec2/instance-types/g5/[G5,type="marketing"]. 
+* Provision Windows nodes with containerd `1.7.x` or `2.x.x` as the container runtime. (See <> to verify the containerd version in your Amazon EKS Optimized AMI.)
+
+[[ml-eks-windows-ami-install-gpu-driver,ml-eks-windows-ami-install-gpu-driver.title]]
+== Install the GPU driver on each Windows node
+
+To install the NVIDIA GRID drivers on your EKS worker nodes, follow the steps outlined in link:AWSEC2/latest/UserGuide/install-nvidia-driver.html[NVIDIA drivers for your Amazon EC2 instance,type="documentation"].
+Navigate to link:AWSEC2/latest/UserGuide/install-nvidia-driver#nvidia-GRID-driver[Installation options - Option 3: GRID drivers,type="documentation"] and follow the installation steps.
+
+**Install for Windows Server Core**
+
+For Windows Server Core, which doesn't have a desktop experience, install NVIDIA GRID drivers silently by using the following commands:
+
+[source,bash,subs="verbatim,attributes,quotes"]
+----
+$nvidiaInstallerFilePath = "nvidia-driver-installer.exe" # Replace with the path to the installer
+$installerArguments = "-s -clean -noreboot -noeula"
+Start-Process -FilePath $nvidiaInstallerFilePath -ArgumentList $installerArguments -Wait -NoNewWindow -PassThru
+----
+
+**Verify your installation**
+
+Run the following PowerShell command to show diagnostic information about the GPUs on the instance:
+
+[source,bash,subs="verbatim,attributes,quotes"]
+----
+nvidia-smi
+----
+
+This command displays the NVIDIA driver version, as well as information about the GPU hardware. Ensure that the output of this command matches the NVIDIA GRID driver version that you expected to be installed.
+
+[[ml-eks-windows-ami-deploy-gpu-driver,ml-eks-windows-ami-deploy-gpu-driver.title]]
+== Deploy the GPU device plugin on each node
+
+To enable discovery and exposure of the GPU resources to containers on your Windows nodes, you will need a device plugin.
+Deploy the https://github.com/TensorWorks/DirectX-Device-Plugins[DirectX Device Plugin] by TensorWorks on each worker node by running it as a DaemonSet in your EKS cluster.
+Follow the installation guide in the https://github.com/TensorWorks/DirectX-Device-Plugins/blob/main/README.md[README.md]. When you deploy the device plugin, it is recommended to:
+
+* Deploy the device plugin in the `kube-system` namespace.
+* Set appropriate resource limits for the DaemonSet to ensure that it does not consume excessive resources on your nodes.
+
+[NOTE]
+====
+The device plugin DaemonSet will run on every node as a host process container with elevated privileges. It is recommended to implement RBAC controls to restrict access to this DaemonSet so only authorized users can execute privileged commands.
+====
+
+When running GPU-accelerated containers, the device plugin supports two modes:
+
+* **Single-tenancy mode**: This mode dedicates all GPU resources to a single container on the instance. Install the device plugins with single-tenancy support using the following command. See the README.md for more information.
++
+[source,bash,subs="verbatim,attributes,quotes"]
+----
+kubectl apply -f "https://raw.githubusercontent.com/TensorWorks/directx-device-plugins/main/deployments/default-daemonsets.yml"
+----
+
+* **Multi-tenancy mode**: This mode allows sharing GPU resources among multiple containers on the instance. Install the device plugins with multi-tenancy support using the following command. See the README.md for more information.
++ +[source,bash,subs="verbatim,attributes,quotes"] +---- +kubectl apply -f "https://raw.githubusercontent.com/TensorWorks/directx-device-plugins/main/deployments/multitenancy-inline.yml" +---- ++ +Alternatively, use a ConfigMap to specify the multi-tenancy. ++ +[source,bash,subs="verbatim,attributes,quotes"] +---- +kubectl apply -f "https://raw.githubusercontent.com/TensorWorks/directx-device-plugins/main/deployments/multitenancy-configmap.yml" +---- + + +[[ml-eks-windows-ami-verify-device-plugin,ml-eks-windows-ami-verify-device-plugin.title]] +=== Verifying the device plugin deployment + +After you have deployed the device plugin, run the following command to verify the DirectX Device Plugin is running correctly on your all your Windows nodes. +[source,bash,subs="verbatim,attributes,quotes"] +---- +kubectl get ds device-plugin-wddm -n [.replaceable]`` +---- + + +[[ml-eks-windows-ami-verify-container-deployment,ml-eks-windows-ami-verify-container-deployment.title]] +=== Verifying containers are ready for deployment + +Once the device plugin DaemonSet is running on the GPU-powered Windows worker nodes, use the following command to verify that each node has allocatable GPUs. The corresponding number should match the number of DirectX devices on each node. +[source,bash,subs="verbatim,attributes,quotes"] +---- +kubectl get nodes "-o=custom-columns=NAME:.metadata.name,DirectX:.status.allocatable.directx\.microsoft\.com/display" +---- + + +[[ml-eks-windows-ami-run-with-gpu-acceleration,ml-eks-windows-ami-run-with-gpu-acceleration.title]] +== Running Windows containers with GPU-acceleration + +Before launching your pods, specify the resource name `directx.microsoft.com/display` in `.spec.containers[].resources`. +This will indicate that your containers require GPU-enabled capabilities, and the `kube-scheduler` will attempt to place your pods on your pre-configured Windows node with available GPU resources. + +As an example, see the sample command below which launches a `Job` to run Monte Carlo simulation to estimate the value of pi. This example is from the https://github.com/TensorWorks/DirectX-Device-Plugins[Kubernetes Device Plugins for DirectX] GitHub repository, which has https://github.com/TensorWorks/DirectX-Device-Plugins/tree/main/examples[multiple examples] to choose from that you can run to test your Windows node GPU capabilities. + + +[source,bash,subs="verbatim,attributes,quotes"] +---- +cat < + +[.topic] +[[ml-get-started,ml-get-started.title]] += Get started with ML +:info_doctype: section +:info_title: Get started deploying Machine Learning tools on EKS +:info_titleabbrev: Get started with ML +:info_abstract: Choose the Machine Learning on EKS tools and platforms that best suit your needs, then use quick start procedures to deploy them to the {aws} cloud. + +include::../attributes.txt[] + + +[abstract] +-- +Choose the Machine Learning on EKS tools and platforms that best suit your needs, then use quick start procedures to deploy ML workloads and EKS clusters to the {aws} cloud. +-- + +To jump into Machine Learning on EKS, start by choosing from these prescriptive patterns to quickly get an EKS cluster and ML software and hardware ready to begin running ML workloads. Most of these patterns are based on Terraform blueprints that are available from the https://awslabs.github.io/data-on-eks/docs/introduction/intro[Data on Amazon EKS] site. Before you begin, here are few things to keep in mind: + +* GPUs or Neuron instances are required to run these procedures. 
Lack of availability of these resources can cause these procedures to fail during cluster creation or node autoscaling.
+* Trainium and Inferentia-based instances (used with the {aws} Neuron SDK) can save money and are more readily available than NVIDIA GPUs. So, when your workloads permit it, we recommend that you consider using Neuron for your Machine Learning workloads (see https://awsdocs-neuron.readthedocs-hosted.com/en/latest/[Welcome to {aws} Neuron]).
+* Some of the getting started experiences here require that you get data via your own https://huggingface.co/[Hugging Face] account.
+
+To get started, choose from the following selection of patterns that are designed to help you set up infrastructure to run your Machine Learning workloads:
+
+* *https://awslabs.github.io/data-on-eks/docs/blueprints/ai-ml/jupyterhub[JupyterHub on EKS]*: Explore the https://awslabs.github.io/data-on-eks/docs/blueprints/ai-ml/jupyterhub[JupyterHub blueprint], which showcases Time Slicing and MIG features, as well as multi-tenant configurations with profiles. This is ideal for deploying large-scale JupyterHub platforms on EKS.
+* *https://aws.amazon.com/ai/machine-learning/neuron/[Large Language Models on {aws} Neuron and RayServe]*: Use https://aws.amazon.com/ai/machine-learning/neuron/[{aws} Neuron] to run large language models (LLMs) on Amazon EKS and {aws} Trainium and {aws} Inferentia accelerators. See https://awslabs.github.io/data-on-eks/docs/gen-ai/inference/Neuron/vllm-ray-inf2[Serving LLMs with RayServe and vLLM on {aws} Neuron] for instructions on setting up a platform for making inference requests, with components that include:
++
+** {aws} Neuron SDK toolkit for deep learning
+** {aws} Inferentia and Trainium accelerators
+** vLLM, a library for LLM inference and serving (see the https://docs.vllm.ai/en/latest/[vLLM] documentation site)
+** RayServe scalable model serving library (see the https://docs.ray.io/en/latest/serve/index.html[Ray Serve: Scalable and Programmable Serving] site)
+** Llama-3 language model, using your own https://huggingface.co/[Hugging Face] account.
+** Observability with {aws} CloudWatch and Neuron Monitor
+** Open WebUI
+* *https://awslabs.github.io/data-on-eks/docs/gen-ai/inference/GPUs/vLLM-NVIDIATritonServer[Large Language Models on NVIDIA and Triton]*: Deploy multiple large language models (LLMs) on Amazon EKS and NVIDIA GPUs. See https://awslabs.github.io/data-on-eks/docs/gen-ai/inference/GPUs/vLLM-NVIDIATritonServer[Deploying Multiple Large Language Models with NVIDIA Triton Server and vLLM] for instructions on setting up a platform for making inference requests, with components that include:
++
+** NVIDIA Triton Inference Server (see the https://github.com/triton-inference-server/server[Triton Inference Server] GitHub site)
+** vLLM, a library for LLM inference and serving (see the https://docs.vllm.ai/en/latest/[vLLM] documentation site)
+** Two language models: mistralai/Mistral-7B-Instruct-v0.2 and meta-llama/Llama-2-7b-chat-hf, using your own https://huggingface.co/[Hugging Face] account.
+
+== Continuing with ML on EKS
+
+Along with choosing from the blueprints described on this page, there are other ways you can proceed through the ML on EKS documentation if you prefer. For example, you can:
+
+* *Try tutorials for ML on EKS* – Run other end-to-end tutorials for building and running your own Machine Learning models on EKS. See <>.
+ +To improve your work with ML on EKS, refer to the following: + +* *Prepare for ML* – Learn how to prepare for ML on EKS with features like custom AMIs and GPU reservations. See <>. diff --git a/latest/ug/ml/ml-prepare-for-cluster.adoc b/latest/ug/ml/ml-prepare-for-cluster.adoc new file mode 100644 index 00000000..e813b68b --- /dev/null +++ b/latest/ug/ml/ml-prepare-for-cluster.adoc @@ -0,0 +1,48 @@ +//!!NODE_ROOT
+
+[.topic]
+[[ml-prepare-for-cluster,ml-prepare-for-cluster.title]]
+= Prepare for ML clusters
+:info_doctype: section
+:info_title: Prepare to create an EKS cluster for Machine Learning
+:info_titleabbrev: Prepare for ML
+:info_abstract: Learn how to make decisions about CPUs, AMIs, and tooling before creating an EKS cluster for ML.
+
+include::../attributes.txt[]
+
+
+[abstract]
+--
+Learn how to make decisions about CPUs, AMIs, and tooling before creating an EKS cluster for ML.
+--
+
+There are several ways that you can enhance your Machine Learning on EKS experience.
+The following pages in this section will help you:
+
+* Understand your choices for using ML on EKS, and
+* Prepare your EKS and ML environment.
+
+In particular, they will help you:
+
+* *Choose AMIs*: {aws} offers multiple customized AMIs for running ML workloads on EKS. See <> and <>.
+* *Customize AMIs*: You can further modify {aws} custom AMIs to add other software and drivers needed for your particular use cases. See <>.
+* *Reserve GPUs*: Because of the high demand for GPUs, you can reserve the GPUs you need in advance to ensure that they are available when you need them. See <>.
+* *Add EFA*: Add the Elastic Fabric Adapter to improve network performance for inter-node cluster communications. See <>.
+* *Use {aws} Inferentia workloads*: Create an EKS cluster with Amazon EC2 Inf1 instances. See <>.
+
+[.topiclist]
+[[Topic List]]
+
+include::ml-eks-optimized-ami.adoc[leveloffset=+1]
+
+include::ml-eks-windows-optimized-ami.adoc[leveloffset=+1]
+
+include::capacity-blocks-mng.adoc[leveloffset=+1]
+
+include::capacity-blocks.adoc[leveloffset=+1]
+
+include::node-taints-managed-node-groups.adoc[leveloffset=+1]
+
+include::node-efa.adoc[leveloffset=+1]
+
+include::inferentia-support.adoc[leveloffset=+1]
diff --git a/latest/ug/ml/ml-tutorials.adoc b/latest/ug/ml/ml-tutorials.adoc
new file mode 100644
index 00000000..e040f88e
--- /dev/null
+++ b/latest/ug/ml/ml-tutorials.adoc
@@ -0,0 +1,76 @@
+//!!NODE_ROOT
+ +[.topic] +[[ml-tutorials,ml-tutorials.title]] += Try tutorials for deploying Machine Learning workloads on EKS +:info_doctype: section +:info_title: Try tutorials for deploying Machine Learning workloads and platforms on EKS +:info_titleabbrev: Try tutorials for ML on EKS +:info_abstract: Learn how to deploy Machine Learning workloads on EKS + +include::../attributes.txt[] + +If you are interested in setting up Machine Learning platforms and frameworks in EKS, explore the tutorials described in this page. +These tutorials cover everything from patterns for making the best use of GPU processors to choosing modeling tools to building frameworks for specialized industries. + +== Build generative AI platforms on EKS + +* https://aws.amazon.com/blogs/containers/deploy-generative-ai-models-on-amazon-eks/[Deploy Generative AI Models on Amazon EKS] +* https://aws.amazon.com/blogs/containers/building-multi-tenant-jupyterhub-platforms-on-amazon-eks/[Building multi-tenant JupyterHub Platforms on Amazon EKS] +* https://aws.amazon.com/blogs/containers/run-spark-rapids-ml-workloads-with-gpus-on-amazon-emr-on-eks/[Run Spark-RAPIDS ML workloads with GPUs on Amazon EMR on EKS] + +== Run specialized generative AI frameworks on EKS + +* https://aws.amazon.com/blogs/hpc/accelerate-drug-discovery-with-nvidia-bionemo-framework-on-amazon-eks/[Accelerate drug discovery with NVIDIA BioNeMo Framework on Amazon EKS] +* https://aws.amazon.com/blogs/containers/host-the-whisper-model-with-streaming-mode-on-amazon-eks-and-ray-serve/[Host the Whisper Model with Streaming Mode on Amazon EKS and Ray Serve] +* https://aws.amazon.com/blogs/machine-learning/accelerate-your-generative-ai-distributed-training-workloads-with-the-nvidia-nemo-framework-on-amazon-eks/[Accelerate your generative AI distributed training workloads with the NVIDIA NeMo Framework on Amazon EKS] +* https://aws.amazon.com/blogs/publicsector/virtualizing-satcom-operations-aws/[Virtualizing satellite communication operations with {aws}] +* https://aws.amazon.com/blogs/opensource/running-torchserve-on-amazon-elastic-kubernetes-service/[Running TorchServe on Amazon Elastic Kubernetes Service] + +== Maximize NVIDIA GPU performance for ML on EKS + +* Implement GPU sharing to efficiently use NVIDIA GPUs for your EKS clusters: ++ +https://aws.amazon.com/blogs/containers/gpu-sharing-on-amazon-eks-with-nvidia-time-slicing-and-accelerated-ec2-instances/[GPU sharing on Amazon EKS with NVIDIA time-slicing and accelerated EC2 instances] + +* Use Multi-Instance GPUs (MIGs) and NIM microservices to run more pods per GPU on your EKS clusters: ++ +https://aws.amazon.com/blogs/containers/maximizing-gpu-utilization-with-nvidias-multi-instance-gpu-mig-on-amazon-eks-running-more-pods-per-gpu-for-enhanced-performance/[Maximizing GPU utilization with NVIDIA's Multi-Instance GPU (MIG) on Amazon EKS: Running more pods per GPU for enhanced performance] + +* Leverage NVIDIA NIM microservices to optimize inference workloads using optimized microservices to deploy AI models at scale: ++ +https://aws.amazon.com/blogs/hpc/deploying-generative-ai-applications-with-nvidia-nims-on-amazon-eks/[Part 1: Deploying generative AI applications with NVIDIA NIMs on Amazon EKS] ++ +https://aws.amazon.com/blogs/hpc/deploying-generative-ai-applications-with-nvidia-nim-microservices-on-amazon-elastic-kubernetes-service-amazon-eks-part-2/[Part 2: Deploying Generative AI Applications with NVIDIA NIM Microservices on Amazon Elastic Kubernetes Service (Amazon EKS)] + +* 
https://aws.amazon.com/blogs/containers/scaling-a-large-language-model-with-nvidia-nim-on-amazon-eks-with-karpenter/[Scaling a Large Language Model with NVIDIA NIM on Amazon EKS with Karpenter] + + +* https://aws.amazon.com/blogs/machine-learning/build-and-deploy-a-scalable-machine-learning-system-on-kubernetes-with-kubeflow-on-aws/[Build and deploy a scalable machine learning system on Kubernetes with Kubeflow on {aws}] + +== Run video encoding workloads on EKS + +* https://aws.amazon.com/blogs/containers/delivering-video-content-with-fractional-gpus-in-containers-on-amazon-eks/[Delivering video content with fractional GPUs in containers on Amazon EKS] + +== Accelerate image loading for inference workloads + +* https://aws.amazon.com/blogs/containers/how-h2o-ai-optimized-and-secured-their-ai-ml-infrastructure-with-karpenter-and-bottlerocket/[How H2O.ai optimized and secured their AI/ML infrastructure with Karpenter and Bottlerocket] + +== Testimonials for ML on EKS + +* https://aws.amazon.com/blogs/containers/quora-3x-faster-machine-learning-25-lower-costs-with-nvidia-triton-on-amazon-eks/[Quora achieved 3x lower latency and 25% lower Costs by modernizing model serving with Nvidia Triton on Amazon EKS] + +== Monitoring ML workloads + +* https://aws.amazon.com/blogs/mt/monitoring-gpu-workloads-on-amazon-eks-using-aws-managed-open-source-services/[Monitoring GPU workloads on Amazon EKS using {aws} managed open-source services] +* https://aws.amazon.com/blogs/machine-learning/enable-pod-based-gpu-metrics-in-amazon-cloudwatch/[Enable pod-based GPU metrics in Amazon CloudWatch] + +== Announcements for ML on EKS + +* https://aws.amazon.com/blogs/containers/bottlerocket-support-for-nvidia-gpus/[Bottlerocket support for NVIDIA GPUs] +* https://aws.amazon.com/blogs/aws/new-ec2-instances-g5-with-nvidia-a10g-tensor-core-gpus/[New – EC2 Instances (G5) with NVIDIA A10G Tensor Core GPUs] +* https://aws.amazon.com/blogs/containers/utilizing-nvidia-multi-instance-gpu-mig-in-amazon-ec2-p4d-instances-on-amazon-elastic-kubernetes-service-eks/[Utilizing NVIDIA Multi-Instance GPU (MIG) in Amazon EC2 P4d Instances on Amazon Elastic Kubernetes Service] +* https://aws.amazon.com/blogs/aws/new-gpu-equipped-ec2-p4-instances-for-machine-learning-hpc/[New – GPU-Equipped EC2 P4 Instances for Machine Learning & HPC] +* https://aws.amazon.com/blogs/machine-learning/amazon-ec2-p5e-instances-are-generally-available/[Amazon EC2 P5e instances are generally available] +* https://aws.amazon.com/blogs/containers/deploying-managed-p4d-instances-in-amazon-elastic-kubernetes-service/[Deploying managed P4d Instances in Amazon Elastic Kubernetes Service with NVIDIA GPUDirectRDMA] +* https://aws.amazon.com/blogs/machine-learning/establishing-an-ai-ml-center-of-excellence/[Establishing an AI/ML center of excellence] diff --git a/latest/ug/ml/node-efa.adoc b/latest/ug/ml/node-efa.adoc new file mode 100644 index 00000000..934678c5 --- /dev/null +++ b/latest/ug/ml/node-efa.adoc @@ -0,0 +1,316 @@ +//!!NODE_ROOT
+:AWSEC2-latest-UserGuide-using-eni-html-network-cards: AWSEC2/latest/UserGuide/using-eni.html#network-cards + +[.topic] +[[node-efa,node-efa.title]] += Run machine learning training on Amazon EKS with [.noloc]`Elastic Fabric Adapter` +:info_doctype: section +:info_title: Add Elastic Fabric \ + Adapter to EKS clusters for ML training +:info_titleabbrev: Prepare training clusters with EFA +:info_abstract: Learn how to integrate Elastic Fabric Adapter (EFA) with Amazon EKS to run machine \ + learning training workloads requiring high inter-node communications at scale using \ + p4d instances with GPUDirect RDMA and NVIDIA Collective Communications Library \ + (NCCL). + +include::../attributes.txt[] + +[abstract] +-- +Learn how to integrate Elastic Fabric Adapter (EFA) with Amazon EKS to run machine learning training workloads requiring high inter-node communications at scale using p4d instances with [.noloc]`GPUDirect RDMA` and [.noloc]`NVIDIA Collective Communications Library (NCCL)`. +-- + +This topic describes how to integrate Elastic Fabric Adapter (EFA) with [.noloc]`Pods` deployed in your Amazon EKS cluster. Elastic Fabric Adapter (EFA) is a network interface for Amazon EC2 instances that enables you to run applications requiring high levels of inter-node communications at scale on {aws}. Its custom-built operating system bypass hardware interface enhances the performance of inter-instance communications, which is critical to scaling these applications. With EFA, High Performance Computing (HPC) applications using the Message Passing Interface (MPI) and Machine Learning (ML) applications using NVIDIA Collective Communications Library (NCCL) can scale to thousands of CPUs or GPUs. As a result, you get the application performance of on-premises HPC clusters with the on-demand elasticity and flexibility of the {aws} cloud. Integrating EFA with applications running on Amazon EKS clusters can reduce the time to complete large scale distributed training workloads without having to add additional instances to your cluster. For more information about EFA, link:hpc/efa/[Elastic Fabric Adapter,type="marketing"]. + +[[efa-instances,efa-instances.title]] +== Instance types with EFA + +The _{aws} EFA Kubernetes Device Plugin_ supports all Amazon EC2 instance types that have EFA. To see a list of all instance types that have EFA, see link:AWSEC2/latest/UserGuide/efa.html#efa-instance-types[Supported instance types,type="documentation"] in the _Amazon EC2 User Guide_. However, to run ML applications quickly, we recommend that an instance has hardware acceleration chips such as [.noloc]`nVidia` GPUs, link:machine-learning/inferentia/[{aws} Inferentia,type="marketing"] chips, or link:machine-learning/trainium/[{aws} Trainium,type="marketing"] chips, in addition to the EFA. To see a list of instance types that have hardware acceleration chips and EFA, see link:AWSEC2/latest/UserGuide/efa.html#efa-instance-types[Accelerated computing,type="documentation"] in the _Amazon EC2 User Guide_. + +As you compare instance types to choose between them, consider the number of EFA network cards available for that instance type as well as the number of accelerator cards, amount of CPU, and amount of memory. You can assign up to one EFA per network card. An EFA counts as a network interface.. To see how many EFA are available for each instance types that have EFA, see the link:AWSEC2/latest/UserGuide/using-eni.html#network-cards[Network cards,type="documentation"] list in the _Amazon EC2 User Guide_. 
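+
+As you evaluate instance types, one way to compare them is to query the Amazon EC2 API directly. The following command is a minimal sketch (the `EfaInfo.MaximumEfaInterfaces` and `MaximumNetworkCards` query fields are assumptions about the `describe-instance-types` output and may vary by {aws} CLI version); it lists EFA-supported instance types together with the maximum number of EFA interfaces and network cards that each one supports.
+
+[source,bash,subs="verbatim,attributes"]
+----
+aws ec2 describe-instance-types --region region-code \
+    --filters Name=network-info.efa-supported,Values=true \
+    --query "InstanceTypes[*].[InstanceType,NetworkInfo.EfaInfo.MaximumEfaInterfaces,NetworkInfo.MaximumNetworkCards]" \
+    --output table
+----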
+ +[[efa-only-interfaces,efa-only-interfaces.title]] +== EFA and EFA-only interfaces + +An _Elastic Fabric Adapter (EFA)_ is a network interface that combines the capabilities of an Elastic Network Adapter (ENA) and an OS-bypass interface, powered by the {aws} Scalable Reliable Datagram (SRD) protocol. The EFA functionalities allow applications to communicate directly with the hardware for low-latency transport. You can choose to access only the EFA capabilities using _EFA-only_ interfaces, limiting communication to interfaces within the same Availability Zone. + +To create nodes that can have EFA-only interfaces, you must use a custom EC2 Launch Template and set the `InterfaceType` to `efa-only`. In your custom Launch Template, you can't set the network card `0` to an EFA-only interface, as that is the primary network card and network interface of the EC2 instance. You must have VPC CNI version `1.18.5` or later for EFA-only interfaces. If you are using Amazon Linux 2, ami version has to be `v20240928` or later for EfA-only interfaces. + +The following procedure guides you to create an EKS cluster with `eksctl` with nodes that have [.noloc]`nVidia` GPUs and EFA interfaces. You can't use `eksctl` to create nodes and node groups that use EFA-only interfaces. + +[[efa-prereqs,efa-prereqs.title]] +== Prerequisites + +* An existing Amazon EKS cluster. If you don't have an existing cluster, create one using <>.. Your cluster must be deployed in a VPC that has at least one private subnet with enough available IP addresses to deploy nodes in. The private subnet must have outbound internet access provided by an external device, such as a NAT gateway. ++ +If you plan to use `eksctl` to create your node group, `eksctl` can also create a cluster for you. +* Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. The {aws} CLI version that is installed in {aws} CloudShell might also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the _{aws} CloudShell User Guide_. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* You must have the [.noloc]`Amazon VPC CNI plugin for Kubernetes` version `1.7.10` or later installed before launching worker nodes that support multiple Elastic Fabric Adapters, such as the `p4d` or `p5`. For more information about updating your [.noloc]`Amazon VPC CNI plugin for Kubernetes` version, see <>. 
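+
+For example, one way to check the [.noloc]`Amazon VPC CNI plugin for Kubernetes` version that is currently installed on your cluster is to inspect the image tag of the `aws-node` [.noloc]`DaemonSet`. This is a minimal check; the image tag format can vary depending on how the plugin was installed.
+
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3
+----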
+
+
+[IMPORTANT]
+====
+
+An important consideration required for adopting EFA with [.noloc]`Kubernetes` is configuring and managing [.noloc]`Huge Pages` as a resource in the cluster. For more information, see https://kubernetes.io/docs/tasks/manage-hugepages/scheduling-hugepages/[Manage Huge Pages] in the [.noloc]`Kubernetes` documentation. Amazon EC2 instances with the EFA driver installed pre-allocate 5128 2MiB Huge Pages, which you can request as resources to consume in your job specifications.
+
+====
+
+[[efa-create-nodegroup,efa-create-nodegroup.title]]
+== Create node group
+
+The following procedure helps you create a node group backed by `p4d.24xlarge` or `p5.48xlarge` instances with EFA interfaces and GPUDirect RDMA, and run an example NVIDIA Collective Communications Library (NCCL) test for multi-node NCCL performance using EFAs. The example can be used as a template for distributed deep learning training on Amazon EKS using EFAs.
+
+. Determine which Amazon EC2 instance types that support EFA are available in the {aws} Region that you want to deploy nodes in. Replace [.replaceable]`region-code` with the {aws} Region that you want to deploy your node group in.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws ec2 describe-instance-types --region region-code \
+    --filters Name=network-info.efa-supported,Values=true \
+    --query "InstanceTypes[*].[InstanceType]" --output text
+----
++
+When you deploy nodes, the instance type that you want to deploy must be available in the {aws} Region that your cluster is in.
+. Determine which Availability Zones the instance type that you want to deploy is available in. In this tutorial, the `p5.48xlarge` instance type is used and must be returned in the output for the {aws} Region that you specified in the previous step. When you deploy nodes in a production cluster, replace [.replaceable]`p5.48xlarge` with any instance type returned in the previous step.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws ec2 describe-instance-type-offerings --region region-code \
+    --location-type availability-zone --filters Name=instance-type,Values=p4d.24xlarge,p5.48xlarge \
+    --query 'InstanceTypeOfferings[*].Location' --output text
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+us-west-2a us-west-2c us-west-2b
+----
++
+Note the Availability Zones returned for use in later steps. When you deploy nodes to a cluster, your VPC must have subnets with available IP addresses in one of the Availability Zones returned in the output.
+. Create a node group using `eksctl`. You need version `{eksctl-min-version}` or later of the `eksctl` command line tool installed on your device or {aws} CloudShell. To install or update `eksctl`, see https://eksctl.io/installation[Installation] in the `eksctl` documentation.
++
+.. Copy the following contents to a file named [.replaceable]`efa-cluster.yaml`. Replace the [.replaceable]`example values` with your own. You can replace [.replaceable]`p5.48xlarge` with a different instance type, but if you do, make sure that the values for `availabilityZones` are Availability Zones that were returned for that instance type earlier in this procedure.
++ +[source,yaml,subs="verbatim,attributes"] +---- +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: my-efa-cluster + region: region-code + version: "1.XX" + +iam: + withOIDC: true + +availabilityZones: ["us-west-2a", "us-west-2c"] + +managedNodeGroups: + - name: my-efa-ng + instanceType: p5.48xlarge + minSize: 1 + desiredCapacity: 2 + maxSize: 3 + availabilityZones: ["us-west-2a"] + volumeSize: 300 + privateNetworking: true + efaEnabled: true +---- +.. Create a managed node group in an existing cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create nodegroup -f efa-cluster.yaml +---- ++ +If you don't have an existing cluster, you can run the following command to create a cluster and the node group. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create cluster -f efa-cluster.yaml +---- ++ +NOTE: Because the instance type used in this example has GPUs, `eksctl` automatically installs the NVIDIA Kubernetes device plugin on each instance for you. +. Deploy the EFA Kubernetes device plugin. ++ +The EFA Kubernetes device plugin detects and advertises EFA interfaces as allocatable resources to Kubernetes. An application can consume the extended resource type `vpc.amazonaws.com/efa` in a [.noloc]`Pod` request spec just like CPU and memory. For more information, see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#consuming-extended-resources[Consuming extended resources] in the [.noloc]`Kubernetes` documentation. Once requested, the plugin automatically assigns and mounts an EFA interface to the [.noloc]`Pod`. Using the device plugin simplifies EFA setup and does not require a [.noloc]`Pod` to run in privileged mode. ++ +[source,bash,subs="verbatim,attributes"] +---- +helm repo add eks https://aws.github.io/eks-charts +helm install aws-efa-k8s-device-plugin --namespace kube-system eks/aws-efa-k8s-device-plugin +---- + + +[[efa-application,efa-application.title]] +== (Optional) Test the performance of the EFA + +We recommend that you test the EFA setup. You can use the https://github.com/aws-samples/awsome-distributed-training/tree/main/micro-benchmarks/nccl-tests[NCCL Tests] in the `aws-samples/awsome-distributed-training` repository on GitHub. https://github.com/NVIDIA/nccl-tests[NCCL Tests] evaluate the performance of the network using the Nvidia Collective Communication Library. The following steps submit NCCL tests on Amazon EKS. + +. Deploy the Kubeflow MPI Operator: ++ +For the NCCL tests you can apply the Kubeflow MPI Operator. The MPI Operator makes it easy to run Allreduce-style distributed training on Kubernetes. For more information, see https://github.com/kubeflow/mpi-operator[MPI Operator] on [.noloc]`GitHub`. +. Run the multi-node NCCL Performance Test to verify GPUDirectRDMA/EFA: ++ +To verify NCCL performance with [.noloc]`GPUDirectRDMA` over EFA, run the standard NCCL Performance test. For more information, see the official https://github.com/NVIDIA/nccl-tests.git[NCCL-Tests] repo on [.noloc]`GitHub`. ++ +Complete the following steps to run a two node [.noloc]`NCCL Performance Test`. In the example [.noloc]`NCCL` test job, each worker requests eight GPUs, 5210Mi of `hugepages-2Mi`, four EFAs, and 8000Mi of memory, which effectively means each worker consumes all the resources of a `p5.48xlarge` instance. ++ +.. 
Create the MPIJob manifest: ++ +Copy the following to a file named `nccl-tests.yaml`: ++ +[source,yaml,subs="verbatim,attributes"] +---- +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: nccl-tests +spec: + runPolicy: + cleanPodPolicy: Running + backoffLimit: 20 + slotsPerWorker: 8 + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + restartPolicy: OnFailure + containers: + - image: public.ecr.aws/hpc-cloud/nccl-tests:latest + imagePullPolicy: IfNotPresent + name: test-nccl-launcher + env: + - name: PATH + value: $PATH:/opt/amazon/efa/bin:/usr/bin + - name: LD_LIBRARY_PATH + value: /opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/nvidia/lib:$LD_LIBRARY_PATH + - name: NCCL_DEBUG + value: INFO + - name: NCCL_BUFFSIZE + value: '8388608' + - name: NCCL_P2P_NET_CHUNKSIZE + value: '524288' + - name: NCCL_TUNER_PLUGIN + value: /opt/aws-ofi-nccl/install/lib/libnccl-ofi-tuner.so + command: + - /opt/amazon/openmpi/bin/mpirun + - --allow-run-as-root + - --tag-output + - -np + - "16" + - -N + - "8" + - --bind-to + - none + - -x + - PATH + - -x + - LD_LIBRARY_PATH + - -x + - NCCL_DEBUG=INFO + - -x + - NCCL_BUFFSIZE + - -x + - NCCL_P2P_NET_CHUNKSIZE + - -x + - NCCL_TUNER_PLUGIN + - --mca + - pml + - ^cm,ucx + - --mca + - btl + - tcp,self + - --mca + - btl_tcp_if_exclude + - lo,docker0,veth_def_agent + - /opt/nccl-tests/build/all_reduce_perf + - -b + - "8" + - -e + - "16G" + - -f + - "2" + - -g + - "1" + - -c + - "1" + - -n + - "100" + Worker: + replicas: 2 + template: + spec: + nodeSelector: + node.kubernetes.io/instance-type: "p5.48xlarge" + containers: + - image: public.ecr.aws/hpc-cloud/nccl-tests:latest + imagePullPolicy: IfNotPresent + name: nccl-tests-worker + volumeMounts: + - name: shmem + mountPath: /dev/shm + resources: + limits: + nvidia.com/gpu: 8 + hugepages-2Mi: 5120Mi + vpc.amazonaws.com/efa: 32 + memory: 32000Mi + requests: + nvidia.com/gpu: 8 + hugepages-2Mi: 5120Mi + vpc.amazonaws.com/efa: 32 + memory: 32000Mi + volumes: + - name: shmem + hostPath: + path: /dev/shm +---- +.. Apply the NCCL-tests MPIJob: ++ +Submit the `MPIJob` by applying the manifest. This will create two `p5.48xlarge` Amazon EC2 instances. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f nccl-tests.yaml +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +mpijob.kubeflow.org/nccl-tests created +---- +.. Verify that the job started pods: ++ +View your running [.noloc]`Pods`. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get pods +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME READY STATUS RESTARTS AGE +nccl-tests-launcher-nbql9 0/1 Init:0/1 0 2m49s +nccl-tests-worker-0 1/1 Running 0 2m49s +nccl-tests-worker-1 1/1 Running 0 2m49s +---- ++ +The MPI Operator creates a launcher [.noloc]`Pod` and 2 worker [.noloc]`Pods` (one on each node). +.. Verify that the job is running successfully with the logs: ++ +View the log for the `nccl-tests-launcher` [.noloc]`Pod`. Replace [.replaceable]`nbql9` with the value from your output. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl logs -f nccl-tests-launcher-nbql9 +---- + +If the test completed successfully, you can deploy your applications that use the [.noloc]`Nvidia Collective Communication Library`. 
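+
+As a reference for adapting your own training workloads, the following is a minimal sketch of a [.noloc]`Pod` that consumes the extended resources discussed in this topic (EFA interfaces, GPUs, and huge pages). The image name and the resource counts are placeholder assumptions; set them to match your application and the instance type in your node group.
+
+[source,yaml,subs="verbatim,attributes"]
+----
+apiVersion: v1
+kind: Pod
+metadata:
+  name: efa-app-example
+spec:
+  restartPolicy: Never
+  containers:
+    - name: efa-app
+      image: my-training-image:latest # placeholder image
+      resources:
+        requests:
+          vpc.amazonaws.com/efa: 1
+          nvidia.com/gpu: 1
+          hugepages-2Mi: 1024Mi
+          memory: 8000Mi
+        limits:
+          vpc.amazonaws.com/efa: 1
+          nvidia.com/gpu: 1
+          hugepages-2Mi: 1024Mi
+          memory: 8000Mi
+----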
diff --git a/latest/ug/ml/node-taints-managed-node-groups.adoc b/latest/ug/ml/node-taints-managed-node-groups.adoc new file mode 100644 index 00000000..4e3d3cf8 --- /dev/null +++ b/latest/ug/ml/node-taints-managed-node-groups.adoc @@ -0,0 +1,65 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[node-taints-managed-node-groups,node-taints-managed-node-groups.title]] += Prevent [.noloc]`Pods` from being scheduled on specific nodes +:info_titleabbrev: Taint GPU nodes + +[abstract] +-- +Taints and tolerations work together to ensure that [.noloc]`Pods` aren't scheduled onto inappropriate nodes. This can be particularly useful for nodes running on GPU hardware. +-- + +Nodes with specialized processors, such as GPUs, can be more expensive to run than nodes running on more standard machines. +For that reason, you may want to protect those nodes from having workloads that don't require special hardware from being deployed to those nodes. +One way to do that is with taints. + +Amazon EKS supports configuring [.noloc]`Kubernetes` taints through managed node groups. Taints and tolerations work together to ensure that [.noloc]`Pods` aren't scheduled onto inappropriate nodes. One or more taints can be applied to a node. This marks that the node shouldn't accept any [.noloc]`Pods` that don't tolerate the taints. Tolerations are applied to [.noloc]`Pods` and allow, but don't require, the [.noloc]`Pods` to schedule onto nodes with matching taints. For more information, see https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/[Taints and Tolerations] in the [.noloc]`Kubernetes` documentation. + +[.noloc]`Kubernetes` node taints can be applied to new and existing managed node groups using the {aws-management-console} or through the Amazon EKS API. + + + +* For information on creating a node group with a taint using the {aws-management-console}, see <>. +* The following is an example of creating a node group with a taint using the {aws} CLI: ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks create-nodegroup \ + --cli-input-json ' +{ + "clusterName": "my-cluster", + "nodegroupName": "node-taints-example", + "subnets": [ + "subnet-1234567890abcdef0", + "subnet-abcdef01234567890", + "subnet-021345abcdef67890" + ], + "nodeRole": "{arn-aws}iam::111122223333:role/AmazonEKSNodeRole", + "taints": [ + { + "key": "dedicated", + "value": "gpuGroup", + "effect": "NO_SCHEDULE" + } + ] +}' +---- + +For more information and examples of usage, see https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#taint[taint] in the [.noloc]`Kubernetes` reference documentation. + +[NOTE] +==== + + +* Taints can be updated after you create the node group using the `UpdateNodegroupConfig` API. +* The taint key must begin with a letter or number. It can contain letters, numbers, hyphens (`-`), periods (`.`), and underscores (`_`). It can be up to 63 characters long. +* Optionally, the taint key can begin with a DNS subdomain prefix and a single `/`. If it begins with a DNS subdomain prefix, it can be 253 characters long. +* The value is optional and must begin with a letter or number. It can contain letters, numbers, hyphens (`-`), periods (`.`), and underscores (`_`). It can be up to 63 characters long. +* When using [.noloc]`Kubernetes` directly or the {aws-management-console}, the taint effect must be `NoSchedule`, `PreferNoSchedule`, or `NoExecute`. However, when using the {aws} CLI or API, the taint effect must be `NO_SCHEDULE`, `PREFER_NO_SCHEDULE`, or `NO_EXECUTE`. +* A maximum of 50 taints are allowed per node group. +* If taints that were created using a managed node group are removed manually from a node, then Amazon EKS doesn't add the taints back to the node. 
This is true even if the taints are specified in the managed node group configuration. + +==== + +You can use the link:cli/latest/reference/eks/update-nodegroup-config.html[aws eks update-nodegroup-config,type="documentation"] {aws} CLI command to add, remove, or replace taints for managed node groups. diff --git a/latest/ug/networking/YAML b/latest/ug/networking/YAML new file mode 120000 index 00000000..2976443b --- /dev/null +++ b/latest/ug/networking/YAML @@ -0,0 +1 @@ +../YAML \ No newline at end of file diff --git a/latest/ug/networking/auto-mode-addon-note.adoc b/latest/ug/networking/auto-mode-addon-note.adoc new file mode 100644 index 00000000..a7ae32ca --- /dev/null +++ b/latest/ug/networking/auto-mode-addon-note.adoc @@ -0,0 +1,6 @@ +[TIP] +==== +With Amazon EKS Auto Mode, you don't need to install or upgrade networking add-ons. Auto Mode includes pod networking and load balancing capabilities. + +For more information, see <>. +==== diff --git a/latest/ug/networking/creating-a-vpc.adoc b/latest/ug/networking/creating-a-vpc.adoc new file mode 100644 index 00000000..a776cd76 --- /dev/null +++ b/latest/ug/networking/creating-a-vpc.adoc @@ -0,0 +1,130 @@ +//!!NODE_ROOT
+[.topic] +[[creating-a-vpc,creating-a-vpc.title]] += Create an Amazon VPC for your Amazon EKS cluster +:info_doctype: section +:info_title: Create an Amazon VPC for your Amazon EKS cluster +:info_titleabbrev: Create a VPC +:info_abstract: Learn how to create an Amazon VPC for your cluster using an Amazon EKS provided {aws} CloudFormation \ + template. + +include::../attributes.txt[] + +[abstract] +-- +Learn how to create an Amazon VPC for your cluster using an Amazon EKS provided {aws} CloudFormation template. +-- + +You can use Amazon Virtual Private Cloud (Amazon VPC) to launch {aws} resources into a virtual network that you've defined. This virtual network closely resembles a traditional network that you might operate in your own data center. However, it comes with the benefits of using the scalable infrastructure of Amazon Web Services. We recommend that you have a thorough understanding of the Amazon VPC service before deploying production Amazon EKS clusters. For more information, see the link:vpc/latest/userguide/[Amazon VPC User Guide,type="documentation"]. + +An Amazon EKS cluster, nodes, and [.noloc]`Kubernetes` resources are deployed to a VPC. If you want to use an existing VPC with Amazon EKS, that VPC must meet the requirements that are described in <>. This topic describes how to create a VPC that meets Amazon EKS requirements using an Amazon EKS provided {aws} CloudFormation template. Once you've deployed a template, you can view the resources created by the template to know exactly what resources it created, and the configuration of those resources. If you are using hybrid nodes, your VPC must have routes in its route table for your on-premises network. For more information about the network requirements for hybrid nodes, see <>. + +== Prerequisites + +To create a VPC for Amazon EKS, you must have the necessary IAM permissions to create Amazon VPC resources. These resources are VPCs, subnets, security groups, route tables and routes, and internet and NAT gateways. For more information, see link:vpc/latest/userguide/vpc-policy-examples.html#vpc-public-subnet-iam[Create a VPC with a public subnet example policy,type="documentation"] in the Amazon VPC User Guide and the full list of link:service-authorization/latest/reference/list_amazonec2.html#amazonec2-actions-as-permissions[Actions, resources, and condition keys for Amazon EC2,type="documentation"] in the link:service-authorization/latest/reference/reference.html[Service Authorization Reference,type="documentation"]. + + +You can create a VPC with public and private subnets, only public subnets, or only private subnets. + +== Public and private subnets + +This VPC has two public and two private subnets. A public subnet's associated route table has a route to an internet gateway. However, the route table of a private subnet doesn't have a route to an internet gateway. One public and one private subnet are deployed to the same Availability Zone. The other public and private subnets are deployed to a second Availability Zone in the same {aws} Region. We recommend this option for most deployments. + +With this option, you can deploy your nodes to private subnets. This option allows [.noloc]`Kubernetes` to deploy load balancers to the public subnets that can load balance traffic to [.noloc]`Pods` that run on nodes in the private subnets. Public `IPv4` addresses are automatically assigned to nodes that are deployed to public subnets, but public `IPv4` addresses aren't assigned to nodes deployed to private subnets. 
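+
+The load balancer placement described here relies on subnet tags that [.noloc]`Kubernetes` and Amazon EKS use for subnet discovery: public subnets are typically tagged with `kubernetes.io/role/elb` and private subnets with `kubernetes.io/role/internal-elb`. The template tags the subnets it creates for you; the following sketch only shows how you could apply the equivalent tags manually to existing subnets (the subnet IDs are placeholders).
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Tag a public subnet for internet-facing load balancers
+aws ec2 create-tags --resources subnet-0123456789abcdef0 --tags Key=kubernetes.io/role/elb,Value=1
+
+# Tag a private subnet for internal load balancers
+aws ec2 create-tags --resources subnet-0fedcba9876543210 --tags Key=kubernetes.io/role/internal-elb,Value=1
+----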
+ +You can also assign `IPv6` addresses to nodes in public and private subnets. The nodes in private subnets can communicate with the cluster and other {aws} services. [.noloc]`Pods` can communicate to the internet through a NAT gateway using `IPv4` addresses or outbound-only Internet gateway using `IPv6` addresses deployed in each Availability Zone. A security group is deployed that has rules that deny all inbound traffic from sources other than the cluster or nodes but allows all outbound traffic. The subnets are tagged so that [.noloc]`Kubernetes` can deploy load balancers to them. + +.. Open the link:cloudformation/[{aws} CloudFormation console,type="console"]. +.. From the navigation bar, select an {aws} Region that supports Amazon EKS. +.. Choose *Create stack*, *With new resources (standard)*. +.. Under *Prerequisite - Prepare template*, make sure that *Template is ready* is selected and then under *Specify template*, select *Amazon S3 URL*. +.. You can create a VPC that supports only `IPv4`, or a VPC that supports `IPv4` and `IPv6`. Paste one of the following URLs into the text area under *Amazon S3 URL* and choose *Next*: + +*** `IPv4` + +[source,none,subs="verbatim,attributes"] +---- +https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-vpc-private-subnets.yaml +---- +*** `IPv4` and `IPv6` + +[source,none,subs="verbatim,attributes"] +---- +https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-ipv6-vpc-public-private-subnets.yaml +---- +.. On the *Specify stack details* page, enter the parameters, and then choose *Next*. + +*** *Stack name*: Choose a stack name for your {aws} CloudFormation stack. For example, you can use the template name you used in the previous step. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. +*** *VpcBlock*: Choose an `IPv4` CIDR range for your VPC. Each node, [.noloc]`Pod`, and load balancer that you deploy is assigned an `IPv4` address from this block. The default `IPv4` values provide enough IP addresses for most implementations, but if it doesn't, then you can change it. For more information, see link:vpc/latest/userguide/VPC_Subnets.html#VPC_Sizing[VPC and subnet sizing,type="documentation"] in the Amazon VPC User Guide. You can also add additional CIDR blocks to the VPC once it's created. If you're creating an `IPv6` VPC, `IPv6` CIDR ranges are automatically assigned for you from Amazon's Global Unicast Address space. +*** *PublicSubnet01Block*: Specify an `IPv4` CIDR block for public subnet 1. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. If you're creating an `IPv6` VPC, this block is specified for you within the template. +*** *PublicSubnet02Block*: Specify an `IPv4` CIDR block for public subnet 2. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. If you're creating an `IPv6` VPC, this block is specified for you within the template. +*** *PrivateSubnet01Block*: Specify an `IPv4` CIDR block for private subnet 1. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. If you're creating an `IPv6` VPC, this block is specified for you within the template. 
+*** *PrivateSubnet02Block*: Specify an `IPv4` CIDR block for private subnet 2. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. If you're creating an `IPv6` VPC, this block is specified for you within the template. +.. (Optional) On the *Configure stack options* page, tag your stack resources and then choose *Next*. +.. On the *Review* page, choose *Create stack*. +.. When your stack is created, select it in the console and choose *Outputs*. +.. Record the *VpcId* for the VPC that was created. You need this when you create your cluster and nodes. +.. Record the *SubnetIds* for the subnets that were created and whether you created them as public or private subnets. You need at least two of these when you create your cluster and nodes. +.. If you created an `IPv4` VPC, skip this step. If you created an `IPv6` VPC, you must enable the auto-assign `IPv6` address option for the public subnets that were created by the template. That setting is already enabled for the private subnets. To enable the setting, complete the following steps: + +... Open the Amazon VPC console at https://console.aws.amazon.com/vpc/. +... In the left navigation pane, choose *Subnets* +... Select one of your public subnets (*[.replaceable]`stack-name`/SubnetPublic01* or *[.replaceable]`stack-name`/SubnetPublic02* contains the word *public*) and choose *Actions*, *Edit subnet settings*. +... Choose the *Enable auto-assign `*IPv6*` address* check box and then choose *Save*. +... Complete the previous steps again for your other public subnet. + + +== Only public subnets +This VPC has three public subnets that are deployed into different Availability Zones in an {aws} Region. All nodes are automatically assigned public `IPv4` addresses and can send and receive internet traffic through an link:vpc/latest/userguide/VPC_Internet_Gateway.html[internet gateway,type="documentation"]. A link:vpc/latest/userguide/VPC_SecurityGroups.html[security group,type="documentation"] is deployed that denies all inbound traffic and allows all outbound traffic. The subnets are tagged so that [.noloc]`Kubernetes` can deploy load balancers to them. + +.. Open the link:cloudformation/[{aws} CloudFormation console,type="console"]. +.. From the navigation bar, select an {aws} Region that supports Amazon EKS. +.. Choose *Create stack*, *With new resources (standard)*. +.. Under *Prepare template*, make sure that *Template is ready* is selected and then under *Template source*, select *Amazon S3 URL*. +.. Paste the following URL into the text area under *Amazon S3 URL* and choose *Next*: + +[source,none,subs="verbatim,attributes"] +---- +https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-vpc-sample.yaml +---- +.. On the *Specify Details* page, enter the parameters, and then choose *Next*. + +*** *Stack name*: Choose a stack name for your {aws} CloudFormation stack. For example, you can call it [.replaceable]`amazon-eks-vpc-sample`. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. +*** *VpcBlock*: Choose a CIDR block for your VPC. Each node, [.noloc]`Pod`, and load balancer that you deploy is assigned an `IPv4` address from this block. 
The default `IPv4` values provide enough IP addresses for most implementations, but if it doesn't, then you can change it. For more information, see link:vpc/latest/userguide/VPC_Subnets.html#VPC_Sizing[VPC and subnet sizing,type="documentation"] in the Amazon VPC User Guide. You can also add additional CIDR blocks to the VPC once it's created. +*** *Subnet01Block*: Specify a CIDR block for subnet 1. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. +*** *Subnet02Block*: Specify a CIDR block for subnet 2. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. +*** *Subnet03Block*: Specify a CIDR block for subnet 3. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. +.. (Optional) On the *Options* page, tag your stack resources. Choose *Next*. +.. On the *Review* page, choose *Create*. +.. When your stack is created, select it in the console and choose *Outputs*. +.. Record the *VpcId* for the VPC that was created. You need this when you create your cluster and nodes. +.. Record the *SubnetIds* for the subnets that were created. You need at least two of these when you create your cluster and nodes. +.. (Optional) Any cluster that you deploy to this VPC can assign private `IPv4` addresses to your [.noloc]`Pods` and [.noloc]`services`. If you want to deploy clusters to this VPC to assign private `IPv6` addresses to your [.noloc]`Pods` and [.noloc]`services`, make updates to your VPC, subnet, route tables, and security groups. For more information, see link:vpc/latest/userguide/vpc-migrate-ipv6.html[Migrate existing VPCs from IPv4 to IPv6,type="documentation"] in the Amazon VPC User Guide. Amazon EKS requires that your subnets have the `Auto-assign` `IPv6` addresses option enabled. By default, it's disabled. + + +== Only private subnets +This VPC has three private subnets that are deployed into different Availability Zones in the {aws} Region. Resources that are deployed to the subnets can't access the internet, nor can the internet access resources in the subnets. The template creates link:vpc/latest/privatelink/privatelink-access-aws-services.html[VPC endpoints,type="documentation"] using {aws} PrivateLink for several {aws} services that nodes typically need to access. If your nodes need outbound internet access, you can add a public link:vpc/latest/userguide/vpc-nat-gateway.html[NAT gateway,type="documentation"] in the Availability Zone of each subnet after the VPC is created. A link:vpc/latest/userguide/VPC_SecurityGroups.html[security group,type="documentation"] is created that denies all inbound traffic, except from resources deployed into the subnets. A security group also allows all outbound traffic. The subnets are tagged so that [.noloc]`Kubernetes` can deploy internal load balancers to them. If you're creating a VPC with this configuration, see <> for additional requirements and considerations. + +.. Open the link:cloudformation/[{aws} CloudFormation console,type="console"]. +.. From the navigation bar, select an {aws} Region that supports Amazon EKS. +.. Choose *Create stack*, *With new resources (standard)*. +.. Under *Prepare template*, make sure that *Template is ready* is selected and then under *Template source*, select *Amazon S3 URL*. +.. 
Paste the following URL into the text area under *Amazon S3 URL* and choose *Next*: + +[source,none,subs="verbatim,attributes"] +---- +https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-fully-private-vpc.yaml +---- +.. On the *Specify Details* page, enter the parameters and then choose *Next*. + +*** *Stack name*: Choose a stack name for your {aws} CloudFormation stack. For example, you can call it [.replaceable]`amazon-eks-fully-private-vpc`. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. +*** *VpcBlock*: Choose a CIDR block for your VPC. Each node, [.noloc]`Pod`, and load balancer that you deploy is assigned an `IPv4` address from this block. The default `IPv4` values provide enough IP addresses for most implementations, but if it doesn't, then you can change it. For more information, see link:vpc/latest/userguide/VPC_Subnets.html#VPC_Sizing[VPC and subnet sizing,type="documentation"] in the Amazon VPC User Guide. You can also add additional CIDR blocks to the VPC once it's created. +*** *PrivateSubnet01Block*: Specify a CIDR block for subnet 1. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. +*** *PrivateSubnet02Block*: Specify a CIDR block for subnet 2. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. +*** *PrivateSubnet03Block*: Specify a CIDR block for subnet 3. The default value provides enough IP addresses for most implementations, but if it doesn't, then you can change it. +.. (Optional) On the *Options* page, tag your stack resources. Choose *Next*. +.. On the *Review* page, choose *Create*. +.. When your stack is created, select it in the console and choose *Outputs*. +.. Record the *VpcId* for the VPC that was created. You need this when you create your cluster and nodes. +.. Record the *SubnetIds* for the subnets that were created. You need at least two of these when you create your cluster and nodes. +.. (Optional) Any cluster that you deploy to this VPC can assign private `IPv4` addresses to your [.noloc]`Pods` and [.noloc]`services`. If you want deploy clusters to this VPC to assign private `IPv6` addresses to your [.noloc]`Pods` and [.noloc]`services`, make updates to your VPC, subnet, route tables, and security groups. For more information, see link:vpc/latest/userguide/vpc-migrate-ipv6.html[Migrate existing VPCs from IPv4 to IPv6,type="documentation"] in the Amazon VPC User Guide. Amazon EKS requires that your subnets have the `Auto-assign IPv6` addresses option enabled (it's disabled by default). diff --git a/latest/ug/networking/eks-networking-add-ons.adoc b/latest/ug/networking/eks-networking-add-ons.adoc new file mode 100644 index 00000000..1bdbfbdd --- /dev/null +++ b/latest/ug/networking/eks-networking-add-ons.adoc @@ -0,0 +1,4640 @@ +//!!NODE_ROOT
+[.topic] +[[eks-networking-add-ons,eks-networking-add-ons.title]] += Manage networking add-ons for Amazon EKS clusters +:info_doctype: section +:info_title: Manage networking add-ons for Amazon EKS \ + clusters +:info_titleabbrev: Manage networking add-ons +:info_abstract: Learn how to manage networking add-ons for your Amazon EKS cluster, including built-in \ + components like Amazon VPC CNI plugin for Kubernetes, CoreDNS, and kube-proxy, as well as optional \ + {aws} add-ons for load balancing and service mesh. + + +include::../attributes.txt[] + +[abstract] +-- +Learn how to manage networking add-ons for your Amazon EKS cluster, including built-in components like [.noloc]`Amazon VPC CNI plugin for Kubernetes`, [.noloc]`CoreDNS`, and `kube-proxy`, as well as optional {aws} add-ons for load balancing and service mesh. +-- + +Several networking add-ons are available for your Amazon EKS cluster. + + +[[eks-networking-add-ons-built-in,eks-networking-add-ons-built-in.title]] +== Built-in add-ons + +[NOTE] +==== +If you create clusters in any way except by using the console, each cluster comes with the self-managed versions of the built-in add-ons. The self-managed versions can't be managed from the {aws-management-console}, {aws} Command Line Interface, or SDKs. You manage the configuration and upgrades of self-managed add-ons. + +We recommend adding the Amazon EKS type of the add-on to your cluster instead of using the self-managed type of the add-on. If you create clusters in the console, the Amazon EKS type of these add-ons is installed. +==== + +*[.noloc]`Amazon VPC CNI plugin for Kubernetes`*:: +This CNI add-on creates elastic network interfaces and attaches them to your Amazon EC2 nodes. The add-on also assigns a private `IPv4` or `IPv6` address from your VPC to each [.noloc]`Pod` and service. This add-on is installed, by default, on your cluster. For more information, see <>. If you are using hybrid nodes, the VPC CNI is still installed by default but it is prevented from running on your hybrid nodes with an anti-affinity rule. For more information about your CNI options for hybrid nodes, see <>. + + +*[.noloc]`CoreDNS`*:: +[.noloc]`CoreDNS` is a flexible, extensible DNS server that can serve as the [.noloc]`Kubernetes` cluster DNS. [.noloc]`CoreDNS` provides name resolution for all [.noloc]`Pods` in the cluster. This add-on is installed, by default, on your cluster. For more information, see <>. + + +*`kube-proxy`*:: +This add-on maintains network rules on your Amazon EC2 nodes and enables network communication to your [.noloc]`Pods`. This add-on is installed, by default, on your cluster. For more information, see <>. + + +[[eks-networking-add-ons-optional,eks-networking-add-ons-optional.title]] +== Optional {aws} networking add-ons + +*[.noloc]`{aws} Load Balancer Controller`*:: +When you deploy [.noloc]`Kubernetes` service objects of type `loadbalancer`, the controller creates {aws} Network Load Balancers . When you create [.noloc]`Kubernetes` ingress objects, the controller creates {aws} Application Load Balancers. We recommend using this controller to provision Network Load Balancers, rather than using the https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/annotations/#legacy-cloud-provider[legacy Cloud Provider] controller built-in to [.noloc]`Kubernetes`. For more information, see the https://kubernetes-sigs.github.io/aws-load-balancer-controller[{aws} Load Balancer Controller] documentation. 
+ + +*{aws} Gateway API Controller*:: +This controller lets you connect services across multiple [.noloc]`Kubernetes` clusters using the https://gateway-api.sigs.k8s.io/[Kubernetes gateway API]. The controller connects [.noloc]`Kubernetes` services running on Amazon EC2 instances, containers, and serverless functions by using the link:vpc-lattice/latest/ug/what-is-vpc-service-network.html[Amazon VPC Lattice,type="documentation"] service. For more information, see the https://www.gateway-api-controller.eks.aws.dev/[{aws} Gateway API Controller] documentation. + +For more information about add-ons, see <>. + + +[.topic] +[[managing-vpc-cni,managing-vpc-cni.title]] +== Amazon VPC CNI +:info_title: Assign IPs to [.noloc]`Pods` with the Amazon VPC CNI +:info_titleabbrev: Amazon VPC CNI +:info_abstract: Discover how the [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on works to assign private IP addresses and create network interfaces for Pods and services in your Amazon EKS cluster. + +[abstract] +-- +Discover how the [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on works to assign private IP addresses and create network interfaces for [.noloc]`Pods` and services in your Amazon EKS cluster. +-- + +[TIP] +==== +With Amazon EKS Auto Mode, you don't need to install or upgrade networking add-ons. Auto Mode includes pod networking and load balancing capabilities. + +For more information, see <>. +==== + +The [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on is deployed on each Amazon EC2 node in your Amazon EKS cluster. The add-on creates link:AWSEC2/latest/UserGuide/using-eni.html[elastic network interfaces,type="documentation"] and attaches them to your Amazon EC2 nodes. The add-on also assigns a private `IPv4` or `IPv6` address from your VPC to each [.noloc]`Pod`. + +A version of the add-on is deployed with each Fargate node in your cluster, but you don't update it on Fargate nodes. Other compatible CNI plugins are available for use on Amazon EKS clusters, but this is the only CNI plugin supported by Amazon EKS for nodes that run on {aws} infrastructure. For more information about the other compatible CNI plugins, see <>. The VPC CNI isn't supported for use with hybrid nodes. For more information about your CNI options for hybrid nodes, see <>. + +The following table lists the latest available version of the Amazon EKS add-on type for each [.noloc]`Kubernetes` version. + +[[vpc-cni-latest-available-version,vpc-cni-latest-available-version.title]] +=== [.noloc]`Amazon VPC CNI` versions + +[options="header"] +|=== +| Kubernetes version | Amazon EKS type of VPC CNI version +| 1.31 | v1.19.0-eksbuild.1 +| 1.30 | v1.19.0-eksbuild.1 +| 1.29 | v1.19.0-eksbuild.1 +| 1.28 | v1.19.0-eksbuild.1 +| 1.27 | v1.19.0-eksbuild.1 +| 1.26 | v1.19.0-eksbuild.1 +| 1.25 | v1.19.0-eksbuild.1 +| 1.24 | v1.19.0-eksbuild.1 +| 1.23 | v1.18.5-eksbuild.1 +|=== + +[IMPORTANT] +==== + +If you're self-managing this add-on, the versions in the table might not be the same as the available self-managed versions. For more information about updating the self-managed type of this add-on, see <>. + +==== + +[IMPORTANT] +==== + +To upgrade to VPC CNI v1.12.0 or later, you must upgrade to VPC CNI v1.7.0 first. We recommend that you update one minor version at a time. + +==== + + +[[manage-vpc-cni-add-on-on-considerations,manage-vpc-cni-add-on-on-considerations.title]] +=== Considerations + +The following are considerations for using the feature. 
+ + + +* Versions are specified as `major-version.minor-version.patch-version-eksbuild.build-number`. +* Check version compatibility for each feature. Some features of each release of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` require certain [.noloc]`Kubernetes` versions. When using different Amazon EKS features, if a specific version of the add-on is required, then it's noted in the feature documentation. Unless you have a specific reason for running an earlier version, we recommend running the latest version. + + +[.topic] +[[vpc-add-on-create,vpc-add-on-create.title]] +=== Creating the Amazon VPC CNI (Amazon EKS add-on) + +Use the following steps to create the [.noloc]`Amazon VPC CNI plugin for Kubernetes` Amazon EKS add-on. + +Before you begin, review the considerations. For more information, see <>. + + +[[vpc-add-on-create-prerequisites,vpc-add-on-create-prerequisites.title]] +==== Prerequisites + +The following are prerequisites for the [.noloc]`Amazon VPC CNI plugin for Kubernetes` Amazon EKS add-on. + + + +* An existing Amazon EKS cluster. To deploy one, see <>. +* An existing {aws} Identity and Access Management (IAM) [.noloc]`OpenID Connect` ([.noloc]`OIDC`) provider for your cluster. To determine whether you already have one, or to create one, see <>. +* An IAM role with the link:aws-managed-policy/latest/reference/AmazonEKS_CNI_Policy.html[AmazonEKS_CNI_Policy,type="documentation"] IAM policy (if your cluster uses the `IPv4` family) or an IPv6 policy (if your cluster uses the `IPv6` family) attached to it. For more information about the VPC CNI role, see <>. For information about the IPv6 policy, see <>. +* If you're using version `1.7.0` or later of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` and you use custom [.noloc]`Pod` security policies, see <> and <>. + +[IMPORTANT] +==== + +[.noloc]`Amazon VPC CNI plugin for Kubernetes` versions `v1.16.0` to `v1.16.1` removed compatibility with [.noloc]`Kubernetes` versions `1.23` and earlier. VPC CNI version `v1.16.2` restores compatibility with [.noloc]`Kubernetes` versions `1.23` and earlier and CNI spec `v0.4.0`. + +[.noloc]`Amazon VPC CNI plugin for Kubernetes` versions `v1.16.0` to `v1.16.1` implement CNI specification version `v1.0.0`. CNI spec `v1.0.0` is supported on EKS clusters that run the [.noloc]`Kubernetes` versions `v1.24` or later. VPC CNI version `v1.16.0` to `v1.16.1` and CNI spec `v1.0.0` aren't supported on [.noloc]`Kubernetes` version `v1.23` or earlier. For more information about `v1.0.0` of the CNI spec, see https://github.com/containernetworking/cni/blob/spec-v1.0.0/SPEC.md[Container Network Interface (CNI) Specification] on [.noloc]`GitHub`. + +==== + + +[[vpc-add-on-create-procedure,vpc-add-on-create-procedure.title]] +==== Procedure + +After you complete the prerequisites, use the following steps to create the add-on. + +. See which version of the add-on is installed on your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3 +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +v1.16.4-eksbuild.2 +---- +. See which type of the add-on is installed on your cluster. Depending on the tool that you created your cluster with, you might not currently have the Amazon EKS add-on type installed on your cluster. Replace [.replaceable]`my-cluster` with the name of your cluster. 
++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name vpc-cni --query addon.addonVersion --output text +---- ++ +If a version number is returned, you have the Amazon EKS type of the add-on installed on your cluster and don't need to complete the remaining steps in this procedure. If an error is returned, you don't have the Amazon EKS type of the add-on installed on your cluster. Complete the remaining steps of this procedure to install it. +. Save the configuration of your currently installed add-on. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get daemonset aws-node -n kube-system -o yaml > aws-k8s-cni-old.yaml +---- +. Create the add-on using the {aws} CLI. If you want to use the {aws-management-console} or `eksctl` to create the add-on, see <> and specify `vpc-cni` for the add-on name. Copy the command that follows to your device. Make the following modifications to the command, as needed, and then run the modified command. ++ +** Replace [.replaceable]`my-cluster` with the name of your cluster. +** Replace [.replaceable]`v1.19.0-eksbuild.1` with the latest version listed in the latest version table for your cluster version. For the latest version table, see <>. +** Replace [.replaceable]`111122223333` with your account ID and [.replaceable]`AmazonEKSVPCCNIRole` with the name of an <> that you've created. Specifying a role requires that you have an IAM [.noloc]`OpenID Connect` ([.noloc]`OIDC`) provider for your cluster. To determine whether you have one for your cluster, or to create one, see <>. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks create-addon --cluster-name my-cluster --addon-name vpc-cni --addon-version v1.19.0-eksbuild.1 \ + --service-account-role-arn {arn-aws}iam::111122223333:role/AmazonEKSVPCCNIRole +---- ++ +If you've applied custom settings to your current add-on that conflict with the default settings of the Amazon EKS add-on, creation might fail. If creation fails, you receive an error that can help you resolve the issue. Alternatively, you can add `--resolve-conflicts OVERWRITE` to the previous command. This allows the add-on to overwrite any existing custom settings. Once you've created the add-on, you can update it with your custom settings. +. Confirm that the latest version of the add-on for your cluster's [.noloc]`Kubernetes` version was added to your cluster. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name vpc-cni --query addon.addonVersion --output text +---- ++ +It might take several seconds for add-on creation to complete. ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +v1.19.0-eksbuild.1 +---- +. If you made custom settings to your original add-on, before you created the Amazon EKS add-on, use the configuration that you saved in a previous step to update the EKS add-on with your custom settings. Follow the steps in <>. +. (Optional) Install the `cni-metrics-helper` to your cluster. It scrapes elastic network interface and IP address information, aggregates it at a cluster level, and publishes the metrics to Amazon CloudWatch. For more information, see https://github.com/aws/amazon-vpc-cni-k8s/blob/master/cmd/cni-metrics-helper/README.md[cni-metrics-helper] on GitHub. 
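+
+If you want to script this check-and-create flow, the following is a minimal sketch that combines the previous steps. It assumes the same placeholder values used in this procedure ([.replaceable]`my-cluster`, [.replaceable]`111122223333`, [.replaceable]`AmazonEKSVPCCNIRole`, and the example version `v1.19.0-eksbuild.1`) and only creates the Amazon EKS type of the add-on when the `describe-addon` call reports that none is installed.
+
+[source,bash,subs="verbatim,attributes"]
+----
+CLUSTER_NAME=my-cluster
+ROLE_ARN={arn-aws}iam::111122223333:role/AmazonEKSVPCCNIRole
+
+# If describe-addon fails, the Amazon EKS type of the add-on isn't installed yet.
+if ! aws eks describe-addon --cluster-name "$CLUSTER_NAME" --addon-name vpc-cni \
+    --query addon.addonVersion --output text 2>/dev/null; then
+  # Save the configuration of the self-managed add-on before replacing it.
+  kubectl get daemonset aws-node -n kube-system -o yaml > aws-k8s-cni-old.yaml
+  # Create the Amazon EKS add-on at the version listed for your cluster.
+  aws eks create-addon --cluster-name "$CLUSTER_NAME" --addon-name vpc-cni \
+    --addon-version v1.19.0-eksbuild.1 \
+    --service-account-role-arn "$ROLE_ARN"
+fi
+----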
+ + +[.topic] +[[vpc-add-on-update,vpc-add-on-update.title]] +=== Updating the Amazon VPC CNI (Amazon EKS add-on) + +Update the Amazon EKS type of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on. If you haven't added the Amazon EKS type of the add-on to your cluster, you can install it by following <>. Or, update the other type of VPC CNI installation by following <>. + +. See which version of the add-on is installed on your cluster. Replace [.replaceable]`my-cluster` with your cluster name. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name vpc-cni --query "addon.addonVersion" --output text +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +v1.16.4-eksbuild.2 +---- ++ +Compare the version with the table of latest versions at <>. If the version returned is the same as the version for your cluster's [.noloc]`Kubernetes` version in the latest version table, then you already have the latest version installed on your cluster and don't need to complete the rest of this procedure. If you receive an error, instead of a version number in your output, then you don't have the Amazon EKS type of the add-on installed on your cluster. You need to create the add-on before you can update it with this procedure. To create the Amazon EKS type of the VPC CNI add-on, you can follow <>. +. Save the configuration of your currently installed add-on. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get daemonset aws-node -n kube-system -o yaml > aws-k8s-cni-old.yaml +---- +. Update your add-on using the {aws} CLI. If you want to use the {aws-management-console} or `eksctl` to update the add-on, see <>. Copy the command that follows to your device. Make the following modifications to the command, as needed, and then run the modified command. ++ +** Replace [.replaceable]`my-cluster` with the name of your cluster. +** Replace [.replaceable]`v1.19.0-eksbuild.1` with the latest version listed in the latest version table for your cluster version. +** Replace [.replaceable]`111122223333` with your account ID and [.replaceable]`AmazonEKSVPCCNIRole` with the name of an existing IAM role that you've created. To create an IAM role for the VPC CNI, see <>. Specifying a role requires that you have an IAM [.noloc]`OpenID Connect` ([.noloc]`OIDC`) provider for your cluster. To determine whether you have one for your cluster, or to create one, see <>. +** The `--resolve-conflicts PRESERVE` option preserves existing configuration values for the add-on. If you've set custom values for add-on settings, and you don't use this option, Amazon EKS overwrites your values with its default values. If you use this option, then we recommend testing any field and value changes on a non-production cluster before updating the add-on on your production cluster. If you change this value to `OVERWRITE`, all settings are changed to Amazon EKS default values. If you've set custom values for any settings, they might be overwritten with Amazon EKS default values. If you change this value to `none`, Amazon EKS doesn't change the value of any settings, but the update might fail. If the update fails, you receive an error message to help you resolve the conflict. +** If you're not updating a configuration setting, remove `--configuration-values '{[.replaceable]``"env":{"AWS_VPC_K8S_CNI_EXTERNALSNAT":"true"}``}'` from the command. 
If you're updating a configuration setting, replace [.replaceable]`"env":{"AWS_VPC_K8S_CNI_EXTERNALSNAT":"true"}` with the setting that you want to set. In this example, the `AWS_VPC_K8S_CNI_EXTERNALSNAT` environment variable is set to `true`. The value that you specify must be valid for the configuration schema. If you don't know the configuration schema, run `aws eks describe-addon-configuration --addon-name vpc-cni --addon-version [.replaceable]``v1.19.0-eksbuild.1```, replacing [.replaceable]`v1.19.0-eksbuild.1` with the version number of the add-on that you want to see the configuration for. The schema is returned in the output. If you have any existing custom configuration, want to remove it all, and set the values for all settings back to Amazon EKS defaults, remove [.replaceable]`"env":{"AWS_VPC_K8S_CNI_EXTERNALSNAT":"true"}` from the command, so that you have empty `{}`. For an explanation of each setting, see https://github.com/aws/amazon-vpc-cni-k8s#cni-configuration-variables[CNI Configuration Variables] on GitHub. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-addon --cluster-name my-cluster --addon-name vpc-cni --addon-version v1.19.0-eksbuild.1 \ + --service-account-role-arn {arn-aws}iam::111122223333:role/AmazonEKSVPCCNIRole \ + --resolve-conflicts PRESERVE --configuration-values '{"env":{"AWS_VPC_K8S_CNI_EXTERNALSNAT":"true"}}' +---- ++ +It might take several seconds for the update to complete. +. Confirm that the add-on version was updated. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name vpc-cni +---- ++ +It might take several seconds for the update to complete. ++ +An example output is as follows. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "addon": { + "addonName": "vpc-cni", + "clusterName": "my-cluster", + "status": "ACTIVE", + "addonVersion": "v1.19.0-eksbuild.1", + "health": { + "issues": [] + }, + "addonArn": "{arn-aws}eks:region:111122223333:addon/my-cluster/vpc-cni/74c33d2f-b4dc-8718-56e7-9fdfa65d14a9", + "createdAt": "2023-04-12T18:25:19.319000+00:00", + "modifiedAt": "2023-04-12T18:40:28.683000+00:00", + "serviceAccountRoleArn": "{arn-aws}iam::111122223333:role/AmazonEKSVPCCNIRole", + "tags": {}, + "configurationValues": "{\"env\":{\"AWS_VPC_K8S_CNI_EXTERNALSNAT\":\"true\"}}" + } +} +---- + + +[.topic] +[[vpc-add-on-self-managed-update,vpc-add-on-self-managed-update.title]] +=== Updating the Amazon VPC CNI (self-managed add-on) + +[IMPORTANT] +==== + +We recommend adding the Amazon EKS type of the add-on to your cluster instead of using the self-managed type of the add-on. If you're not familiar with the difference between the types, see <>. For more information about adding an Amazon EKS add-on to your cluster, see <>. If you're unable to use the Amazon EKS add-on, we encourage you to submit an issue about why you can't to the https://github.com/aws/containers-roadmap/issues[Containers roadmap GitHub repository]. + +==== +. Confirm that you don't have the Amazon EKS type of the add-on installed on your cluster. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name vpc-cni --query addon.addonVersion --output text +---- ++ +If an error message is returned, you don't have the Amazon EKS type of the add-on installed on your cluster. 
To self-manage the add-on, complete the remaining steps in this procedure to update the add-on. If a version number is returned, you have the Amazon EKS type of the add-on installed on your cluster. To update it, use the procedure in <>, rather than using this procedure. If you're not familiar with the differences between the add-on types, see <>. +. See which version of the container image is currently installed on your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3 +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +v1.16.4-eksbuild.2 +---- ++ +Your output might not include the build number. +. Backup your current settings so you can configure the same settings once you've updated your version. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get daemonset aws-node -n kube-system -o yaml > aws-k8s-cni-old.yaml +---- +To review the available versions and familiarize yourself with the changes in the version that you want to update to, see https://github.com/aws/amazon-vpc-cni-k8s/releases[releases] on [.noloc]`GitHub`. Note that we recommend updating to the same `major`.``minor``.``patch`` version listed in the latest available versions table, even if later versions are available on GitHub. For the latest available version table, see <>. The build versions listed in the table aren't specified in the self-managed versions listed on GitHub. Update your version by completing the tasks in one of the following options: ++ +** If you don't have any custom settings for the add-on, then run the command under the `To apply this release:` heading on GitHub for the https://github.com/aws/amazon-vpc-cni-k8s/releases[release] that you're updating to. +** If you have custom settings, download the manifest file with the following command. Change [.replaceable]`https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.19.0/config/master/aws-k8s-cni.yaml` to the URL for the release on GitHub that you're updating to. ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.19.0/config/master/aws-k8s-cni.yaml +---- ++ +If necessary, modify the manifest with the custom settings from the backup you made in a previous step and then apply the modified manifest to your cluster. If your nodes don't have access to the private Amazon EKS Amazon ECR repositories that the images are pulled from (see the lines that start with `image:` in the manifest), then you'll have to download the images, copy them to your own repository, and modify the manifest to pull the images from your repository. For more information, see <>. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f aws-k8s-cni.yaml +---- +. Confirm that the new version is now installed on your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3 +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +v1.19.0 +---- +. (Optional) Install the `cni-metrics-helper` to your cluster. It scrapes elastic network interface and IP address information, aggregates it at a cluster level, and publishes the metrics to Amazon CloudWatch. For more information, see https://github.com/aws/amazon-vpc-cni-k8s/blob/master/cmd/cni-metrics-helper/README.md[cni-metrics-helper] on GitHub. 
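+
+Before you apply a modified manifest, it can help to compare the environment variables in the backup that you saved earlier with the ones in the downloaded manifest. The following is a rough sketch, assuming the backup file `aws-k8s-cni-old.yaml` and the example `v1.19.0` manifest URL used in this procedure; carry any custom values over before you run `kubectl apply`.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Download the manifest for the release that you're updating to.
+curl -O https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.19.0/config/master/aws-k8s-cni.yaml
+
+# Compare the CNI environment variables in the backup with the new manifest.
+# Differences indicate custom settings that you need to re-apply.
+diff <(grep -A 1 'name: AWS_' aws-k8s-cni-old.yaml) \
+     <(grep -A 1 'name: AWS_' aws-k8s-cni.yaml) || true
+----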
+
+
+[.topic]
+[[cni-iam-role,cni-iam-role.title]]
+=== Configure Amazon VPC CNI plugin to use IRSA
+:info_doctype: section
+:info_title: Configure Amazon VPC CNI plugin to use IRSA
+:info_titleabbrev: Configure VPC CNI for IRSA
+:info_abstract: Learn how to configure the [.noloc]`Amazon VPC CNI plugin for Kubernetes` to use IAM roles for service accounts (IRSA) for [.noloc]`Pod` networking in Amazon EKS clusters.
+
+[abstract]
+--
+Learn how to configure the [.noloc]`Amazon VPC CNI plugin for Kubernetes` to use IAM roles for service accounts (IRSA) for [.noloc]`Pod` networking in Amazon EKS clusters.
+--
+
+The https://github.com/aws/amazon-vpc-cni-k8s[Amazon VPC CNI plugin for Kubernetes] is the networking plugin for [.noloc]`Pod` networking in Amazon EKS clusters. The plugin is responsible for allocating VPC IP addresses to [.noloc]`Kubernetes` nodes and configuring the necessary networking for [.noloc]`Pods` on each node. The plugin:
+
+
+* Requires {aws} Identity and Access Management (IAM) permissions. If your cluster uses the `IPv4` family, the permissions are specified in the link:aws-managed-policy/latest/reference/AmazonEKS_CNI_Policy.html[AmazonEKS_CNI_Policy,type="documentation"] {aws} managed policy. If your cluster uses the `IPv6` family, then the permissions must be added to an IAM policy that you create; for instructions, see <>. You can attach the policy to the Amazon EKS node IAM role, or to a separate IAM role. For instructions to attach the policy to the Amazon EKS node IAM role, see <>. We recommend that you assign it to a separate role, as detailed in this topic.
+* Creates and is configured to use a [.noloc]`Kubernetes` service account named `aws-node` when it's deployed. The service account is bound to a [.noloc]`Kubernetes` `clusterrole` named `aws-node`, which is assigned the required [.noloc]`Kubernetes` permissions.
+
+
+[NOTE]
+====
+
+The [.noloc]`Pods` for the [.noloc]`Amazon VPC CNI plugin for Kubernetes` have access to the permissions assigned to the <>, unless you block access to IMDS. For more information, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[Restrict access to the instance profile assigned to the worker node].
+
+====
+
+The following are prerequisites for completing this procedure:
+
+* An existing Amazon EKS cluster. To deploy one, see <>.
+* An existing {aws} Identity and Access Management (IAM) [.noloc]`OpenID Connect` ([.noloc]`OIDC`) provider for your cluster. To determine whether you already have one, or to create one, see <>.
+
+
+[[cni-iam-role-create-role,cni-iam-role-create-role.title]]
+==== Step 1: Create the [.noloc]`Amazon VPC CNI plugin for Kubernetes` IAM role
+. Determine the IP family of your cluster.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks describe-cluster --name my-cluster | grep ipFamily
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+"ipFamily": "ipv4"
+----
++
+The output may return `ipv6` instead.
+. Create the IAM role. You can use `eksctl` or `kubectl` and the {aws} CLI to create your IAM role.
++
+eksctl:::
+** Create an IAM role and attach the IAM policy to the role with the command that matches the IP family of your cluster. The command creates and deploys an {aws} CloudFormation stack that creates an IAM role, attaches the policy that you specify to it, and annotates the existing `aws-node` [.noloc]`Kubernetes` service account with the ARN of the IAM role that is created.
++ +*** `IPv4` ++ +Replace [.replaceable]`my-cluster` with your own value. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create iamserviceaccount \ + --name aws-node \ + --namespace kube-system \ + --cluster my-cluster \ + --role-name AmazonEKSVPCCNIRole \ + --attach-policy-arn {arn-aws}iam::aws:policy/AmazonEKS_CNI_Policy \ + --override-existing-serviceaccounts \ + --approve +---- +*** `IPv6` ++ +Replace [.replaceable]`my-cluster` with your own value. Replace [.replaceable]`111122223333` with your account ID and replace [.replaceable]`AmazonEKS_CNI_IPv6_Policy` with the name of your `IPv6` policy. If you don't have an `IPv6` policy, see <> to create one. To use `IPv6` with your cluster, it must meet several requirements. For more information, see <>. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create iamserviceaccount \ + --name aws-node \ + --namespace kube-system \ + --cluster my-cluster \ + --role-name AmazonEKSVPCCNIRole \ + --attach-policy-arn {arn-aws}iam::111122223333:policy/AmazonEKS_CNI_IPv6_Policy \ + --override-existing-serviceaccounts \ + --approve +---- + + +kubectl and the {aws} CLI::: +... View your cluster's OIDC provider URL. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --name my-cluster --query "cluster.identity.oidc.issuer" --output text +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +https://oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE +---- ++ +If no output is returned, then you must <>. +... Copy the following contents to a file named [.replaceable]`vpc-cni-trust-policy.json`. Replace [.replaceable]`111122223333` with your account ID and [.replaceable]`EXAMPLED539D4633E53DE1B71EXAMPLE` with the output returned in the previous step. Replace [.replaceable]`region-code` with the {aws} Region that your cluster is in. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "{arn-aws}iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud": "sts.amazonaws.com", + "oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub": "system:serviceaccount:kube-system:aws-node" + } + } + } + ] +} +---- +... Create the role. You can replace [.replaceable]`AmazonEKSVPCCNIRole` with any name that you choose. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam create-role \ + --role-name AmazonEKSVPCCNIRole \ + --assume-role-policy-document file://"vpc-cni-trust-policy.json" +---- +... Attach the required IAM policy to the role. Run the command that matches the IP family of your cluster. ++ +**** `IPv4` ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam attach-role-policy \ + --policy-arn {arn-aws}iam::aws:policy/AmazonEKS_CNI_Policy \ + --role-name AmazonEKSVPCCNIRole +---- +**** `IPv6` ++ +Replace [.replaceable]`111122223333` with your account ID and [.replaceable]`AmazonEKS_CNI_IPv6_Policy` with the name of your `IPv6` policy. If you don't have an `IPv6` policy, see <> to create one. To use `IPv6` with your cluster, it must meet several requirements. For more information, see <>. 
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws iam attach-role-policy \
+    --policy-arn {arn-aws}iam::111122223333:policy/AmazonEKS_CNI_IPv6_Policy \
+    --role-name AmazonEKSVPCCNIRole
+----
+... Run the following command to annotate the `aws-node` service account with the ARN of the IAM role that you created previously. Replace the [.replaceable]`example values` with your own values.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl annotate serviceaccount \
+    -n kube-system aws-node \
+    eks.amazonaws.com/role-arn={arn-aws}iam::111122223333:role/AmazonEKSVPCCNIRole
+----
+. (Optional) Configure the {aws} Security Token Service endpoint type used by your [.noloc]`Kubernetes` service account. For more information, see <>.
+
+
+[[cni-iam-role-redeploy-pods,cni-iam-role-redeploy-pods.title]]
+==== Step 2: Re-deploy [.noloc]`Amazon VPC CNI plugin for Kubernetes` [.noloc]`Pods`
+. Delete and re-create any existing [.noloc]`Pods` that are associated with the service account to apply the credential environment variables. The annotation is not applied to [.noloc]`Pods` that are currently running without the annotation. The following command deletes the existing `aws-node` [.noloc]`DaemonSet` [.noloc]`Pods` and deploys them with the service account annotation.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl delete Pods -n kube-system -l k8s-app=aws-node
+----
+. Confirm that the [.noloc]`Pods` all restarted.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get pods -n kube-system -l k8s-app=aws-node
+----
+. Describe one of the [.noloc]`Pods` and verify that the `AWS_WEB_IDENTITY_TOKEN_FILE` and `AWS_ROLE_ARN` environment variables exist. Replace [.replaceable]`cpjw7` with the name of one of your [.noloc]`Pods` returned in the output of the previous step.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl describe pod -n kube-system aws-node-cpjw7 | grep 'AWS_ROLE_ARN:\|AWS_WEB_IDENTITY_TOKEN_FILE:'
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+AWS_ROLE_ARN:                 {arn-aws}iam::111122223333:role/AmazonEKSVPCCNIRole
+      AWS_WEB_IDENTITY_TOKEN_FILE:  /var/run/secrets/eks.amazonaws.com/serviceaccount/token
+      AWS_ROLE_ARN:                 {arn-aws}iam::111122223333:role/AmazonEKSVPCCNIRole
+      AWS_WEB_IDENTITY_TOKEN_FILE:  /var/run/secrets/eks.amazonaws.com/serviceaccount/token
+----
++
+Two sets of duplicate results are returned because the [.noloc]`Pod` contains two containers. Both containers have the same values.
++
+If your [.noloc]`Pod` is using the {aws} Regional endpoint, then the following line is also returned in the previous output.
++
+[source,bash,subs="verbatim,attributes"]
+----
+AWS_STS_REGIONAL_ENDPOINTS=regional
+----
+
+
+[[remove-cni-policy-node-iam-role,remove-cni-policy-node-iam-role.title]]
+==== Step 3: Remove the CNI policy from the node IAM role
+
+If your <> currently has the `AmazonEKS_CNI_Policy` IAM (`IPv4`) policy or an <> attached to it, and you've created a separate IAM role, attached the policy to it instead, and assigned it to the `aws-node` [.noloc]`Kubernetes` service account, then we recommend that you remove the policy from your node role with the {aws} CLI command that matches the IP family of your cluster. Replace [.replaceable]`AmazonEKSNodeRole` with the name of your node role.
+ + + +* `IPv4` ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam detach-role-policy --role-name AmazonEKSNodeRole --policy-arn {arn-aws}iam::aws:policy/AmazonEKS_CNI_Policy +---- +* `IPv6` ++ +Replace [.replaceable]`111122223333` with your account ID and [.replaceable]`AmazonEKS_CNI_IPv6_Policy` with the name of your `IPv6` policy. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam detach-role-policy --role-name AmazonEKSNodeRole --policy-arn {arn-aws}iam::111122223333:policy/AmazonEKS_CNI_IPv6_Policy +---- + + +[[cni-iam-role-create-ipv6-policy,cni-iam-role-create-ipv6-policy.title]] +==== Create IAM policy for clusters that use the `IPv6` family + +If you created a cluster that uses the `IPv6` family and the cluster has version `1.10.1` or later of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on configured, then you need to create an IAM policy that you can assign to an IAM role. If you have an existing cluster that you didn't configure with the `IPv6` family when you created it, then to use `IPv6`, you must create a new cluster. For more information about using `IPv6` with your cluster, see <>. + +. Copy the following text and save it to a file named `vpc-cni-ipv6-policy.json`. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AssignIpv6Addresses", + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeInstanceTypes" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags" + ], + "Resource": [ + "{arn-aws}ec2:*:*:network-interface/*" + ] + } + ] +} +---- +. Create the IAM policy. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam create-policy --policy-name AmazonEKS_CNI_IPv6_Policy --policy-document file://vpc-cni-ipv6-policy.json +---- + + +[.topic] +[[pod-networking-use-cases,pod-networking-use-cases.title]] +=== Learn about VPC CNI modes and configuration + +[abstract] +-- +Discover how [.noloc]`Amazon VPC CNI plugin for Kubernetes` provides pod networking capabilities and settings for different Amazon EKS node types and use cases, including security groups, [.noloc]`Kubernetes` network policies, custom networking, IPv4, and IPv6 support. +-- + +The [.noloc]`Amazon VPC CNI plugin for Kubernetes` provides networking for [.noloc]`Pods`. Use the following table to learn more about the available networking features. + +[cols="1,1", options="header"] +|=== +|Networking feature +|Learn more + + +|Configure your cluster to assign IPv6 addresses to clusters, [.noloc]`Pods`, and services +|<> + +|Use IPv4 Source Network Address Translation for [.noloc]`Pods` +|<> + +|Restrict network traffic to and from your [.noloc]`Pods` +|<> + +|Customize the secondary network interface in nodes +|<> + +|Increase IP addresses for your node +|<> + +|Use security groups for [.noloc]`Pod` network traffic +|<> + +|Use multiple network interfaces for [.noloc]`Pods` +|<> +|=== + +[.topic] +[[cni-ipv6,cni-ipv6.title]] +==== Learn about IPv6 addresses to clusters, [.noloc]`pods`, and services + +[abstract] +-- +Learn how to deploy an `IPv6` cluster and nodes with Amazon EKS for assigning `IPv6` addresses to [.noloc]`Pods` and [.noloc]`services` instead of `IPv4`, leveraging IP prefix delegation and the latest [.noloc]`Amazon VPC CNI` plugin. 
+--
+
+*Applies to*: [.noloc]`Pods` with Amazon EC2 instances and Fargate [.noloc]`Pods`
+
+By default, [.noloc]`Kubernetes` assigns `IPv4` addresses to your [.noloc]`Pods` and [.noloc]`services`. Instead of assigning `IPv4` addresses to your [.noloc]`Pods` and [.noloc]`services`, you can configure your cluster to assign `IPv6` addresses to them. Amazon EKS doesn't support dual-stacked [.noloc]`Pods` or [.noloc]`services`, even though [.noloc]`Kubernetes` does in version `1.23` and later. As a result, you can't assign both `IPv4` and `IPv6` addresses to your [.noloc]`Pods` and [.noloc]`services`.
+
+You select which IP family you want to use for your cluster when you create it. You can't change the family after you create the cluster.
+
+For a tutorial to deploy an Amazon EKS `IPv6` cluster, see <>.
+
+//[[ipv6-considerations,ipv6-considerations.title]]
+//===== Considerations
+
+The following are considerations for using the feature:
+
+===== `IPv6` feature support
+
+* *No [.noloc]`Windows` support*: [.noloc]`Windows` [.noloc]`Pods` and [.noloc]`services` aren't supported.
+* *Nitro-based EC2 nodes required*: You can only use `IPv6` with {aws} Nitro-based Amazon EC2 or Fargate nodes.
+* *EC2 and Fargate nodes supported*: You can use `IPv6` with <> with Amazon EC2 nodes and Fargate nodes.
+* *Outposts not supported*: You can't use `IPv6` with <>.
+* *FSx for Lustre not supported*: The <> is not supported.
+* *Instance Metadata Service not supported*: Use of the Amazon EC2 link:AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html[Instance Metadata Service,type="documentation"] `IPv6` endpoint is not supported with Amazon EKS.
+* *Custom networking not supported*: If you previously used <> to help alleviate IP address exhaustion, you can use `IPv6` instead. You can't use custom networking with `IPv6`. If you use custom networking for network isolation, then you might need to continue to use custom networking and the `IPv4` family for your clusters.
+
+
+===== IP address assignments
+
+* *Kubernetes services*: [.noloc]`Kubernetes` services are only assigned an `IPv6` address. They aren't assigned `IPv4` addresses.
+* *Pods*: [.noloc]`Pods` are assigned an `IPv6` address and a host-local `IPv4` address. The host-local `IPv4` address is assigned by using a host-local CNI plugin chained with the VPC CNI, and the address is not reported to the [.noloc]`Kubernetes` control plane. It is only used when a [.noloc]`Pod` needs to communicate with external `IPv4` resources in another Amazon VPC or the internet. The host-local `IPv4` address gets SNATed (by the VPC CNI) to the primary `IPv4` address of the primary ENI of the worker node.
+* *Pods and services*: [.noloc]`Pods` and [.noloc]`services` are only assigned an `IPv6` address. They aren't assigned an `IPv4` address. Because [.noloc]`Pods` are able to communicate to `IPv4` endpoints through NAT on the instance itself, link:vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64[DNS64 and NAT64,type="documentation"] aren't needed. If the traffic needs a public IP address, the traffic is then source network address translated to a public IP.
+* *Routing addresses*: The source `IPv6` address of a [.noloc]`Pod` isn't source network address translated to the `IPv6` address of the node when communicating outside of the VPC. It is routed using an internet gateway or egress-only internet gateway.
+* *Nodes*: All nodes are assigned an `IPv4` and `IPv6` address.
+* *Fargate [.noloc]`Pods`*: Each Fargate [.noloc]`Pod` receives an `IPv6` address from the CIDR that's specified for the subnet that it's deployed in. The underlying hardware unit that runs Fargate [.noloc]`Pods` gets a unique `IPv4` and `IPv6` address from the CIDRs that are assigned to the subnet that the hardware unit is deployed in. + + +===== How to use `IPv6` with EKS + +* *Create new cluster*: You must create a new cluster and specify that you want to use the `IPv6` family for that cluster. You can't enable the `IPv6` family for a cluster that you updated from a previous version. For instructions on how to create a new cluster, see Considerations . +* *Use recent VPC CNI*: Deploy Amazon VPC CNI version `1.10.1` or later. This version or later is deployed by default. After you deploy the add-on, you can't downgrade your Amazon VPC CNI add-on to a version lower than `1.10.1` without first removing all nodes in all node groups in your cluster. +* *Configure VPC CNI for `IPv6`*: If you use Amazon EC2 nodes, you must configure the Amazon VPC CNI add-on with IP prefix delegation and `IPv6`. If you choose the `IPv6` family when creating your cluster, the `1.10.1` version of the add-on defaults to this configuration. This is the case for both a self-managed or Amazon EKS add-on. For more information about IP prefix delegation, see <>. +* *Configure `IPv4` and `IPv6` addresses*: When you create a cluster, the VPC and subnets that you specify must have an `IPv6` CIDR block that's assigned to the VPC and subnets that you specify. They must also have an `IPv4` CIDR block assigned to them. This is because, even if you only want to use `IPv6`, a VPC still requires an `IPv4` CIDR block to function. For more information, see link:vpc/latest/userguide/working-with-vpcs.html#vpc-associate-ipv6-cidr[Associate an IPv6 CIDR block with your VPC,type="documentation"] in the Amazon VPC User Guide. +* *Auto-assign IPv6 addresses to nodes:* When you create your nodes, you must specify subnets that are configured to auto-assign `IPv6` addresses. Otherwise, you can't deploy your nodes. By default, this configuration is disabled. For more information, see link:vpc/latest/userguide/vpc-ip-addressing.html#subnet-ipv6[Modify the IPv6 addressing attribute for your subnet,type="documentation"] in the Amazon VPC User Guide. +* *Set route tables to use `IPv6`*: The route tables that are assigned to your subnets must have routes for `IPv6` addresses. For more information, see link:vpc/latest/userguide/vpc-migrate-ipv6.html[Migrate to IPv6,type="documentation"] in the Amazon VPC User Guide. +* *Set security groups for `IPv6`*: Your security groups must allow `IPv6` addresses. For more information, see link:vpc/latest/userguide/vpc-migrate-ipv6.html[Migrate to IPv6,type="documentation"] in the Amazon VPC User Guide. +* *Set up load balancer*: Use version `2.3.1` or later of the {aws} Load Balancer Controller to load balance HTTP applications using the <> or network traffic using the <> to `IPv6` [.noloc]`Pods` with either load balancer in IP mode, but not instance mode. For more information, see <>. +* *Add `IPv6` IAM policy*: You must attach an `IPv6` IAM policy to your node IAM or CNI IAM role. Between the two, we recommend that you attach it to a CNI IAM role. For more information, see <> and <>. +* *Evaluate all components*: Perform a thorough evaluation of your applications, Amazon EKS add-ons, and {aws} services that you integrate with before deploying `IPv6` clusters. 
This is to ensure that everything works as expected with `IPv6`. +* *Add `BootstrapArguments` self-managed node groups*: When creating a self-managed node group in a cluster that uses the `IPv6` family, user-data must include the following `BootstrapArguments` for the https://github.com/awslabs/amazon-eks-ami/blob/main/templates/al2/runtime/bootstrap.sh[bootstrap.sh] file that runs at node start up. Replace [.replaceable]`your-cidr` with the `IPv6` [.noloc]`CIDR` range of your cluster's VPC. ++ +[source,bash,subs="verbatim,attributes"] +---- +--ip-family ipv6 --service-ipv6-cidr your-cidr +---- ++ +If you don't know the `IPv6` `CIDR` range for your cluster, you can see it with the following command (requires the {aws} CLI version `2.4.9` or later). ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --name my-cluster --query cluster.kubernetesNetworkConfig.serviceIpv6Cidr --output text +---- + + +[.topic] +[[deploy-ipv6-cluster,deploy-ipv6-cluster.title]] +===== Deploying an Amazon EKS `IPv6` cluster and managed Amazon Linux nodes + +In this tutorial, you deploy an `IPv6` Amazon VPC, an Amazon EKS cluster with the `IPv6` family, and a managed node group with Amazon EC2 Amazon Linux nodes. You can't deploy Amazon EC2 [.noloc]`Windows` nodes in an `IPv6` cluster. You can also deploy Fargate nodes to your cluster, though those instructions aren't provided in this topic for simplicity. + +====== Prerequisites + +Complete the following before you start the tutorial: + +Install and configure the following tools and resources that you need to create and manage an Amazon EKS cluster. + +* We recommend that you familiarize yourself with all settings and deploy a cluster with the settings that meet your requirements. For more information, see <>, <>, and the <> for this topic. You can only enable some settings when creating your cluster. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* The IAM security principal that you're using must have permissions to work with Amazon EKS IAM roles, service linked roles, {aws} CloudFormation, a VPC, and related resources. For more information, see link:service-authorization/latest/reference/list_amazonelastickubernetesservice.html[Actions, resources, and condition keys for Amazon Elastic Kubernetes Service,type="documentation"] and link:IAM/latest/UserGuide/using-service-linked-roles.html[Using service-linked roles,type="documentation"] in the IAM User Guide. +* If you use the [.noloc]`eksctl`, install version `{eksctl-min-version}` or later on your computer. To install or update to it, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. +* Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. 
To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. The {aws} CLI version that is installed in {aws} CloudShell might also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the _{aws} CloudShell User Guide_. If you use the {aws} CloudShell, you may need to link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[install version 2.12.3 or later or 1.27.160 or later of the {aws} CLI,type="documentation"], because the default {aws} CLI version installed in the {aws} CloudShell may be an earlier version. + + +//[[deploy-ipv6-cluster-procedure,deploy-ipv6-cluster-procedure.title]] +//====== Procedure + +You can use the [.noloc]`eksctl` or CLI to deploy an `IPv6` cluster. + + +====== Deploy an IPv6 cluster with [.noloc]`eksctl` + +.. Create the `ipv6-cluster.yaml` file. Copy the command that follows to your device. Make the following modifications to the command as needed and then run the modified command: ++ +*** Replace [.replaceable]`my-cluster` with a name for your cluster. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. +*** Replace [.replaceable]`region-code` with any {aws} Region that is supported by Amazon EKS. For a list of {aws} Regions, see link:general/latest/gr/eks.html[Amazon EKS endpoints and quotas,type="documentation"] in the {aws} General Reference guide. +*** The value for `version` with the version of your cluster. For more information, see <>. +*** Replace [.replaceable]`my-nodegroup` with a name for your node group. The node group name can't be longer than 63 characters. It must start with letter or digit, but can also include hyphens and underscores for the remaining characters. +*** Replace [.replaceable]`t3.medium` with any link:AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances[{aws} Nitro System instance type,type="documentation"]. ++ +[source,yaml,subs="verbatim,attributes"] +---- +cat >ipv6-cluster.yaml < +aws-node-t74jh 1/1 Running 0 5m32s 2600:1f13:b66:8203:4516:2080:8ced:1ca9 ip-192-168-253-70.region-code.compute.internal +coredns-85d5b4454c-cw7w2 1/1 Running 0 56m 2600:1f13:b66:8203:34e5:: ip-192-168-253-70.region-code.compute.internal +coredns-85d5b4454c-tx6n8 1/1 Running 0 56m 2600:1f13:b66:8203:34e5::1 ip-192-168-253-70.region-code.compute.internal +kube-proxy-btpbk 1/1 Running 0 5m36s 2600:1f13:b66:8200:11a5:ade0:c590:6ac8 ip-192-168-34-75.region-code.compute.internal +kube-proxy-jjk2g 1/1 Running 0 5m33s 2600:1f13:b66:8203:4516:2080:8ced:1ca9 ip-192-168-253-70.region-code.compute.internal +---- +.. Confirm that default services are assigned `IPv6` addresses. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get services -n kube-system -o wide +---- ++ +An example output is as follows. 
++ +[source,bash,subs="verbatim,attributes"] +---- +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR +kube-dns ClusterIP fd30:3087:b6c2::a 53/UDP,53/TCP 57m k8s-app=kube-dns +---- +.. (Optional) <> or deploy the <> and a sample application to load balance HTTP applications with <> or network traffic with <> to `IPv6` [.noloc]`Pods`. +.. After you've finished with the cluster and nodes that you created for this tutorial, you should clean up the resources that you created with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl delete cluster my-cluster +---- + + +====== Deploy an IPv6 cluster with {aws} CLI + +[IMPORTANT] +==== +** You must complete all steps in this procedure as the same user. To check the current user, run the following command: ++ +[source,bash,subs="verbatim,attributes"] +---- +aws sts get-caller-identity +---- +** You must complete all steps in this procedure in the same shell. Several steps use variables set in previous steps. Steps that use variables won't function properly if the variable values are set in a different shell. If you use the link:cloudshell/latest/userguide/welcome.html[{aws} CloudShell,type="documentation"] to complete the following procedure, remember that if you don't interact with it using your keyboard or pointer for approximately 20–30 minutes, your shell session ends. Running processes do not count as interactions. +** The instructions are written for the Bash shell, and may need adjusting in other shells. +==== + + +Replace all [.replaceable]`example values` in the steps of this procedure with your own values. + +.. Run the following commands to set some variables used in later steps. Replace [.replaceable]`region-code` with the {aws} Region that you want to deploy your resources in. The value can be any {aws} Region that is supported by Amazon EKS. For a list of {aws} Regions, see link:general/latest/gr/eks.html[Amazon EKS endpoints and quotas,type="documentation"] in the {aws} General Reference guide. Replace [.replaceable]`my-cluster` with a name for your cluster. The name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphanumeric character and can't be longer than 100 characters. The name must be unique within the {aws} Region and {aws} account that you're creating the cluster in. Replace [.replaceable]`my-nodegroup` with a name for your node group. The node group name can't be longer than 63 characters. It must start with letter or digit, but can also include hyphens and underscores for the remaining characters. Replace [.replaceable]`111122223333` with your account ID. ++ +[source,bash,subs="verbatim,attributes"] +---- +export region_code=region-code +export cluster_name=my-cluster +export nodegroup_name=my-nodegroup +export account_id=111122223333 +---- +.. Create an Amazon VPC with public and private subnets that meets Amazon EKS and `IPv6` requirements. ++ +... Run the following command to set a variable for your {aws} CloudFormation stack name. You can replace [.replaceable]`my-eks-ipv6-vpc` with any name you choose. ++ +[source,bash,subs="verbatim,attributes"] +---- +export vpc_stack_name=my-eks-ipv6-vpc +---- +... Create an `IPv6` VPC using an {aws} CloudFormation template. 
++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation create-stack --region $region_code --stack-name $vpc_stack_name \ + --template-url https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-ipv6-vpc-public-private-subnets.yaml +---- ++ +The stack takes a few minutes to create. Run the following command. Don't continue to the next step until the output of the command is `CREATE_COMPLETE`. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation describe-stacks --region $region_code --stack-name $vpc_stack_name --query Stacks[].StackStatus --output text +---- +... Retrieve the IDs of the public subnets that were created. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation describe-stacks --region $region_code --stack-name $vpc_stack_name \ + --query='Stacks[].Outputs[?OutputKey==`SubnetsPublic`].OutputValue' --output text +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +subnet-0a1a56c486EXAMPLE,subnet-099e6ca77aEXAMPLE +---- +... Enable the auto-assign `IPv6` address option for the public subnets that were created. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws ec2 modify-subnet-attribute --region $region_code --subnet-id subnet-0a1a56c486EXAMPLE --assign-ipv6-address-on-creation +aws ec2 modify-subnet-attribute --region $region_code --subnet-id subnet-099e6ca77aEXAMPLE --assign-ipv6-address-on-creation +---- +... Retrieve the names of the subnets and security groups created by the template from the deployed {aws} CloudFormation stack and store them in variables for use in a later step. ++ +[source,bash,subs="verbatim,attributes"] +---- +security_groups=$(aws cloudformation describe-stacks --region $region_code --stack-name $vpc_stack_name \ + --query='Stacks[].Outputs[?OutputKey==`SecurityGroups`].OutputValue' --output text) + +public_subnets=$(aws cloudformation describe-stacks --region $region_code --stack-name $vpc_stack_name \ + --query='Stacks[].Outputs[?OutputKey==`SubnetsPublic`].OutputValue' --output text) + +private_subnets=$(aws cloudformation describe-stacks --region $region_code --stack-name $vpc_stack_name \ + --query='Stacks[].Outputs[?OutputKey==`SubnetsPrivate`].OutputValue' --output text) + +subnets=${public_subnets},${private_subnets} +---- +.. Create a cluster IAM role and attach the required Amazon EKS IAM managed policy to it. [.noloc]`Kubernetes` clusters managed by Amazon EKS make calls to other {aws} services on your behalf to manage the resources that you use with the service. ++ +... Run the following command to create the `eks-cluster-role-trust-policy.json` file. ++ +[source,json,subs="verbatim,attributes"] +---- +cat >eks-cluster-role-trust-policy.json <>. ++ +The cluster takes several minutes to create. Run the following command. Don't continue to the next step until the output from the command is `ACTIVE`. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --region $region_code --name $cluster_name --query cluster.status +---- +.. Create or update a `kubeconfig` file for your cluster so that you can communicate with your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-kubeconfig --region $region_code --name $cluster_name +---- ++ +By default, the `config` file is created in `~/.kube` or the new cluster's configuration is added to an existing `config` file in `~/.kube`. +.. Create a node IAM role. ++ +... 
Run the following command to create the `vpc-cni-ipv6-policy.json` file. ++ +[source,json,subs="verbatim,attributes"] +---- +cat >vpc-cni-ipv6-policy <node-role-trust-relationship.json <>. +... Attach two required IAM managed policies to the IAM role. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam attach-role-policy --policy-arn {arn-aws}iam::aws:policy/AmazonEKSWorkerNodePolicy \ + --role-name $node_role_name +aws iam attach-role-policy --policy-arn {arn-aws}iam::aws:policy/AmazonEC2ContainerRegistryReadOnly \ + --role-name $node_role_name +---- +... Retrieve the ARN of the IAM role and store it in a variable for a later step. ++ +[source,bash,subs="verbatim,attributes"] +---- +node_iam_role=$(aws iam get-role --role-name $node_role_name --query="Role.Arn" --output text) +---- +.. Create a managed node group. ++ +... View the IDs of the subnets that you created in a previous step. ++ +[source,bash,subs="verbatim,attributes"] +---- +echo $subnets +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +subnet-0a1a56c486EXAMPLE,subnet-099e6ca77aEXAMPLE,subnet-0377963d69EXAMPLE,subnet-0c05f819d5EXAMPLE +---- +... Create the node group. Replace [.replaceable]`0a1a56c486EXAMPLE`, [.replaceable]`099e6ca77aEXAMPLE`, [.replaceable]`0377963d69EXAMPLE`, and [.replaceable]`0c05f819d5EXAMPLE` with the values returned in the output of the previous step. Be sure to remove the commas between subnet IDs from the previous output in the following command. You can replace [.replaceable]`t3.medium` with any link:AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances[{aws} Nitro System instance type,type="documentation"]. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks create-nodegroup --region $region_code --cluster-name $cluster_name --nodegroup-name $nodegroup_name \ + --subnets subnet-0a1a56c486EXAMPLE subnet-099e6ca77aEXAMPLE subnet-0377963d69EXAMPLE subnet-0c05f819d5EXAMPLE \ + --instance-types t3.medium --node-role $node_iam_role +---- ++ +The node group takes a few minutes to create. Run the following command. Don't proceed to the next step until the output returned is `ACTIVE`. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-nodegroup --region $region_code --cluster-name $cluster_name --nodegroup-name $nodegroup_name \ + --query nodegroup.status --output text +---- +.. Confirm that the default [.noloc]`Pods` are assigned `IPv6` addresses in the `IP` column. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get pods -n kube-system -o wide +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +aws-node-rslts 1/1 Running 1 5m36s 2600:1f13:b66:8200:11a5:ade0:c590:6ac8 ip-192-168-34-75.region-code.compute.internal +aws-node-t74jh 1/1 Running 0 5m32s 2600:1f13:b66:8203:4516:2080:8ced:1ca9 ip-192-168-253-70.region-code.compute.internal +coredns-85d5b4454c-cw7w2 1/1 Running 0 56m 2600:1f13:b66:8203:34e5:: ip-192-168-253-70.region-code.compute.internal +coredns-85d5b4454c-tx6n8 1/1 Running 0 56m 2600:1f13:b66:8203:34e5::1 ip-192-168-253-70.region-code.compute.internal +kube-proxy-btpbk 1/1 Running 0 5m36s 2600:1f13:b66:8200:11a5:ade0:c590:6ac8 ip-192-168-34-75.region-code.compute.internal +kube-proxy-jjk2g 1/1 Running 0 5m33s 2600:1f13:b66:8203:4516:2080:8ced:1ca9 ip-192-168-253-70.region-code.compute.internal +---- +.. 
Confirm that the default services are assigned `IPv6` addresses in the `IP` column. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get services -n kube-system -o wide +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR +kube-dns ClusterIP fd30:3087:b6c2::a 53/UDP,53/TCP 57m k8s-app=kube-dns +---- +.. (Optional) <> or deploy the <> and a sample application to load balance HTTP applications with <> or network traffic with <> to `IPv6` [.noloc]`Pods`. +.. After you've finished with the cluster and nodes that you created for this tutorial, you should clean up the resources that you created with the following commands. Make sure that you're not using any of the resources outside of this tutorial before deleting them. ++ +... If you're completing this step in a different shell than you completed the previous steps in, set the values of all the variables used in previous steps, replacing the [.replaceable]`example values` with the values you specified when you completed the previous steps. If you're completing this step in the same shell that you completed the previous steps in, skip to the next step. ++ +[source,bash,subs="verbatim,attributes"] +---- +export region_code=region-code +export vpc_stack_name=my-eks-ipv6-vpc +export cluster_name=my-cluster +export nodegroup_name=my-nodegroup +export account_id=111122223333 +export node_role_name=AmazonEKSNodeRole +export cluster_role_name=myAmazonEKSClusterRole +---- +... Delete your node group. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks delete-nodegroup --region $region_code --cluster-name $cluster_name --nodegroup-name $nodegroup_name +---- ++ +Deletion takes a few minutes. Run the following command. Don't proceed to the next step if any output is returned. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks list-nodegroups --region $region_code --cluster-name $cluster_name --query nodegroups --output text +---- +... Delete the cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks delete-cluster --region $region_code --name $cluster_name +---- ++ +The cluster takes a few minutes to delete. Before continuing make sure that the cluster is deleted with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --region $region_code --name $cluster_name +---- ++ +Don't proceed to the next step until your output is similar to the following output. ++ +[source,bash,subs="verbatim,attributes"] +---- +An error occurred (ResourceNotFoundException) when calling the DescribeCluster operation: No cluster found for name: my-cluster. +---- +... Delete the IAM resources that you created. Replace [.replaceable]`AmazonEKS_CNI_IPv6_Policy` with the name you chose, if you chose a different name than the one used in previous steps. 
++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam detach-role-policy --role-name $cluster_role_name --policy-arn {arn-aws}iam::aws:policy/AmazonEKSClusterPolicy +aws iam detach-role-policy --role-name $node_role_name --policy-arn {arn-aws}iam::aws:policy/AmazonEKSWorkerNodePolicy +aws iam detach-role-policy --role-name $node_role_name --policy-arn {arn-aws}iam::aws:policy/AmazonEC2ContainerRegistryReadOnly +aws iam detach-role-policy --role-name $node_role_name --policy-arn {arn-aws}iam::$account_id:policy/AmazonEKS_CNI_IPv6_Policy +aws iam delete-policy --policy-arn {arn-aws}iam::$account_id:policy/AmazonEKS_CNI_IPv6_Policy +aws iam delete-role --role-name $cluster_role_name +aws iam delete-role --role-name $node_role_name +---- +... Delete the {aws} CloudFormation stack that created the VPC. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation delete-stack --region $region_code --stack-name $vpc_stack_name +---- + + +[.topic] +[[external-snat,external-snat.title]] +==== Enable outbound internet access for [.noloc]`pods` + +[abstract] +-- +Learn how Amazon EKS manages external communication for [.noloc]`Pods` using Source Network Address Translation (SNAT), allowing Pods to access internet resources or networks connected via VPC peering, Transit Gateway, or {aws} Direct Connect. +-- + +*Applies to*: [.noloc]`Linux` `IPv4` Fargate nodes, [.noloc]`Linux` nodes with Amazon EC2 instances + +If you deployed your cluster using the `IPv6` family, then the information in this topic isn't applicable to your cluster, because `IPv6` addresses are not network translated. For more information about using `IPv6` with your cluster, see <>. + +By default, each [.noloc]`Pod` in your cluster is assigned a link:AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-private-addresses[private,type="documentation"]``IPv4`` address from a classless inter-domain routing (CIDR) block that is associated with the VPC that the [.noloc]`Pod` is deployed in. [.noloc]`Pods` in the same VPC communicate with each other using these private IP addresses as end points. When a [.noloc]`Pod` communicates to any `IPv4` address that isn't within a CIDR block that's associated to your VPC, the Amazon VPC CNI plugin (for both https://github.com/aws/amazon-vpc-cni-k8s#amazon-vpc-cni-k8s[Linux] or https://github.com/aws/amazon-vpc-cni-plugins/tree/master/plugins/vpc-bridge[Windows]) translates the [.noloc]`Pod's` `IPv4` address to the primary private `IPv4` address of the primary link:AWSEC2/latest/UserGuide/using-eni.html#eni-basics[elastic network interface,type="documentation"] of the node that the [.noloc]`Pod` is running on, by default ^^<>^^. + +[NOTE] +==== + +For [.noloc]`Windows` nodes, there are additional details to consider. By default, the https://github.com/aws/amazon-vpc-cni-plugins/tree/master/plugins/vpc-bridge[VPC CNI plugin for Windows] is defined with a networking configuration in which the traffic to a destination within the same VPC is excluded for SNAT. This means that internal VPC communication has SNAT disabled and the IP address allocated to a [.noloc]`Pod` is routable inside the VPC. But traffic to a destination outside of the VPC has the source [.noloc]`Pod` IP SNAT'ed to the instance ENI's primary IP address. This default configuration for [.noloc]`Windows` ensures that the pod can access networks outside of your VPC in the same way as the host instance. 
+ +==== + +Due to this behavior: + + + +* Your [.noloc]`Pods` can communicate with internet resources only if the node that they're running on has a link:AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses[public,type="documentation"] or link:vpc/latest/userguide/vpc-eips.html[elastic,type="documentation"] IP address assigned to it and is in a link:vpc/latest/userguide/configure-subnets.html#subnet-basics[public subnet,type="documentation"]. A public subnet's associated link:vpc/latest/userguide/VPC_Route_Tables.html[route table,type="documentation"] has a route to an internet gateway. We recommend deploying nodes to private subnets, whenever possible. +* For versions of the plugin earlier than `1.8.0`, resources that are in networks or VPCs that are connected to your cluster VPC using link:vpc/latest/peering/what-is-vpc-peering.html[VPC peering,type="documentation"], a link:whitepapers/latest/aws-vpc-connectivity-options/transit-vpc-option.html[transit VPC,type="documentation"], or link:directconnect/latest/UserGuide/Welcome.html[{aws} Direct Connect,type="documentation"] can't initiate communication to your [.noloc]`Pods` behind secondary elastic network interfaces. Your [.noloc]`Pods` can initiate communication to those resources and receive responses from them, though. + +If either of the following statements are true in your environment, then change the default configuration with the command that follows. + + + +* You have resources in networks or VPCs that are connected to your cluster VPC using link:vpc/latest/peering/what-is-vpc-peering.html[VPC peering,type="documentation"], a link:whitepapers/latest/aws-vpc-connectivity-options/transit-vpc-option.html[transit VPC,type="documentation"], or link:directconnect/latest/UserGuide/Welcome.html[{aws} Direct Connect,type="documentation"] that need to initiate communication with your [.noloc]`Pods` using an `IPv4` address and your plugin version is earlier than `1.8.0`. +* Your [.noloc]`Pods` are in a link:vpc/latest/userguide/configure-subnets.html#subnet-basics[private subnet,type="documentation"] and need to communicate outbound to the internet. The subnet has a route to a link:vpc/latest/userguide/vpc-nat-gateway.html[NAT gateway,type="documentation"]. + + +[source,bash,subs="verbatim,attributes"] +---- +kubectl set env daemonset -n kube-system aws-node AWS_VPC_K8S_CNI_EXTERNALSNAT=true +---- + +[NOTE] +==== + +The `AWS_VPC_K8S_CNI_EXTERNALSNAT` and `AWS_VPC_K8S_CNI_EXCLUDE_SNAT_CIDRS` CNI configuration variables aren't applicable to [.noloc]`Windows` nodes. Disabling SNAT isn't supported for [.noloc]`Windows`. As for excluding a list of `IPv4` CIDRs from SNAT, you can define this by specifying the `ExcludedSnatCIDRs` parameter in the [.noloc]`Windows` bootstrap script. For more information on using this parameter, see <>. + +==== + +[[snat-exception,snat-exception.title]] +===== Host networking + +^^*^^If a [.noloc]`Pod's` spec contains `hostNetwork=true` (default is `false`), then its IP address isn't translated to a different address. This is the case for the `kube-proxy` and [.noloc]`Amazon VPC CNI plugin for Kubernetes` [.noloc]`Pods` that run on your cluster, by default. For these [.noloc]`Pods`, the IP address is the same as the node's primary IP address, so the [.noloc]`Pod's` IP address isn't translated. 
For more information about a [.noloc]`Pod's` `hostNetwork` setting, see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#podspec-v1-core[PodSpec v1 core] in the [.noloc]`Kubernetes` API reference. + +[.topic] +[[cni-network-policy,cni-network-policy.title]] +==== Limit [.noloc]`pod` traffic with [.noloc]`Kubernetes` network policies + +[abstract] +-- +Learn how to configure your Amazon EKS cluster to use [.noloc]`Kubernetes` network policies with the [.noloc]`Amazon VPC CNI` plugin. Control network traffic to and from pods using network policies for enhanced security. Covers network policy considerations, requirements, setup instructions, and troubleshooting tips. +-- + +By default, there are no restrictions in [.noloc]`Kubernetes` for IP addresses, ports, or connections between any [.noloc]`Pods` in your cluster or between your [.noloc]`Pods` and resources in any other network. You can use [.noloc]`Kubernetes` _network policy_ to restrict network traffic to and from your [.noloc]`Pods`. For more information, see https://kubernetes.io/docs/concepts/services-networking/network-policies/[Network Policies] in the [.noloc]`Kubernetes` documentation. + +If you have version `1.13` or earlier of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` on your cluster, you need to implement a third party solution to apply [.noloc]`Kubernetes` network policies to your cluster. Version `1.14` or later of the plugin can implement network policies, so you don't need to use a third party solution. In this topic, you learn how to configure your cluster to use [.noloc]`Kubernetes` network policy on your cluster without using a third party add-on. + +Network policies in the [.noloc]`Amazon VPC CNI plugin for Kubernetes` are supported in the following configurations. + + + +* Amazon EKS clusters of version `1.25` and later. +* Version 1.14 or later of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` on your cluster. +* Cluster configured for `IPv4` or `IPv6` addresses. +* You can use network policies with <>. With network policies, you can control all in-cluster communication. With security groups for [.noloc]`Pods`, you can control access to {aws} services from applications within a [.noloc]`Pod`. +* You can use network policies with _custom networking_ and _prefix delegation_. + + +[[cni-network-policy-considerations,cni-network-policy-considerations.title]] +===== Considerations + +*Architecture* + +* When applying [.noloc]`Amazon VPC CNI plugin for Kubernetes` network policies to your cluster with the [.noloc]`Amazon VPC CNI plugin for Kubernetes` , you can apply the policies to Amazon EC2 Linux nodes only. You can't apply the policies to Fargate or Windows nodes. +* Network policies only apply either `IPv4` or `IPv6` addresses, but not both. In an `IPv4` cluster, the VPC CNI assigns `IPv4` address to pods and applies `IPv4` policies. In an `IPv6` cluster, the VPC CNI assigns `IPv6` address to pods and applies `IPv6` policies. Any `IPv4` network policy rules applied to an `IPv6` cluster are ignored. Any `IPv6` network policy rules applied to an `IPv4` cluster are ignored. + +*Network Policies* + +* Network Policies are only applied to [.noloc]`Pods` that are part of a [.noloc]`Deployment`. Standalone [.noloc]`Pods` that don't have a `metadata.ownerReferences` set can't have network policies applied to them. +* You can apply multiple network policies to the same [.noloc]`Pod`. 
When two or more policies that select the same [.noloc]`Pod` are configured, all policies are applied to the [.noloc]`Pod`. +* The maximum number of unique combinations of ports for each protocol in each `ingress:` or `egress:` selector in a network policy is 24. +* For any of your [.noloc]`Kubernetes` services, the service port must be the same as the container port. If you're using named ports, use the same name in the service spec too. + +*Migration* + +* If your cluster is currently using a third party solution to manage [.noloc]`Kubernetes` network policies, you can use those same policies with the [.noloc]`Amazon VPC CNI plugin for Kubernetes`. However you must remove your existing solution so that it isn't managing the same policies. + +*Installation* + +* The network policy feature creates and requires a `PolicyEndpoint` Custom Resource Definition (CRD) called `policyendpoints.networking.k8s.aws`. `PolicyEndpoint` objects of the Custom Resource are managed by Amazon EKS. You shouldn't modify or delete these resources. +* If you run pods that use the instance role IAM credentials or connect to the EC2 IMDS, be careful to check for network policies that would block access to the EC2 IMDS. You may need to add a network policy to allow access to EC2 IMDS. For more information, see link:AWSEC2/latest/UserGuide/ec2-instance-metadata.html[Instance metadata and user data,type="documentation"] in the Amazon EC2 User Guide. ++ +Pods that use _IAM roles for service accounts_ or _EKS Pod Identity_ don't access EC2 IMDS. +* The [.noloc]`Amazon VPC CNI plugin for Kubernetes` doesn't apply network policies to additional network interfaces for each pod, only the primary interface for each pod (`eth0`). This affects the following architectures: ++ +** `IPv6` pods with the `ENABLE_V4_EGRESS` variable set to `true`. This variable enables the `IPv4` egress feature to connect the IPv6 pods to `IPv4` endpoints such as those outside the cluster. The `IPv4` egress feature works by creating an additional network interface with a local loopback IPv4 address. +** When using chained network plugins such as [.noloc]`Multus`. Because these plugins add network interfaces to each pod, network policies aren't applied to the chained network plugins. + + +[.topic] +[[cni-network-policy-configure,cni-network-policy-configure.title]] +===== Restrict Pod network traffic with [.noloc]`Kubernetes` network policies + +[abstract] +-- +Learn how to deploy [.noloc]`Kubernetes` network policies on your Amazon EKS cluster. +-- + +You can use a [.noloc]`Kubernetes` network policy to restrict network traffic to and from your [.noloc]`Pods`. For more information, see https://kubernetes.io/docs/concepts/services-networking/network-policies/[Network Policies] in the [.noloc]`Kubernetes` documentation. + +You must configure the following in order to use this feature: + +. Set up policy enforcement at [.noloc]`Pod` startup. You do this in the `aws-node` container of the VPC CNI `DaemonSet`. +. Enable the network policy parameter for the add-on. +. Configure your cluster to use the [.noloc]`Kubernetes` network policy + +Before you begin, review the considerations. For more information, see <>. + +[[cni-network-policy-prereqs,cni-network-policy-prereqs.title]] +====== Prerequisites + +The following are prerequisites for the feature: + + + +* +.Minimum cluster version +An existing Amazon EKS cluster. To deploy one, see <>. The cluster must be [.noloc]`Kubernetes` version `1.25` or later. 
The cluster must be running one of the [.noloc]`Kubernetes` versions and platform versions listed in the following table. Note that any [.noloc]`Kubernetes` and platform versions later than those listed are also supported. You can check your current [.noloc]`Kubernetes` version by replacing [.replaceable]`my-cluster` in the following command with the name of your cluster and then running the modified command:
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks describe-cluster --name my-cluster --query cluster.version --output text
+----
++
+[cols="1,1", options="header"]
+|===
+|Kubernetes version
+|Platform version
+
+
+|`1.27.4`
+|`eks.5`
+
+|`1.26.7`
+|`eks.6`
+
+|`1.25.12`
+|`eks.7`
+|===
+*
+.Minimum VPC CNI version
+Version `1.14` or later of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` on your cluster. You can see which version you currently have with the following command.
++
+[source,shell,subs="verbatim,attributes"]
+----
+kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3
+----
++
+If your version is earlier than `1.14`, see <> to upgrade to version `1.14` or later.
+*
+.Minimum Linux kernel version
+Your nodes must have Linux kernel version `5.10` or later. You can check your kernel version with `uname -r`. If you're using the latest versions of the Amazon EKS optimized Amazon Linux, Amazon EKS optimized accelerated Amazon Linux AMIs, and Bottlerocket AMIs, they already have the required kernel version.
++
+The Amazon EKS optimized accelerated Amazon Linux AMI version `v20231116` or later has kernel version `5.10`.
+
+
+[[cni-network-policy-configure-policy,cni-network-policy-configure-policy.title]]
+====== Step 1: Set up policy enforcement at [.noloc]`Pod` startup
+
+
+The [.noloc]`Amazon VPC CNI plugin for Kubernetes` configures network policies for pods in parallel with pod provisioning. Until all of the policies are configured for the new pod, containers in the new pod start with a _default allow policy_. This is called _standard mode_. A default allow policy means that all ingress and egress traffic is allowed to and from the new pods. For example, the pods will not have any firewall rules enforced (all traffic is allowed) until the new pod is updated with the active policies.
+
+With the `NETWORK_POLICY_ENFORCING_MODE` variable set to `strict`, pods that use the VPC CNI start with a _default deny policy_, then policies are configured. This is called _strict mode_. In strict mode, you must have a network policy for every endpoint that your pods need to access in your cluster. Note that this requirement applies to the [.noloc]`CoreDNS` pods. The default deny policy isn't configured for pods with Host networking.
+
+You can change the default network policy by setting the environment variable `NETWORK_POLICY_ENFORCING_MODE` to `strict` in the `aws-node` container of the VPC CNI `DaemonSet`.
+
+[source,yaml,subs="verbatim,attributes"]
+----
+env:
+  - name: NETWORK_POLICY_ENFORCING_MODE
+    value: "strict"
+----
+
+
+[[enable-network-policy-parameter,enable-network-policy-parameter.title]]
+====== Step 2: Enable the network policy parameter for the add-on
+
+The network policy feature uses port `8162` on the node for metrics by default. It also uses port `8163` for health probes. If you run another application on the nodes or inside pods that needs to use these ports, the app fails to run. In VPC CNI version `v1.14.1` or later, you can change these ports.
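+
+Before you change the defaults, you can optionally check whether anything on a node already listens on these ports. The following is a quick sketch that assumes you have shell access to a node and that the `ss` utility is installed; the port numbers shown are the defaults described above.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# List listening TCP sockets and filter for the default node agent ports.
+# Output other than the aws-network-policy-agent itself indicates a conflict.
+ss -lntp | grep -E ':(8162|8163) '
+----
+
+If another process is bound to either port, set different ports in the add-on configuration as part of the next step.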
+ +Use the following procedure to enable the network policy parameter for the add-on. + + + +{aws-management-console}:: +.. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +.. In the left navigation pane, select *Clusters*, and then select the name of the cluster that you want to configure the Amazon VPC CNI add-on for. +.. Choose the *Add-ons* tab. +.. Select the box in the top right of the add-on box and then choose *Edit*. +.. On the *Configure [.replaceable]`name of add-on`* page: ++ +... Select a `v1.14.0-eksbuild.3` or later version in the *Version* list. +... Expand the *Optional configuration settings*. +... Enter the JSON key `"enableNetworkPolicy":` and value `"true"` in *Configuration values*. The resulting text must be a valid JSON object. If this key and value are the only data in the text box, surround the key and value with curly braces `{ }`. ++ +The following example has network policy feature enabled and metrics and health probes are set to the default port numbers: ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "enableNetworkPolicy": "true", + "nodeAgent": { + "healthProbeBindAddr": "8163", + "metricsBindAddr": "8162" + } +} +---- + + +Helm:: + +If you have installed the [.noloc]`Amazon VPC CNI plugin for Kubernetes` through `helm`, you can update the configuration to change the ports. + +.. Run the following command to change the ports. Set the port number in the value for either key `nodeAgent.metricsBindAddr` or key `nodeAgent.healthProbeBindAddr`, respectively. ++ +[source,shell,subs="verbatim,attributes"] +---- +helm upgrade --set nodeAgent.metricsBindAddr=8162 --set nodeAgent.healthProbeBindAddr=8163 aws-vpc-cni --namespace kube-system eks/aws-vpc-cni +---- + + +[.noloc]`kubectl`:: +.. Open the `aws-node` `DaemonSet` in your editor. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit daemonset -n kube-system aws-node +---- +.. Replace the port numbers in the following command arguments in the `args:` in the `aws-network-policy-agent` container in the VPC CNI `aws-node` daemonset manifest. ++ +[source,yaml,subs="verbatim,attributes"] +---- + - args: + - --metrics-bind-addr=:8162 + - --health-probe-bind-addr=:8163 +---- + + +[[cni-mount-bpf,cni-mount-bpf.title]] +====== Step 3: Mount the Berkeley Packet Filter (BPF) file system on your nodes + +You must mount the Berkeley Packet Filter (BPF) file system on each of your nodes. + +[NOTE] +==== + +If your cluster is version `1.27` or later, you can skip this step as all Amazon EKS optimized Amazon Linux and Bottlerocket AMIs for `1.27` or later have this feature already. + +For all other cluster versions, if you upgrade the Amazon EKS optimized Amazon Linux to version `v20230703` or later or you upgrade the Bottlerocket AMI to version `v1.0.2` or later, you can skip this step. + +==== +. Mount the Berkeley Packet Filter (BPF) file system on each of your nodes. ++ +[source,shell,subs="verbatim,attributes"] +---- +sudo mount -t bpf bpffs /sys/fs/bpf +---- +. Then, add the same command to your user data in your launch template for your Amazon EC2 Auto Scaling Groups. + + +[[cni-network-policy-setup,cni-network-policy-setup.title]] +====== Step 4: Configure your cluster to use [.noloc]`Kubernetes` network policies + +Configure the cluster to use [.noloc]`Kubernetes` network policies. You can set this for an Amazon EKS add-on or self-managed add-on. 
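+
+If you manage the plugin as an Amazon EKS add-on, it can be useful to check how the add-on is currently configured before you change anything. The following sketch assumes the add-on is installed as the Amazon EKS add-on named `vpc-cni` and that [.replaceable]`my-cluster` is your cluster name; the command returns `None` or an empty result if no configuration values are set yet.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Show any configuration values that are currently set for the VPC CNI Amazon EKS add-on.
+aws eks describe-addon --cluster-name my-cluster --addon-name vpc-cni \
+    --query addon.configurationValues --output text
+----
+
+If the output doesn't include `"enableNetworkPolicy": "true"`, enable it with one of the following options.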
+ + +[[cni-network-policy-setup-procedure-add-on,cni-network-policy-setup-procedure-add-on.title]] +.Amazon EKS add-on +[%collapsible] +==== + +{aws-management-console}:: +.. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +.. In the left navigation pane, select *Clusters*, and then select the name of the cluster that you want to configure the Amazon VPC CNI add-on for. +.. Choose the *Add-ons* tab. +.. Select the box in the top right of the add-on box and then choose *Edit*. +.. On the *Configure [.replaceable]`name of addon`* page: ++ +... Select a `v1.14.0-eksbuild.3` or later version in the *Version* list. +... Expand the *Optional configuration settings*. +... Enter the JSON key `"enableNetworkPolicy":` and value `"true"` in *Configuration values*. The resulting text must be a valid JSON object. If this key and value are the only data in the text box, surround the key and value with curly braces `{ }`. The following example shows network policy is enabled: ++ +[source,json,subs="verbatim,attributes"] +---- +{ "enableNetworkPolicy": "true" } +---- ++ +The following screenshot shows an example of this scenario. ++ +image::images/console-cni-config-network-policy.png[{aws-management-console} showing the VPC CNI add-on with network policy in the optional configuration.,scaledwidth=80%] + + +{aws} CLI:: +.. Run the following {aws} CLI command. Replace `my-cluster` with the name of your cluster and the IAM role ARN with the role that you are using. ++ +[source,shell,subs="verbatim,attributes"] +---- +aws eks update-addon --cluster-name my-cluster --addon-name vpc-cni --addon-version v1.14.0-eksbuild.3 \ + --service-account-role-arn {arn-aws}iam::123456789012:role/AmazonEKSVPCCNIRole \ + --resolve-conflicts PRESERVE --configuration-values '{"enableNetworkPolicy": "true"}' +---- + +==== + +[[cni-network-policy-setup-procedure-self-managed-add-on,cni-network-policy-setup-procedure-self-managed-add-on.title]] +.Self-managed add-on +[%collapsible] +==== + +Helm:: + +If you have installed the [.noloc]`Amazon VPC CNI plugin for Kubernetes` through `helm`, you can update the configuration to enable network policy. + +.. Run the following command to enable network policy. ++ +[source,shell,subs="verbatim,attributes"] +---- +helm upgrade --set enableNetworkPolicy=true aws-vpc-cni --namespace kube-system eks/aws-vpc-cni +---- + + +[.noloc]`kubectl`:: +.. Open the `amazon-vpc-cni` `ConfigMap` in your editor. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit configmap -n kube-system amazon-vpc-cni -o yaml +---- +.. Add the following line to the `data` in the `ConfigMap`. ++ +[source,bash,subs="verbatim,attributes"] +---- +enable-network-policy-controller: "true" +---- ++ +Once you've added the line, your `ConfigMap` should look like the following example. ++ +[source,yaml,subs="verbatim,attributes"] +---- +apiVersion: v1 + kind: ConfigMap + metadata: + name: amazon-vpc-cni + namespace: kube-system + data: + enable-network-policy-controller: "true" +---- +.. Open the `aws-node` `DaemonSet` in your editor. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit daemonset -n kube-system aws-node +---- +.. Replace the `false` with `true` in the command argument `--enable-network-policy=false` in the `args:` in the `aws-network-policy-agent` container in the VPC CNI `aws-node` daemonset manifest. 
++
+[source,yaml,subs="verbatim,attributes"]
+----
+     - args:
+        - --enable-network-policy=true
+----
+
+====
+
+[[cni-network-policy-setup-procedure-confirm,cni-network-policy-setup-procedure-confirm.title]]
+====== Step 5: Next steps
+
+After you complete the configuration, confirm that the `aws-node` pods are running on your cluster.
+
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get pods -n kube-system | grep 'aws-node\|amazon'
+----
+
+An example output is as follows.
+
+[source,bash,subs="verbatim,attributes"]
+----
+aws-node-gmqp7             2/2     Running   1 (24h ago)   24h
+aws-node-prnsh             2/2     Running   1 (24h ago)   24h
+----
+
+There are two containers in the `aws-node` pods in versions `1.14` and later. In previous versions, and if network policy is disabled, there is only a single container in the `aws-node` pods.
+
+You can now deploy [.noloc]`Kubernetes` network policies to your cluster.
+
+To implement [.noloc]`Kubernetes` network policies, you create [.noloc]`Kubernetes` `NetworkPolicy` objects and deploy them to your cluster. `NetworkPolicy` objects are scoped to a namespace. You implement policies to allow or deny traffic between [.noloc]`Pods` based on label selectors, namespaces, and IP address ranges. For more information about creating `NetworkPolicy` objects, see https://kubernetes.io/docs/concepts/services-networking/network-policies/#networkpolicy-resource[Network Policies] in the [.noloc]`Kubernetes` documentation.
+
+Enforcement of [.noloc]`Kubernetes` `NetworkPolicy` objects is implemented using the [.noloc]`Extended Berkeley Packet Filter` ([.noloc]`eBPF`). Relative to `iptables`-based implementations, it offers lower latency and better performance characteristics, including reduced CPU utilization and avoiding sequential lookups. Additionally, [.noloc]`eBPF` probes provide access to context-rich data that helps debug complex kernel-level issues and improve observability. Amazon EKS supports an [.noloc]`eBPF`-based exporter that leverages the probes to log policy results on each node and export the data to external log collectors to aid in debugging. For more information, see the https://ebpf.io/what-is-ebpf/#what-is-ebpf[eBPF documentation].
+
+[.topic]
+[[network-policy-disable,network-policy-disable.title]]
+===== Disable [.noloc]`Kubernetes` network policies for Amazon EKS Pod network traffic
+
+[abstract]
+--
+Learn how to disable [.noloc]`Kubernetes` network policies for Amazon EKS Pod network traffic.
+--
+
+Disable [.noloc]`Kubernetes` network policies to stop restricting Amazon EKS Pod network traffic.
+
+. List all [.noloc]`Kubernetes` network policies.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get netpol -A
+----
+. Delete each [.noloc]`Kubernetes` network policy. Replace [.replaceable]`policy-name` and [.replaceable]`namespace` with values from the output of the previous command, and repeat the command for each policy. You must delete all network policies before disabling network policies.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl delete netpol policy-name --namespace namespace
+----
+. Open the `aws-node` `DaemonSet` in your editor.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl edit daemonset -n kube-system aws-node
+----
+. Replace the `true` with `false` in the command argument `--enable-network-policy=true` in the `args:` in the `aws-network-policy-agent` container in the VPC CNI `aws-node` daemonset manifest.
++ +[source,yaml,subs="verbatim,attributes"] +---- + - args: + - --enable-network-policy=true +---- + + +include::network-policies-troubleshooting.adoc[leveloffset=+1] + + +include::network-policy-stars-demo.adoc[leveloffset=+1] + + +[.topic] +[[cni-custom-network,cni-custom-network.title]] +=== Deploy [.noloc]`pods` in alternate subnets with custom networking + +[abstract] +-- +Learn how to enable custom networking for Amazon EKS [.noloc]`Pods` to deploy them in different subnets or use different security groups than the node's primary network interface, increasing IP address availability and network isolation. +-- + +*Applies to*: [.noloc]`Linux` `IPv4` Fargate nodes, [.noloc]`Linux` nodes with Amazon EC2 instances + +By default, when the [.noloc]`Amazon VPC CNI plugin for Kubernetes` creates secondary link:AWSEC2/latest/UserGuide/using-eni.html[elastic network interfaces,type="documentation"] (network interfaces) for your Amazon EC2 node, it creates them in the same subnet as the node's primary network interface. It also associates the same security groups to the secondary network interface that are associated to the primary network interface. For one or more of the following reasons, you might want the plugin to create secondary network interfaces in a different subnet or want to associate different security groups to the secondary network interfaces, or both: + + + +* There's a limited number of `IPv4` addresses that are available in the subnet that the primary network interface is in. This might limit the number of [.noloc]`Pods` that you can create in the subnet. By using a different subnet for secondary network interfaces, you can increase the number of available `IPv4` addresses available for [.noloc]`Pods`. +* For security reasons, your [.noloc]`Pods` might need to use a different subnet or security groups than the node's primary network interface. +* The nodes are configured in public subnets, and you want to place the [.noloc]`Pods` in private subnets. The route table associated to a public subnet includes a route to an internet gateway. The route table associated to a private subnet doesn't include a route to an internet gateway. + + +[[cni-custom-network-considerations,cni-custom-network-considerations.title]] +==== Considerations + +The following are considerations for using the feature. + + + +* With custom networking enabled, no IP addresses assigned to the primary network interface are assigned to [.noloc]`Pods`. Only IP addresses from secondary network interfaces are assigned to [.noloc]`Pods`. +* If your cluster uses the `IPv6` family, you can't use custom networking. +* If you plan to use custom networking only to help alleviate `IPv4` address exhaustion, you can create a cluster using the `IPv6` family instead. For more information, see <>. +* Even though [.noloc]`Pods` deployed to subnets specified for secondary network interfaces can use different subnet and security groups than the node's primary network interface, the subnets and security groups must be in the same VPC as the node. +* For Fargate, subnets are controlled through the Fargate profile. For more information, see <>. + + +[.topic] +[[cni-custom-network-tutorial,cni-custom-network-tutorial.title]] +==== Customizing the secondary network interface in Amazon EKS nodes + +[abstract] +-- +Learn how your [.noloc]`Pods` can use different security groups and subnets than the primary elastic network interface of the Amazon EC2 node that they run on. 
+-- + +Complete the following before you start the tutorial: + + + +* Review the considerations +* Familiarity with how the [.noloc]`Amazon VPC CNI plugin for Kubernetes` creates secondary network interfaces and assigns IP addresses to [.noloc]`Pods`. For more information, see https://github.com/aws/amazon-vpc-cni-k8s#eni-allocation[ENI Allocation] on [.noloc]`GitHub`. +* Version `2.12.3` or later or version `1.27.160` or later of the {aws} Command Line Interface ({aws} CLI) installed and configured on your device or {aws} CloudShell. To check your current version, use `aws --version | cut -d / -f2 | cut -d ' ' -f1`. Package managers such `yum`, `apt-get`, or [.noloc]`Homebrew` for [.noloc]`macOS` are often several versions behind the latest version of the {aws} CLI. To install the latest version, see link:cli/latest/userguide/cli-chap-install.html[Installing, updating, and uninstalling the {aws} CLI,type="documentation"] and link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-config[Quick configuration with aws configure,type="documentation"] in the _{aws} Command Line Interface User Guide_. The {aws} CLI version that is installed in {aws} CloudShell might also be several versions behind the latest version. To update it, see link:cloudshell/latest/userguide/vm-specs.html#install-cli-software[Installing {aws} CLI to your home directory,type="documentation"] in the _{aws} CloudShell User Guide_. +* The `kubectl` command line tool is installed on your device or {aws} CloudShell. The version can be the same as or up to one minor version earlier or later than the [.noloc]`Kubernetes` version of your cluster. For example, if your cluster version is `1.29`, you can use `kubectl` version `1.28`, `1.29`, or `1.30` with it. To install or upgrade `kubectl`, see <>. +* We recommend that you complete the steps in this topic in a Bash shell. If you aren't using a Bash shell, some script commands such as line continuation characters and the way variables are set and used require adjustment for your shell. Additionally, the quoting and escaping rules for your shell might be different. For more information, see link:cli/latest/userguide/cli-usage-parameters-quoting-strings.html[Using quotation marks with strings in the {aws} CLI,type="documentation"] in the {aws} Command Line Interface User Guide. + +For this tutorial, we recommend using the [.replaceable]`example values`, except where it's noted to replace them. You can replace any [.replaceable]`example value` when completing the steps for a production cluster. We recommend completing all steps in the same terminal. This is because variables are set and used throughout the steps and won't exist in different terminals. + +The commands in this topic are formatted using the conventions listed in link:cli/latest/userguide/welcome-examples.html[Using the {aws} CLI examples,type="documentation"]. If you're running commands from the command line against resources that are in a different {aws} Region than the default {aws} Region defined in the {aws} CLI link:cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-profiles[profile,type="documentation"] that you're using, then you need to add `--region [.replaceable]``region-code``` to the commands. + +When you want to deploy custom networking to your production cluster, skip to <>. 
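+
+Because the steps in this tutorial set and reuse shell variables, it also helps to confirm your tooling and default {aws} Region in the terminal that you plan to use before you begin. The following optional check is a sketch; the `us-west-2` Region is only an example, and you can instead add `--region` to each command as described above.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Confirm the AWS CLI and kubectl versions available in this shell.
+aws --version | cut -d / -f2 | cut -d ' ' -f1
+kubectl version --client
+
+# Check the default Region for your AWS CLI profile, or set one for this shell session.
+aws configure get region
+export AWS_DEFAULT_REGION=us-west-2
+----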
+ +[[custom-networking-create-cluster,custom-networking-create-cluster.title]] +===== Step 1: Create a test VPC and cluster + +The following procedures help you create a test VPC and cluster and configure custom networking for that cluster. We don't recommend using the test cluster for production workloads because several unrelated features that you might use on your production cluster aren't covered in this topic. For more information, see <>. + +. Define the `cluster_name` and `account_id` variables.. ++ +[source,bash,subs="verbatim,attributes"] +---- +export cluster_name=my-custom-networking-cluster +account_id=$(aws sts get-caller-identity --query Account --output text) +---- +. Create a VPC. ++ +.. If you are deploying to a test system, create a VPC using an Amazon EKS {aws} CloudFormation template. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation create-stack --stack-name my-eks-custom-networking-vpc \ + --template-url https://s3.us-west-2.amazonaws.com/amazon-eks/cloudformation/2020-10-29/amazon-eks-vpc-private-subnets.yaml \ + --parameters ParameterKey=VpcBlock,ParameterValue=192.168.0.0/24 \ + ParameterKey=PrivateSubnet01Block,ParameterValue=192.168.0.64/27 \ + ParameterKey=PrivateSubnet02Block,ParameterValue=192.168.0.96/27 \ + ParameterKey=PublicSubnet01Block,ParameterValue=192.168.0.0/27 \ + ParameterKey=PublicSubnet02Block,ParameterValue=192.168.0.32/27 +---- ++ +The {aws} CloudFormation stack takes a few minutes to create. To check on the stack's deployment status, run the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws cloudformation describe-stacks --stack-name my-eks-custom-networking-vpc --query Stacks\[\].StackStatus --output text +---- ++ +Don't continue to the next step until the output of the command is `CREATE_COMPLETE`. +.. Define variables with the values of the private subnet IDs created by the template. ++ +[source,bash,subs="verbatim,attributes"] +---- +subnet_id_1=$(aws cloudformation describe-stack-resources --stack-name my-eks-custom-networking-vpc \ + --query "StackResources[?LogicalResourceId=='PrivateSubnet01'].PhysicalResourceId" --output text) +subnet_id_2=$(aws cloudformation describe-stack-resources --stack-name my-eks-custom-networking-vpc \ + --query "StackResources[?LogicalResourceId=='PrivateSubnet02'].PhysicalResourceId" --output text) +---- +.. Define variables with the Availability Zones of the subnets retrieved in the previous step. ++ +[source,bash,subs="verbatim,attributes"] +---- +az_1=$(aws ec2 describe-subnets --subnet-ids $subnet_id_1 --query 'Subnets[*].AvailabilityZone' --output text) +az_2=$(aws ec2 describe-subnets --subnet-ids $subnet_id_2 --query 'Subnets[*].AvailabilityZone' --output text) +---- +. Create a cluster IAM role. ++ +.. Run the following command to create an IAM trust policy JSON file. ++ +[source,json,subs="verbatim,attributes"] +---- +cat >eks-cluster-role-trust-policy.json <>. +.. The cluster takes several minutes to create. To check on the cluster's deployment status, run the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --name my-custom-networking-cluster --query cluster.status +---- ++ +Don't continue to the next step until the output of the command is `"ACTIVE"`. +.. Configure `kubectl` to communicate with your cluster. 
++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-kubeconfig --name my-custom-networking-cluster +---- + + +[[custom-networking-configure-vpc,custom-networking-configure-vpc.title]] +===== Step 2: Configure your VPC + +This tutorial requires the VPC created in <>. For a production cluster, adjust the steps accordingly for your VPC by replacing all of the [.replaceable]`example values` with your own. + +. Confirm that your currently-installed [.noloc]`Amazon VPC CNI plugin for Kubernetes` is the latest version. To determine the latest version for the Amazon EKS add-on type and update your version to it, see <>. To determine the latest version for the self-managed add-on type and update your version to it, see <>. +. Retrieve the ID of your cluster VPC and store it in a variable for use in later steps. For a production cluster, replace [.replaceable]`my-custom-networking-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +vpc_id=$(aws eks describe-cluster --name my-custom-networking-cluster --query "cluster.resourcesVpcConfig.vpcId" --output text) +---- +. Associate an additional Classless Inter-Domain Routing (CIDR) block with your cluster's VPC. The CIDR block can't overlap with any existing associated CIDR blocks. ++ +.. View the current CIDR blocks associated to your VPC. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws ec2 describe-vpcs --vpc-ids $vpc_id \ + --query 'Vpcs[*].CidrBlockAssociationSet[*].{CIDRBlock: CidrBlock, State: CidrBlockState.State}' --out table +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +---------------------------------- +| DescribeVpcs | ++-----------------+--------------+ +| CIDRBlock | State | ++-----------------+--------------+ +| 192.168.0.0/24 | associated | ++-----------------+--------------+ +---- +.. Associate an additional CIDR block to your VPC. For more information, see link:vpc/latest/userguide/modify-vpcs.html#add-ipv4-cidr[Associate additional IPv4 CIDR blocks with your VPC,type="documentation"] in the Amazon VPC User Guide. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws ec2 associate-vpc-cidr-block --vpc-id $vpc_id --cidr-block 192.168.1.0/24 +---- +.. Confirm that the new block is associated. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws ec2 describe-vpcs --vpc-ids $vpc_id --query 'Vpcs[*].CidrBlockAssociationSet[*].{CIDRBlock: CidrBlock, State: CidrBlockState.State}' --out table +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +---------------------------------- +| DescribeVpcs | ++-----------------+--------------+ +| CIDRBlock | State | ++-----------------+--------------+ +| 192.168.0.0/24 | associated | +| 192.168.1.0/24 | associated | ++-----------------+--------------+ +---- + ++ +Don't proceed to the next step until your new CIDR block's `State` is `associated`. +. Create as many subnets as you want to use in each Availability Zone that your existing subnets are in. Specify a CIDR block that's within the CIDR block that you associated with your VPC in a previous step. ++ +.. Create new subnets. The subnets must be created in a different VPC CIDR block than your existing subnets are in, but in the same Availability Zones as your existing subnets. In this example, one subnet is created in the new CIDR block in each Availability Zone that the current private subnets exist in. The IDs of the subnets created are stored in variables for use in later steps. 
The `Name` values match the values assigned to the subnets created using the Amazon EKS VPC template in a previous step. Names aren't required. You can use different names. ++ +[source,bash,subs="verbatim,attributes"] +---- +new_subnet_id_1=$(aws ec2 create-subnet --vpc-id $vpc_id --availability-zone $az_1 --cidr-block 192.168.1.0/27 \ + --tag-specifications 'ResourceType=subnet,Tags=[{Key=Name,Value=my-eks-custom-networking-vpc-PrivateSubnet01},{Key=kubernetes.io/role/internal-elb,Value=1}]' \ + --query Subnet.SubnetId --output text) +new_subnet_id_2=$(aws ec2 create-subnet --vpc-id $vpc_id --availability-zone $az_2 --cidr-block 192.168.1.32/27 \ + --tag-specifications 'ResourceType=subnet,Tags=[{Key=Name,Value=my-eks-custom-networking-vpc-PrivateSubnet02},{Key=kubernetes.io/role/internal-elb,Value=1}]' \ + --query Subnet.SubnetId --output text) +---- ++ +IMPORTANT: By default, your new subnets are implicitly associated with your VPC's link:vpc/latest/userguide/VPC_Route_Tables.html#RouteTables[main route table,type="documentation"]. This route table allows communication between all the resources that are deployed in the VPC. However, it doesn't allow communication with resources that have IP addresses that are outside the CIDR blocks that are associated with your VPC. You can associate your own route table to your subnets to change this behavior. For more information, see link:vpc/latest/userguide/VPC_Route_Tables.html#subnet-route-tables[Subnet route tables,type="documentation"] in the Amazon VPC User Guide. +.. View the current subnets in your VPC. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws ec2 describe-subnets --filters "Name=vpc-id,Values=$vpc_id" \ + --query 'Subnets[*].{SubnetId: SubnetId,AvailabilityZone: AvailabilityZone,CidrBlock: CidrBlock}' \ + --output table +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +---------------------------------------------------------------------- +| DescribeSubnets | ++------------------+--------------------+----------------------------+ +| AvailabilityZone | CidrBlock | SubnetId | ++------------------+--------------------+----------------------------+ +| us-west-2d | 192.168.0.0/27 | subnet-example1 | +| us-west-2a | 192.168.0.32/27 | subnet-example2 | +| us-west-2a | 192.168.0.64/27 | subnet-example3 | +| us-west-2d | 192.168.0.96/27 | subnet-example4 | +| us-west-2a | 192.168.1.0/27 | subnet-example5 | +| us-west-2d | 192.168.1.32/27 | subnet-example6 | ++------------------+--------------------+----------------------------+ +---- ++ +You can see the subnets in the `192.168.1.0` CIDR block that you created are in the same Availability Zones as the subnets in the `192.168.0.0` CIDR block. + + +[[custom-networking-configure-kubernetes,custom-networking-configure-kubernetes.title]] +===== Step 3: Configure [.noloc]`Kubernetes` resources +. Set the `AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG` environment variable to `true` in the `aws-node` [.noloc]`DaemonSet`. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true +---- +. Retrieve the ID of your <> and store it in a variable for use in the next step. Amazon EKS automatically creates this security group when you create your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +cluster_security_group_id=$(aws eks describe-cluster --name $cluster_name --query cluster.resourcesVpcConfig.clusterSecurityGroupId --output text) +---- +. 
[[custom-networking-create-eniconfig]]Create an `ENIConfig` custom resource for each subnet that you want to deploy [.noloc]`Pods` in. ++ +.. Create a unique file for each network interface configuration. ++ ++ +The following commands create separate `ENIConfig` files for the two subnets that were created in a previous step. The value for `name` must be unique. The name is the same as the Availability Zone that the subnet is in. The cluster security group is assigned to the `ENIConfig`. ++ +[source,yaml,subs="verbatim,attributes"] +---- +cat >$az_1.yaml <$az_2.yaml <> later in this tutorial. ++ +NOTE: If you don't specify a valid security group for use with a production cluster and you're using: + +*** version `1.8.0` or later of the [.noloc]`Amazon VPC CNI plugin for Kubernetes`, then the security groups associated with the node's primary elastic network interface are used. +*** a version of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` that's earlier than `1.8.0`, then the default security group for the VPC is assigned to secondary network interfaces. + ++ +IMPORTANT: +*** `AWS_VPC_K8S_CNI_EXTERNALSNAT=false` is a default setting in the configuration for the Amazon VPC CNI plugin for [.noloc]`Kubernetes`. If you're using the default setting, then traffic that is destined for IP addresses that aren't within one of the CIDR blocks associated with your VPC use the security groups and subnets of your node's primary network interface. The subnets and security groups defined in your `ENIConfigs` that are used to create secondary network interfaces aren't used for this traffic. For more information about this setting, see <>. +*** If you also use security groups for [.noloc]`Pods`, the security group that's specified in a `SecurityGroupPolicy` is used instead of the security group that's specified in the `ENIConfigs`. For more information, see <>. + +.. Apply each custom resource file that you created to your cluster with the following commands. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f $az_1.yaml +kubectl apply -f $az_2.yaml +---- +. Confirm that your `ENIConfigs` were created. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get ENIConfigs +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME AGE +us-west-2a 117s +us-west-2d 105s +---- +. If you're enabling custom networking on a production cluster and named your `ENIConfigs` something other than the Availability Zone that you're using them for, then skip to the <> to deploy Amazon EC2 nodes. ++ +Enable [.noloc]`Kubernetes` to automatically apply the `ENIConfig` for an Availability Zone to any new Amazon EC2 nodes created in your cluster. ++ +.. For the test cluster in this tutorial, skip to the <>. ++ +For a production cluster, check to see if an [.noloc]`annotation` with the key `k8s.amazonaws.com/eniConfig` for the `https://github.com/aws/amazon-vpc-cni-k8s#eni_config_annotation_def[ENI_CONFIG_ANNOTATION_DEF]` environment variable exists in the container spec for the `aws-node` [.noloc]`DaemonSet`. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe daemonset aws-node -n kube-system | grep ENI_CONFIG_ANNOTATION_DEF +---- ++ +If output is returned, the annotation exists. If no output is returned, then the variable is not set. For a production cluster, you can use either this setting or the setting in the following step. If you use this setting, it overrides the setting in the following step. 
In this tutorial, the setting in the next step is used. +.. [[custom-networking-automatically-apply-eniconfig]]Update your `aws-node` [.noloc]`DaemonSet` to automatically apply the `ENIConfig` for an Availability Zone to any new Amazon EC2 nodes created in your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl set env daemonset aws-node -n kube-system ENI_CONFIG_LABEL_DEF=topology.kubernetes.io/zone +---- + + +[[custom-networking-deploy-nodes,custom-networking-deploy-nodes.title]] +===== Step 4: Deploy Amazon EC2 nodes +. Create a node IAM role. ++ +.. Run the following command to create an IAM trust policy JSON file. ++ +[source,json,subs="verbatim,attributes"] +---- +cat >node-role-trust-relationship.json <>. +. Create one of the following types of node groups. To determine the instance type that you want to deploy, see <>. For this tutorial, complete the *Managed*, *Without a launch template or with a launch template without an AMI ID specified* option. If you're going to use the node group for production workloads, then we recommend that you familiarize yourself with all of the managed node group <> and self-managed node group <> options before deploying the node group. ++ +** *Managed* – Deploy your node group using one of the following options: ++ +*** *Without a launch template or with a launch template without an AMI ID specified* – Run the following command. For this tutorial, use the [.replaceable]`example values`. For a production node group, replace all [.replaceable]`example values` with your own. The node group name can't be longer than 63 characters. It must start with letter or digit, but can also include hyphens and underscores for the remaining characters. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks create-nodegroup --cluster-name $cluster_name --nodegroup-name my-nodegroup \ + --subnets $subnet_id_1 $subnet_id_2 --instance-types t3.medium --node-role $node_role_arn +---- +*** *With a launch template with a specified AMI ID*:: + ++ +.... Determine the Amazon EKS recommended number of maximum [.noloc]`Pods `for your nodes. Follow the instructions in <>, adding `--cni-custom-networking-enabled` to step 3 in that topic. Note the output for use in the next step. +.... In your launch template, specify an Amazon EKS optimized AMI ID, or a custom AMI built off the Amazon EKS optimized AMI, then <> and provide the following user data in the launch template. This user data passes arguments into the `bootstrap.sh` file. For more information about the bootstrap file, see https://github.com/awslabs/amazon-eks-ami/blob/main/templates/al2/runtime/bootstrap.sh[bootstrap.sh] on [.noloc]`GitHub`. You can replace [.replaceable]`20` with either the value from the previous step (recommended) or your own value. ++ +[source,bash,subs="verbatim,attributes"] +---- +/etc/eks/bootstrap.sh my-cluster --use-max-pods false --kubelet-extra-args '--max-pods=20' +---- ++ +If you've created a custom AMI that is not built off the Amazon EKS optimized AMI, then you need to custom create the configuration yourself. +** *Self-managed*:: + ++ +... Determine the Amazon EKS recommended number of maximum [.noloc]`Pods` for your nodes. Follow the instructions in <>, adding `--cni-custom-networking-enabled` to step 3 in that topic. Note the output for use in the next step. +... Deploy the node group using the instructions in <>. Specify the following text for the *BootstrapArguments* parameter. 
You can replace [.replaceable]`20` with either the value from the previous step (recommended) or your own value. ++ +[source,bash,subs="verbatim,attributes"] +---- +--use-max-pods false --kubelet-extra-args '--max-pods=20' +---- ++ +NOTE: If you want nodes in a production cluster to support a significantly higher number of [.noloc]`Pods`, run the script in <> again. Also, add the `--cni-prefix-delegation-enabled` option to the command. For example, [.replaceable]`110` is returned for an `m5.large` instance type. For instructions on how to enable this capability, see <>. You can use this capability with custom networking. ++ +Node group creation takes several minutes. You can check the status of the creation of a managed node group with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-nodegroup --cluster-name $cluster_name --nodegroup-name my-nodegroup --query nodegroup.status --output text +---- ++ +Don't continue to the next step until the output returned is `ACTIVE`. +. [[custom-networking-annotate-eniconfig]]For the tutorial, you can skip this step. ++ +For a production cluster, if you didn't name your `ENIConfigs` the same as the Availability Zone that you're using them for, then you must annotate your nodes with the `ENIConfig` name that should be used with the node. This step isn't necessary if you only have one subnet in each Availability Zone and you named your `ENIConfigs` with the same names as your Availability Zones. This is because the [.noloc]`Amazon VPC CNI plugin for Kubernetes` automatically associates the correct `ENIConfig` with the node for you when you enabled it to do so in a <>. ++ +.. Get the list of nodes in your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get nodes +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME STATUS ROLES AGE VERSION +ip-192-168-0-126.us-west-2.compute.internal Ready 8m49s v1.22.9-eks-810597c +ip-192-168-0-92.us-west-2.compute.internal Ready 8m34s v1.22.9-eks-810597c +---- +.. Determine which Availability Zone each node is in. Run the following command for each node that was returned in the previous step. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws ec2 describe-instances --filters Name=network-interface.private-dns-name,Values=ip-192-168-0-126.us-west-2.compute.internal \ +--query 'Reservations[].Instances[].{AvailabilityZone: Placement.AvailabilityZone, SubnetId: SubnetId}' +---- ++ +An example output is as follows. ++ +[source,json,subs="verbatim,attributes"] +---- +[ + { + "AvailabilityZone": "us-west-2d", + "SubnetId": "subnet-Example5" + } +] +---- +.. Annotate each node with the `ENIConfig` that you created for the subnet ID and Availability Zone. You can only annotate a node with one `ENIConfig`, though multiple nodes can be annotated with the same `ENIConfig`. Replace the [.replaceable]`example values` with your own. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl annotate node ip-192-168-0-126.us-west-2.compute.internal k8s.amazonaws.com/eniConfig=EniConfigName1 +kubectl annotate node ip-192-168-0-92.us-west-2.compute.internal k8s.amazonaws.com/eniConfig=EniConfigName2 +---- +. [[custom-networking-terminate-existing-nodes]]If you had nodes in a production cluster with running [.noloc]`Pods` before you switched to using the custom networking feature, complete the following tasks: ++ +.. Make sure that you have available nodes that are using the custom networking feature. +.. 
Cordon and drain the nodes to gracefully shut down the [.noloc]`Pods`. For more information, see https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/[Safely Drain a Node] in the [.noloc]`Kubernetes` documentation. +.. Terminate the nodes. If the nodes are in an existing managed node group, you can delete the node group. Copy the command that follows to your device. Make the following modifications to the command as needed and then run the modified command: ++ +*** Replace [.replaceable]`my-cluster` with the name for your cluster. +*** Replace [.replaceable]`my-nodegroup` with the name for your node group. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks delete-nodegroup --cluster-name my-cluster --nodegroup-name my-nodegroup +---- + ++ +Only new nodes that are registered with the `k8s.amazonaws.com/eniConfig` label use the custom networking feature. +. Confirm that [.noloc]`Pods` are assigned an IP address from a CIDR block that's associated to one of the subnets that you created in a previous step. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get pods -A -o wide +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +kube-system aws-node-2rkn4 1/1 Running 0 7m19s 192.168.0.92 ip-192-168-0-92.us-west-2.compute.internal +kube-system aws-node-k96wp 1/1 Running 0 7m15s 192.168.0.126 ip-192-168-0-126.us-west-2.compute.internal +kube-system coredns-657694c6f4-smcgr 1/1 Running 0 56m 192.168.1.23 ip-192-168-0-92.us-west-2.compute.internal +kube-system coredns-657694c6f4-stwv9 1/1 Running 0 56m 192.168.1.28 ip-192-168-0-92.us-west-2.compute.internal +kube-system kube-proxy-jgshq 1/1 Running 0 7m19s 192.168.0.92 ip-192-168-0-92.us-west-2.compute.internal +kube-system kube-proxy-wx9vk 1/1 Running 0 7m15s 192.168.0.126 ip-192-168-0-126.us-west-2.compute.internal +---- ++ +You can see that the coredns [.noloc]`Pods` are assigned IP addresses from the `192.168.1.0` CIDR block that you added to your VPC. Without custom networking, they would have been assigned addresses from the `192.168.0.0` CIDR block, because it was the only CIDR block originally associated with the VPC. ++ +If a [.noloc]`Pod's` `spec` contains `hostNetwork=true`, it's assigned the primary IP address of the node. It isn't assigned an address from the subnets that you added. By default, this value is set to `false`. This value is set to `true` for the `kube-proxy` and [.noloc]`Amazon VPC CNI plugin for Kubernetes` (`aws-node`) [.noloc]`Pods` that run on your cluster. This is why the `kube-proxy` and the plugin's `aws-node` [.noloc]`Pods` aren't assigned `192.168.1.[.replaceable]``x``` addresses in the previous output. For more information about a [.noloc]`Pod's` `hostNetwork` setting, see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#podspec-v1-core[PodSpec v1 core] in the [.noloc]`Kubernetes` API reference. + + +[[custom-network-delete-resources,custom-network-delete-resources.title]] +===== Step 5: Delete tutorial resources + +After you complete the tutorial, we recommend that you delete the resources that you created. You can then adjust the steps to enable custom networking for a production cluster. + +. If the node group that you created was just for testing, then delete it. 
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks delete-nodegroup --cluster-name $cluster_name --nodegroup-name my-nodegroup
+----
++
+Even after the {aws} CLI output says that the node group is deleted, the delete process might not actually be complete. The delete process takes a few minutes. Confirm that it's complete by running the following command.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks describe-nodegroup --cluster-name $cluster_name --nodegroup-name my-nodegroup --query nodegroup.status --output text
+----
++
+Don't continue until the returned output is similar to the following output.
++
+[source,bash,subs="verbatim,attributes"]
+----
+An error occurred (ResourceNotFoundException) when calling the DescribeNodegroup operation: No node group found for name: my-nodegroup.
+----
+. If the node group that you created was just for testing, then delete the node IAM role.
++
+.. Detach the policies from the role.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws iam detach-role-policy --role-name myCustomNetworkingNodeRole --policy-arn {arn-aws}iam::aws:policy/AmazonEKSWorkerNodePolicy
+aws iam detach-role-policy --role-name myCustomNetworkingNodeRole --policy-arn {arn-aws}iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
+aws iam detach-role-policy --role-name myCustomNetworkingNodeRole --policy-arn {arn-aws}iam::aws:policy/AmazonEKS_CNI_Policy
+----
+.. Delete the role.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws iam delete-role --role-name myCustomNetworkingNodeRole
+----
+. Delete the cluster.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks delete-cluster --name $cluster_name
+----
++
+Confirm the cluster is deleted with the following command.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks describe-cluster --name $cluster_name --query cluster.status --output text
+----
++
+When output similar to the following is returned, the cluster is successfully deleted.
++
+[source,bash,subs="verbatim,attributes"]
+----
+An error occurred (ResourceNotFoundException) when calling the DescribeCluster operation: No cluster found for name: my-cluster.
+----
+. Delete the cluster IAM role.
++
+.. Detach the policies from the role.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws iam detach-role-policy --role-name myCustomNetworkingAmazonEKSClusterRole --policy-arn {arn-aws}iam::aws:policy/AmazonEKSClusterPolicy
+----
+.. Delete the role.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws iam delete-role --role-name myCustomNetworkingAmazonEKSClusterRole
+----
+. Delete the subnets that you created in a previous step.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws ec2 delete-subnet --subnet-id $new_subnet_id_1
+aws ec2 delete-subnet --subnet-id $new_subnet_id_2
+----
+. Delete the VPC that you created.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws cloudformation delete-stack --stack-name my-eks-custom-networking-vpc
+----
+
+
+[.topic]
+[[cni-increase-ip-addresses,cni-increase-ip-addresses.title]]
+=== Assign more IP addresses to Amazon EKS nodes with prefixes
+
+[abstract]
+--
+Learn how to significantly increase the number of IP addresses that you can assign to [.noloc]`Pods` by assigning IP prefixes with Amazon EKS, improving scalability and reducing launch delays for large and spiky workloads.
+-- + +*Applies to*: Linux and Windows nodes with Amazon EC2 instances + +*Applies to*: Public and private subnets + +Each Amazon EC2 instance supports a maximum number of elastic network interfaces and a maximum number of IP addresses that can be assigned to each network interface. Each node requires one IP address for each network interface. All other available IP addresses can be assigned to `Pods`. Each `Pod` requires its own IP address. As a result, you might have nodes that have available compute and memory resources, but can't accommodate additional `Pods` because the node has run out of IP addresses to assign to `Pods`. + +You can increase the number of IP addresses that nodes can assign to `Pods` by assigning IP prefixes, rather than assigning individual secondary IP addresses to your nodes. Each prefix includes several IP addresses. If you don't configure your cluster for IP prefix assignment, your cluster must make more Amazon EC2 application programming interface (API) calls to configure network interfaces and IP addresses necessary for [.noloc]`Pod` connectivity. As clusters grow to larger sizes, the frequency of these API calls can lead to longer [.noloc]`Pod` and instance launch times. This results in scaling delays to meet the demand of large and spiky workloads, and adds cost and management overhead because you need to provision additional clusters and VPCs to meet scaling requirements. For more information, see https://github.com/kubernetes/community/blob/master/sig-scalability/configs-and-limits/thresholds.md[Kubernetes Scalability thresholds] on GitHub. + +[[cni-increase-ip-addresses-compatability,cni-increase-ip-addresses-compatability.title]] +==== Compatibility with [.noloc]`Amazon VPC CNI plugin for Kubernetes` features + +You can use IP prefixes with the following features: + + + +* IPv4 Source Network Address Translation - For more information, see <>. +* IPv6 addresses to clusters, Pods, and services - For more information, see <>. +* Restricting traffic using [.noloc]`Kubernetes` network policies - For more information, see <>. + +The following list provides information about the Amazon VPC CNI plugin settings that apply. For more information about each setting, see https://github.com/aws/amazon-vpc-cni-k8s/blob/master/README.md[amazon-vpc-cni-k8s] on [.noloc]`GitHub`. + + + +* `WARM_IP_TARGET` +* `MINIMUM_IP_TARGET` +* `WARM_PREFIX_TARGET` + + +[[cni-increase-ip-addresses-considerations,cni-increase-ip-addresses-considerations.title]] +==== Considerations + +Consider the following when you use this feature: + + + +* Each Amazon EC2 instance type supports a maximum number of [.noloc]`Pods`. If your managed node group consists of multiple instance types, the smallest number of maximum [.noloc]`Pods` for an instance in the cluster is applied to all nodes in the cluster. +* By default, the maximum number of `Pods` that you can run on a node is 110, but you can change that number. If you change the number and have an existing managed node group, the next AMI or launch template update of your node group results in new nodes coming up with the changed value. +* When transitioning from assigning IP addresses to assigning IP prefixes, we recommend that you create new node groups to increase the number of available IP addresses, rather than doing a rolling replacement of existing nodes. 
Running [.noloc]`Pods` on a node that has both IP addresses and prefixes assigned can lead to inconsistency in the advertised IP address capacity, impacting the future workloads on the node. For the recommended way of performing the transition, see https://github.com/aws/aws-eks-best-practices/blob/master/content/networking/prefix-mode/index_windows.md#replace-all-nodes-during-migration-from-secondary-ip-mode-to-prefix-delegation-mode-or-vice-versa[Replace all nodes during migration from Secondary IP mode to Prefix Delegation mode or vice versa] in the Amazon EKS best practices guide.
+* The security group scope is at the node-level - For more information, see link:vpc/latest/userguide/VPC_SecurityGroups.html[Security group,type="documentation"].
+* IP prefixes assigned to a network interface support high [.noloc]`Pod` density per node and have the best launch time.
+* IP prefixes and IP addresses are associated with standard Amazon EC2 elastic network interfaces. Pods requiring specific security groups are assigned the primary IP address of a branch network interface. You can mix [.noloc]`Pods` getting IP addresses, or IP addresses from IP prefixes, with [.noloc]`Pods` getting branch network interfaces on the same node.
+* For clusters with Linux nodes only.
++
+** After you configure the add-on to assign prefixes to network interfaces, you can't downgrade your [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on to a version lower than `1.9.0` (or `1.10.1`) without removing all nodes in all node groups in your cluster.
+** If you're also using security groups for [.noloc]`Pods`, with `POD_SECURITY_GROUP_ENFORCING_MODE`=``standard`` and `AWS_VPC_K8S_CNI_EXTERNALSNAT`=``false``, when your [.noloc]`Pods` communicate with endpoints outside of your VPC, the node's security groups are used, rather than any security groups you've assigned to your [.noloc]`Pods`.
++
+If you're also using <>, with `POD_SECURITY_GROUP_ENFORCING_MODE`=``strict``, when your `Pods` communicate with endpoints outside of your VPC, the `Pod's` security groups are used.
+
+
+[.topic]
+[[cni-increase-ip-addresses-procedure,cni-increase-ip-addresses-procedure.title]]
+==== Increase the available IP addresses for your Amazon EKS node
+
+You can increase the number of IP addresses that nodes can assign to [.noloc]`Pods` by assigning IP prefixes, rather than assigning individual secondary IP addresses to your nodes.
+
+Complete the following before you start the procedure:
+
+
+* Review the considerations.
+* You need an existing cluster. To deploy one, see <>.
+* The subnets that your Amazon EKS nodes are in must have sufficient contiguous `/28` (for `IPv4` clusters) or `/80` (for `IPv6` clusters) Classless Inter-Domain Routing (CIDR) blocks. You can only have Linux nodes in an `IPv6` cluster. Using IP prefixes can fail if IP addresses are scattered throughout the subnet CIDR. We recommend the following:
++
+** Use a subnet CIDR reservation so that even if any IP addresses within the reserved range are still in use, upon their release, the IP addresses aren't reassigned. This ensures that prefixes are available for allocation without segmentation.
+** Use new subnets that are specifically used for running the workloads that IP prefixes are assigned to. Both [.noloc]`Windows` and [.noloc]`Linux` workloads can run in the same subnet when assigning IP prefixes.
+* To assign IP prefixes to your nodes, your nodes must be {aws} Nitro-based.
Instances that aren't Nitro-based continue to allocate individual secondary IP addresses, but have a significantly lower number of IP addresses to assign to [.noloc]`Pods` than [.noloc]`Nitro-based` instances do. +* *For clusters with [.noloc]`Linux` nodes only* – If your cluster is configured for the `IPv4` family, you must have version `1.9.0` or later of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on installed. You can check your current version with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe daemonset aws-node --namespace kube-system | grep Image | cut -d "/" -f 2 +---- ++ +If your cluster is configured for the `IPv6` family, you must have version `1.10.1` of the add-on installed. If your plugin version is earlier than the required versions, you must update it. For more information, see the updating sections of <>. +* *For clusters with [.noloc]`Windows` nodes only*:: + ++ +** Your cluster and its platform version must be at, or later than the versions in the following table. To upgrade your cluster version, see <>. If your cluster isn't at the minimum platform version, then you can't assign IP prefixes to your nodes until Amazon EKS has updated your platform version. ++ +[cols="1,1", options="header"] +|=== +|Kubernetes version +|Platform version + + +|`1.27` +|`eks.3` + +|`1.26` +|`eks.4` + +|`1.25` +|`eks.5` +|=== ++ +You can check your current [.noloc]`Kubernetes` and platform version by replacing [.replaceable]`my-cluster` in the following command with the name of your cluster and then running the modified command: `aws eks describe-cluster --name [.replaceable]``my-cluster`` --query 'cluster.{"Kubernetes Version": version, "Platform Version": platformVersion}'`. +** [.noloc]`Windows` support enabled for your cluster. For more information, see <>. +. Configure your cluster to assign IP address prefixes to nodes. Complete the procedure on the tab that matches your node's operating system. ++ +[.noloc]`Linux`::: +... Enable the parameter to assign prefixes to network interfaces for the Amazon VPC CNI [.noloc]`DaemonSet`. When you deploy a `1.21` or later cluster, version `1.10.1` or later of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on is deployed with it. If you created the cluster with the `IPv6` family, this setting was set to `true` by default. If you created the cluster with the `IPv4` family, this setting was set to `false` by default. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl set env daemonset aws-node -n kube-system ENABLE_PREFIX_DELEGATION=true +---- ++ +IMPORTANT: Even if your subnet has available IP addresses, if the subnet does not have any contiguous `/28` blocks available, you will see the following error in the [.noloc]`Amazon VPC CNI plugin for Kubernetes` logs. + +[source,bash,subs="verbatim,attributes"] +---- +InsufficientCidrBlocks: The specified subnet does not have enough free cidr blocks to satisfy the request +---- + +This can happen due to fragmentation of existing secondary IP addresses spread out across a subnet. To resolve this error, either create a new subnet and launch [.noloc]`Pods` there, or use an Amazon EC2 subnet CIDR reservation to reserve space within a subnet for use with prefix assignment. For more information, see link:vpc/latest/userguide/subnet-cidr-reservation.html[Subnet CIDR reservations,type="documentation"] in the Amazon VPC User Guide. +... 
If you plan to deploy a managed node group without a launch template, or with a launch template that you haven't specified an AMI ID in, and you're using a version of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` at or later than the versions listed in the prerequisites, then skip to the next step. Managed node groups automatically calculate the maximum number of [.noloc]`Pods` for you.
++
+If you're deploying a self-managed node group or a managed node group with a launch template that you have specified an AMI ID in, then you must determine the Amazon EKS recommended number of maximum [.noloc]`Pods` for your nodes. Follow the instructions in <>, adding `--cni-prefix-delegation-enabled` to step 3. Note the output for use in a later step.
++
+IMPORTANT: Managed node groups enforce a maximum number on the value of `maxPods`. For instances with fewer than 30 vCPUs, the maximum number is 110, and for all other instances, the maximum number is 250. This maximum number is applied whether prefix delegation is enabled or not.
+... If you're using a `1.21` or later cluster configured for `IPv6`, skip to the next step.
++
+Specify the parameters in one of the following options. To determine which option is right for you and what value to provide for it, see https://github.com/aws/amazon-vpc-cni-k8s/blob/master/docs/prefix-and-ip-target.md[WARM_PREFIX_TARGET, WARM_IP_TARGET, and MINIMUM_IP_TARGET] on [.noloc]`GitHub`.
++
+You can replace the [.replaceable]`example values` with a value greater than zero.
++
+**** `WARM_PREFIX_TARGET`
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl set env ds aws-node -n kube-system WARM_PREFIX_TARGET=1
+----
+**** `WARM_IP_TARGET` or `MINIMUM_IP_TARGET` – If either value is set, it overrides any value set for `WARM_PREFIX_TARGET`.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl set env ds aws-node -n kube-system WARM_IP_TARGET=5
+----
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl set env ds aws-node -n kube-system MINIMUM_IP_TARGET=2
+----
+... Create one of the following types of node groups with at least one Amazon EC2 Nitro Amazon Linux 2 instance type. For a list of Nitro instance types, see link:AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances[Instances built on the Nitro System,type="documentation"] in the Amazon EC2 User Guide. This capability is not supported on [.noloc]`Windows`. For the options that include [.replaceable]`110`, replace it with either the value from step 3 (recommended), or your own value.
++
+**** *Self-managed* – Deploy the node group using the instructions in <>. Specify the following text for the *BootstrapArguments* parameter.
++
+[source,bash,subs="verbatim,attributes"]
+----
+--use-max-pods false --kubelet-extra-args '--max-pods=110'
+----
++
+If you're using `eksctl` to create the node group, you can use the following command.
++
+[source,bash,subs="verbatim,attributes"]
+----
+eksctl create nodegroup --cluster my-cluster --managed=false --max-pods-per-node 110
+----
+**** *Managed* – Deploy your node group using one of the following options:
++
+***** *Without a launch template or with a launch template without an AMI ID specified* – Complete the procedure in <>. Managed node groups automatically calculate the Amazon EKS recommended `max-pods` value for you.
+***** *With a launch template with a specified AMI ID* – In your launch template, specify an Amazon EKS optimized AMI ID, or a custom AMI built off the Amazon EKS optimized AMI, then <> and provide the following user data in the launch template. This user data passes arguments into the `bootstrap.sh` file. For more information about the bootstrap file, see https://github.com/awslabs/amazon-eks-ami/blob/main/templates/al2/runtime/bootstrap.sh[bootstrap.sh] on [.noloc]`GitHub`. ++ +[source,bash,subs="verbatim,attributes"] +---- +/etc/eks/bootstrap.sh my-cluster \ + --use-max-pods false \ + --kubelet-extra-args '--max-pods=110' +---- ++ +If you're using `eksctl` to create the node group, you can use the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create nodegroup --cluster my-cluster --max-pods-per-node 110 +---- ++ +If you've created a custom AMI that is not built off the Amazon EKS optimized AMI, then you need to custom create the configuration yourself. ++ +NOTE: If you also want to assign IP addresses to [.noloc]`Pods` from a different subnet than the instance's, then you need to enable the capability in this step. For more information, see <>. + + +[.noloc]`Windows`::: +... Enable assignment of IP prefixes. ++ +.... Open the `amazon-vpc-cni` `ConfigMap` for editing. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit configmap -n kube-system amazon-vpc-cni -o yaml +---- +.... Add the following line to the `data` section. ++ +[source,yaml,subs="verbatim,attributes"] +---- + enable-windows-prefix-delegation: "true" +---- +.... Save the file and close the editor. +.... Confirm that the line was added to the `ConfigMap`. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get configmap -n kube-system amazon-vpc-cni -o "jsonpath={.data.enable-windows-prefix-delegation}" +---- ++ +If the returned output isn't `true`, then there might have been an error. Try completing the step again. ++ +IMPORTANT: Even if your subnet has available IP addresses, if the subnet does not have any contiguous `/28` blocks available, you will see the following error in the node events. + +[source,bash,subs="verbatim,attributes"] +---- +"failed to allocate a private IP/Prefix address: InsufficientCidrBlocks: The specified subnet does not have enough free cidr blocks to satisfy the request" +---- + +This can happen due to fragmentation of existing secondary IP addresses spread out across a subnet. To resolve this error, either create a new subnet and launch [.noloc]`Pods` there, or use an Amazon EC2 subnet CIDR reservation to reserve space within a subnet for use with prefix assignment. For more information, see link:vpc/latest/userguide/subnet-cidr-reservation.html[Subnet CIDR reservations,type="documentation"] in the Amazon VPC User Guide. +... (Optional) Specify additional configuration for controlling the pre-scaling and dynamic scaling behavior for your cluster. For more information, see https://github.com/aws/amazon-vpc-resource-controller-k8s/blob/master/docs/windows/prefix_delegation_config_options.md[Configuration options with Prefix Delegation mode on Windows] on GitHub. ++ +.... Open the `amazon-vpc-cni` `ConfigMap` for editing. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit configmap -n kube-system amazon-vpc-cni -o yaml +---- +.... Replace the [.replaceable]`example values` with a value greater than zero and add the entries that you require to the `data` section of the `ConfigMap`. 
If you set a value for either `warm-ip-target` or `minimum-ip-target`, the value overrides any value set for `warm-prefix-target`. ++ +[source,yaml,subs="verbatim,attributes"] +---- + warm-prefix-target: "1" + warm-ip-target: "5" + minimum-ip-target: "2" +---- +.... Save the file and close the editor. +... Create [.noloc]`Windows` node groups with at least one Amazon EC2 [.noloc]`Nitro` instance type. For a list of [.noloc]`Nitro` instance types, see link:AWSEC2/latest/WindowsGuide/instance-types.html#ec2-nitro-instances[Instances built on the Nitro System,type="documentation"] in the Amazon EC2 User Guide. By default, the maximum number of [.noloc]`Pods` that you can deploy to a node is 110. If you want to increase or decrease that number, specify the following in the user data for the bootstrap configuration. Replace [.replaceable]`max-pods-quantity` with your max pods value. ++ +[source,bash,subs="verbatim,attributes"] +---- +-KubeletExtraArgs '--max-pods=max-pods-quantity' +---- ++ +If you're deploying managed node groups, this configuration needs to be added in the launch template. For more information, see <>. For more information about the configuration parameters for [.noloc]`Windows` bootstrap script, see <>. +. Once your nodes are deployed, view the nodes in your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get nodes +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME STATUS ROLES AGE VERSION +ip-192-168-22-103.region-code.compute.internal Ready 19m v1.XX.X-eks-6b7464 +ip-192-168-97-94.region-code.compute.internal Ready 19m v1.XX.X-eks-6b7464 +---- +. Describe one of the nodes to determine the value of `max-pods` for the node and the number of available IP addresses. Replace [.replaceable]`192.168.30.193` with the `IPv4` address in the name of one of your nodes returned in the previous output. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe node ip-192-168-30-193.region-code.compute.internal | grep 'pods\|PrivateIPv4Address' +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +pods: 110 +vpc.amazonaws.com/PrivateIPv4Address: 144 +---- ++ +In the previous output, `110` is the maximum number of [.noloc]`Pods` that [.noloc]`Kubernetes` will deploy to the node, even though [.replaceable]`144` IP addresses are available. + + +[.topic] +[[security-groups-for-pods,security-groups-for-pods.title]] +=== Assign security groups to individual [.noloc]`pods` + +[abstract] +-- +Learn how to configure security groups for [.noloc]`Pods` on Amazon EKS, integrating Amazon EC2 security groups with [.noloc]`Kubernetes` [.noloc]`Pods` to define network traffic rules. Discover the considerations, setup process, and deploy a sample application with assigned security groups. +-- + +*Applies to*: [.noloc]`Linux` nodes with Amazon EC2 instances + +*Applies to*: Private subnets + +Security groups for [.noloc]`Pods` integrate Amazon EC2 security groups with [.noloc]`Kubernetes` [.noloc]`Pods`. You can use Amazon EC2 security groups to define rules that allow inbound and outbound network traffic to and from [.noloc]`Pods` that you deploy to nodes running on many Amazon EC2 instance types and Fargate. For a detailed explanation of this capability, see the link:containers/introducing-security-groups-for-pods[Introducing security groups for Pods,type="blog"] blog post. 
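+
+For illustration only, the following commands sketch one way that you might create a security group to later assign to [.noloc]`Pods` through a `SecurityGroupPolicy`, which is described later in this topic. The group name, the example rule, and the `$vpc_id` variable are placeholder values for this sketch; define rules that match the traffic that your own application requires.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Example only: create a security group for Pods in your cluster's VPC.
+my_pod_security_group_id=$(aws ec2 create-security-group \
+    --group-name my-pod-security-group \
+    --description "My Pod security group" \
+    --vpc-id $vpc_id \
+    --query GroupId --output text)
+
+# Example rule: allow inbound TCP port 80 from other Pods that use the same security group.
+aws ec2 authorize-security-group-ingress \
+    --group-id $my_pod_security_group_id \
+    --protocol tcp --port 80 \
+    --source-group $my_pod_security_group_id
+----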
+ +[[security-groups-for-pods-compatability,security-groups-for-pods-compatability.title]] +==== Compatibility with [.noloc]`Amazon VPC CNI plugin for Kubernetes` features + +You can use security groups for [.noloc]`Pods` with the following features: + + + +* IPv4 Source Network Address Translation - For more information, see <>. +* IPv6 addresses to clusters, Pods, and services - For more information, see <>. +* Restricting traffic using [.noloc]`Kubernetes` network policies - For more information, see <>. + + +[[sg-pods-considerations,sg-pods-considerations.title]] +==== Considerations + +Before deploying security groups for [.noloc]`Pods`, consider the following limitations and conditions: + + + +* Security groups for [.noloc]`Pods` can't be used with [.noloc]`Windows` nodes. +* Security groups for [.noloc]`Pods` can be used with clusters configured for the `IPv6` family that contain Amazon EC2 nodes by using version 1.16.0 or later of the Amazon VPC CNI plugin. You can use security groups for [.noloc]`Pods` with clusters configure `IPv6` family that contain only Fargate nodes by using version 1.7.7 or later of the Amazon VPC CNI plugin. For more information, see <> +* Security groups for [.noloc]`Pods` are supported by most link:AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances[Nitro-based,type="documentation"] Amazon EC2 instance families, though not by all generations of a family. For example, the `m5`, `c5`, `r5`, `m6g`, `c6g`, and `r6g` instance family and generations are supported. No instance types in the `t` family are supported. For a complete list of supported instance types, see the https://github.com/aws/amazon-vpc-resource-controller-k8s/blob/v1.5.0/pkg/aws/vpc/limits.go[limits.go] file on [.noloc]`GitHub`. Your nodes must be one of the listed instance types that have `IsTrunkingCompatible: true` in that file. +* If you're also using [.noloc]`Pod` security policies to restrict access to [.noloc]`Pod` mutation, then the `eks:vpc-resource-controller` [.noloc]`Kubernetes` user must be specified in the [.noloc]`Kubernetes` `ClusterRoleBinding` for the `role` that your `psp` is assigned to. If you're using the default Amazon EKS `psp`, `role`, and `ClusterRoleBinding`, this is the `eks:podsecuritypolicy:authenticated` `ClusterRoleBinding`. For example, you add the user to the `subjects:` section, as shown in the following example: ++ +[source,yaml,subs="verbatim,attributes"] +---- +[...] +subjects: + - kind: Group + apiGroup: rbac.authorization.k8s.io + name: system:authenticated + - apiGroup: rbac.authorization.k8s.io + kind: User + name: eks:vpc-resource-controller + - kind: ServiceAccount + name: eks-vpc-resource-controller +---- +* If you're using custom networking and security groups for [.noloc]`Pods` together, the security group specified by security groups for [.noloc]`Pods` is used instead of the security group specified in the `ENIConfig`. +* If you're using version `1.10.2` or earlier of the Amazon VPC CNI plugin and you include the `terminationGracePeriodSeconds` setting in your [.noloc]`Pod` spec, the value for the setting can't be zero. +* If you're using version `1.10` or earlier of the Amazon VPC CNI plugin, or version `1.11` with `POD_SECURITY_GROUP_ENFORCING_MODE`=``strict``, which is the default setting, then [.noloc]`Kubernetes` services of type `NodePort` and `LoadBalancer` using instance targets with an `externalTrafficPolicy` set to `Local` aren't supported with [.noloc]`Pods` that you assign security groups to. 
For more information about using a load balancer with instance targets, see <>. +* If you're using version `1.10` or earlier of the Amazon VPC CNI plugin or version `1.11` with `POD_SECURITY_GROUP_ENFORCING_MODE`=``strict``, which is the default setting, source NAT is disabled for outbound traffic from [.noloc]`Pods` with assigned security groups so that outbound security group rules are applied. To access the internet, [.noloc]`Pods` with assigned security groups must be launched on nodes that are deployed in a private subnet configured with a NAT gateway or instance. [.noloc]`Pods` with assigned security groups deployed to public subnets are not able to access the internet. ++ +If you're using version `1.11` or later of the plugin with `POD_SECURITY_GROUP_ENFORCING_MODE`=``standard``, then [.noloc]`Pod` traffic destined for outside of the VPC is translated to the IP address of the instance's primary network interface. For this traffic, the rules in the security groups for the primary network interface are used, rather than the rules in the [.noloc]`Pod's` security groups. +* To use [.noloc]`Calico` network policy with [.noloc]`Pods` that have associated security groups, you must use version `1.11.0` or later of the Amazon VPC CNI plugin and set `POD_SECURITY_GROUP_ENFORCING_MODE`=``standard``. Otherwise, traffic flow to and from [.noloc]`Pods` with associated security groups are not subjected to [.noloc]`Calico` network policy enforcement and are limited to Amazon EC2 security group enforcement only. To update your Amazon VPC CNI version, see <> +* [.noloc]`Pods` running on Amazon EC2 nodes that use security groups in clusters that use https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/[NodeLocal DNSCache] are only supported with version `1.11.0` or later of the Amazon VPC CNI plugin and with `POD_SECURITY_GROUP_ENFORCING_MODE`=``standard``. To update your Amazon VPC CNI plugin version, see <> +* Security groups for [.noloc]`Pods` might lead to higher [.noloc]`Pod` startup latency for [.noloc]`Pods` with high churn. This is due to rate limiting in the resource controller. +* The EC2 security group scope is at the [.noloc]`Pod`-level - For more information, see link:vpc/latest/userguide/VPC_SecurityGroups.html[Security group,type="documentation"]. ++ +If you set `POD_SECURITY_GROUP_ENFORCING_MODE=standard` and `AWS_VPC_K8S_CNI_EXTERNALSNAT=false`, traffic destined for endpoints outside the VPC use the node's security groups, not the [.noloc]`Pod's` security groups. + + +[.topic] +[[security-groups-pods-deployment,security-groups-pods-deployment.title]] +==== Configure the [.noloc]`Amazon VPC CNI plugin for Kubernetes` for security groups for Amazon EKS [.noloc]`Pods` + +If you use [.noloc]`Pods` with Amazon EC2 instances, you need to configure the [.noloc]`Amazon VPC CNI plugin for Kubernetes` for security groups + +If you use Fargate [.noloc]`Pods` only, and don't have any Amazon EC2 nodes in your cluster, see <>. + +. Check your current [.noloc]`Amazon VPC CNI plugin for Kubernetes` version with the following command: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3 +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +v1.7.6 +---- ++ +If your [.noloc]`Amazon VPC CNI plugin for Kubernetes` version is earlier than `1.7.7`, then update the plugin to version `1.7.7` or later. For more information, see <> +. 
Add the link:iam/home#/policies/arn:aws:iam::aws:policy/AmazonEKSVPCResourceController[AmazonEKSVPCResourceController,type="console"] managed IAM policy to the <> that is associated with your Amazon EKS cluster. The policy allows the role to manage network interfaces, their private IP addresses, and their attachment and detachment to and from network instances.
++
+.. Retrieve the name of your cluster IAM role and store it in a variable. Replace [.replaceable]`my-cluster` with the name of your cluster.
++
+[source,bash,subs="verbatim,attributes"]
+----
+cluster_role=$(aws eks describe-cluster --name my-cluster --query cluster.roleArn --output text | cut -d / -f 2)
+----
+.. Attach the policy to the role.
++
+[source,bash,subs="verbatim,attributes"]
+----
+aws iam attach-role-policy --policy-arn {arn-aws}iam::aws:policy/AmazonEKSVPCResourceController --role-name $cluster_role
+----
+. Enable the Amazon VPC CNI add-on to manage network interfaces for [.noloc]`Pods` by setting the `ENABLE_POD_ENI` variable to `true` in the `aws-node` [.noloc]`DaemonSet`. Once this setting is set to `true`, for each node in the cluster, the add-on creates a `CNINode` custom resource. The VPC resource controller creates and attaches one special network interface called a _trunk network interface_ with the description `aws-k8s-trunk-eni`.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl set env daemonset aws-node -n kube-system ENABLE_POD_ENI=true
+----
++
+NOTE: The trunk network interface is included in the maximum number of network interfaces supported by the instance type. For a list of the maximum number of network interfaces supported by each instance type, see link:AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI[IP addresses per network interface per instance type,type="documentation"] in the _Amazon EC2 User Guide_. If your node already has the maximum number of standard network interfaces attached to it, then the VPC resource controller will reserve a space. You will have to scale down your running [.noloc]`Pods` enough for the controller to detach and delete a standard network interface, create the trunk network interface, and attach it to the instance.
+. You can see which of your nodes have a `CNINode` custom resource with the following command. If `No resources found` is returned, then wait several seconds and try again. The previous step requires restarting the [.noloc]`Amazon VPC CNI plugin for Kubernetes` [.noloc]`Pods`, which takes several seconds.
++
+[source,shell,subs="verbatim,attributes"]
+----
+kubectl get cninode -A
+      NAME FEATURES
+      ip-192-168-64-141.us-west-2.compute.internal [{"name":"SecurityGroupsForPods"}]
+      ip-192-168-7-203.us-west-2.compute.internal [{"name":"SecurityGroupsForPods"}]
+----
++
+If you are using VPC CNI versions older than `1.15`, node labels were used instead of the `CNINode` custom resource. You can see which of your nodes have the node label `aws-k8s-trunk-eni` set to `true` with the following command. If `No resources found` is returned, then wait several seconds and try again. The previous step requires restarting the [.noloc]`Amazon VPC CNI plugin for Kubernetes` [.noloc]`Pods`, which takes several seconds.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get nodes -o wide -l vpc.amazonaws.com/has-trunk-attached=true
+----
++
+Once the trunk network interface is created, [.noloc]`Pods` are assigned secondary IP addresses from the trunk or standard network interfaces. The trunk interface is automatically deleted if the node is deleted.
++ +When you deploy a security group for a [.noloc]`Pod` in a later step, the VPC resource controller creates a special network interface called a _branch network interface_ with a description of `aws-k8s-branch-eni` and associates the security groups to it. Branch network interfaces are created in addition to the standard and trunk network interfaces attached to the node. ++ +If you are using liveness or readiness probes, then you also need to disable _TCP early demux_, so that the `kubelet` can connect to [.noloc]`Pods` on branch network interfaces using TCP. To disable _TCP early demux_, run the following command: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl patch daemonset aws-node -n kube-system \ + -p '{"spec": {"template": {"spec": {"initContainers": [{"env":[{"name":"DISABLE_TCP_EARLY_DEMUX","value":"true"}],"name":"aws-vpc-cni-init"}]}}}}' +---- ++ +NOTE: If you're using `1.11.0` or later of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on and set `POD_SECURITY_GROUP_ENFORCING_MODE`=``standard``, as described in the next step, then you don't need to run the previous command. +. If your cluster uses `NodeLocal DNSCache`, or you want to use [.noloc]`Calico` network policy with your [.noloc]`Pods` that have their own security groups, or you have [.noloc]`Kubernetes` services of type `NodePort` and `LoadBalancer` using instance targets with an `externalTrafficPolicy` set to `Local` for [.noloc]`Pods` that you want to assign security groups to, then you must be using version `1.11.0` or later of the [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on, and you must enable the following setting: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl set env daemonset aws-node -n kube-system POD_SECURITY_GROUP_ENFORCING_MODE=standard +---- ++ +IMPORTANT: +** [.noloc]`Pod` security group rules aren't applied to traffic between [.noloc]`Pods` or between [.noloc]`Pods` and [.noloc]`services`, such as `kubelet` or `nodeLocalDNS`, that are on the same node. Pods using different security groups on the same node can't communicate because they are configured in different subnets, and routing is disabled between these subnets. +** Outbound traffic from [.noloc]`Pods` to addresses outside of the VPC is network address translated to the IP address of the instance's primary network interface (unless you've also set `AWS_VPC_K8S_CNI_EXTERNALSNAT=true`). For this traffic, the rules in the security groups for the primary network interface are used, rather than the rules in the [.noloc]`Pod's` security groups. +** For this setting to apply to existing [.noloc]`Pods`, you must restart the [.noloc]`Pods` or the nodes that the [.noloc]`Pods` are running on. + +. To see how to use a security group policy for your [.noloc]`Pod`, see <>. + + +[.topic] +[[sg-pods-example-deployment,sg-pods-example-deployment.title]] +==== Use a security group policy for an Amazon EKS [.noloc]`Pod` + +To use security groups for [.noloc]`Pods`, you must have an existing security group. The following steps show you how to use the security group policy for a [.noloc]`Pod`. Unless otherwise noted, complete all steps from the same terminal because variables are used in the following steps that don't persist across terminals. + +If you have a [.noloc]`Pod` with Amazon EC2 instances, you must configure the plugin before you use this procedure. For more information, see <>. + +. Create a [.noloc]`Kubernetes` namespace to deploy resources to. 
You can replace [.replaceable]`my-namespace` with the name of a namespace that you want to use. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl create namespace my-namespace +---- +. [[deploy-securitygrouppolicy]]Deploy an Amazon EKS `SecurityGroupPolicy` to your cluster. ++ +.. Copy the following contents to your device. You can replace [.replaceable]`podSelector` with `serviceAccountSelector` if you'd rather select [.noloc]`Pods` based on service account labels. You must specify one selector or the other. An empty `podSelector` (example: `podSelector: {}`) selects all [.noloc]`Pods` in the namespace. You can change [.replaceable]`my-role` to the name of your role. An empty `serviceAccountSelector` selects all service accounts in the namespace. You can replace [.replaceable]`my-security-group-policy` with a name for your `SecurityGroupPolicy` and [.replaceable]`my-namespace` with the namespace that you want to create the `SecurityGroupPolicy` in. ++ +You must replace [.replaceable]`my_pod_security_group_id` with the ID of an existing security group. If you don't have an existing security group, then you must create one. For more information, see link:AWSEC2/latest/UserGuide/ec2-security-groups.html[Amazon EC2 security groups for Linux instances,type="documentation"] in the link:AWSEC2/latest/UserGuide/[Amazon EC2 User Guide,type="documentation"]. You can specify 1-5 security group IDs. If you specify more than one ID, then the combination of all the rules in all the security groups are effective for the selected [.noloc]`Pods`. ++ +[source,yaml,subs="verbatim,attributes"] +---- +cat >my-security-group-policy.yaml <sample-application.yaml < +my-deployment-5df6f7687b-j9fl4 1/1 Running 0 7m51s 192.168.70.145 ip-192-168-92-33.region-code.compute.internal +my-deployment-5df6f7687b-rjxcz 1/1 Running 0 7m51s 192.168.73.207 ip-192-168-92-33.region-code.compute.internal +my-deployment-5df6f7687b-zmb42 1/1 Running 0 7m51s 192.168.63.27 ip-192-168-33-28.region-code.compute.internal +---- ++ +[NOTE] +==== +Try these tips if any [.noloc]`Pods` are stuck. + +* If any [.noloc]`Pods` are stuck in the `Waiting` state, then run `kubectl describe pod [.replaceable]``my-deployment-xxxxxxxxxx-xxxxx`` -n [.replaceable]``my-namespace```. If you see `Insufficient permissions: Unable to create Elastic Network Interface.`, confirm that you added the IAM policy to the IAM cluster role in a previous step. +* If any [.noloc]`Pods` are stuck in the `Pending` state, confirm that your node instance type is listed in https://github.com/aws/amazon-vpc-resource-controller-k8s/blob/master/pkg/aws/vpc/limits.go[limits.go] and that the product of the maximum number of branch network interfaces supported by the instance type multiplied times the number of nodes in your node group hasn't already been met. For example, an `m5.large` instance supports nine branch network interfaces. If your node group has five nodes, then a maximum of 45 branch network interfaces can be created for the node group. The 46th [.noloc]`Pod` that you attempt to deploy will sit in `Pending` state until another [.noloc]`Pod` that has associated security groups is deleted. + +==== ++ +If you run `kubectl describe pod [.replaceable]``my-deployment-xxxxxxxxxx-xxxxx`` -n [.replaceable]``my-namespace``` and see a message similar to the following message, then it can be safely ignored. 
This message might appear when the [.noloc]`Amazon VPC CNI plugin for Kubernetes` tries to set up host networking and fails while the network interface is being created. The plugin logs this event until the network interface is created. ++ +[source,bash,subs="verbatim,attributes"] +---- +Failed to create Pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "e24268322e55c8185721f52df6493684f6c2c3bf4fd59c9c121fd4cdc894579f" network for Pod "my-deployment-5df6f7687b-4fbjm": networkPlugin +cni failed to set up Pod "my-deployment-5df6f7687b-4fbjm-c89wx_my-namespace" network: add cmd: failed to assign an IP address to container +---- ++ +You can't exceed the maximum number of [.noloc]`Pods` that can be run on the instance type. For a list of the maximum number of [.noloc]`Pods` that you can run on each instance type, see https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt[eni-max-pods.txt] on [.noloc]`GitHub`. When you delete a [.noloc]`Pod` that has associated security groups, or delete the node that the [.noloc]`Pod` is running on, the VPC resource controller deletes the branch network interface. If you delete a cluster with [.noloc]`Pods` using [.noloc]`Pods` for security groups, then the controller doesn't delete the branch network interfaces, so you'll need to delete them yourself. For information about how to delete network interfaces, see link:AWSEC2/latest/UserGuide/using-eni.html#delete_eni[Delete a network interface,type="documentation"] in the Amazon EC2 User Guide. +. In a separate terminal, shell into one of the [.noloc]`Pods`. For the remainder of this topic, this terminal is referred to as `TerminalB`. Replace [.replaceable]`5df6f7687b-4fbjm` with the ID of one of the [.noloc]`Pods` returned in your output from the previous step. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl exec -it -n my-namespace my-deployment-5df6f7687b-4fbjm -- /bin/bash +---- +. From the shell in `TerminalB`, confirm that the sample application works. ++ +[source,bash,subs="verbatim,attributes"] +---- +curl my-app +---- ++ +An example output is as follows. ++ +[source,html,subs="verbatim"] +---- + + + +Welcome to nginx! +[...] +---- ++ +You received the output because all [.noloc]`Pods` running the application are associated with the security group that you created. That group contains a rule that allows all traffic between all [.noloc]`Pods` that the security group is associated to. DNS traffic is allowed outbound from that security group to the cluster security group, which is associated with your nodes. The nodes are running the [.noloc]`CoreDNS` [.noloc]`Pods`, which your [.noloc]`Pods` did a name lookup to. +. From `TerminalA`, remove the security group rules that allow DNS communication to the cluster security group from your security group. If you didn't add the DNS rules to the cluster security group in a previous step, then replace [.replaceable]`$my_cluster_security_group_id` with the ID of the security group that you created the rules in. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws ec2 revoke-security-group-ingress --group-id $my_cluster_security_group_id --security-group-rule-ids $my_tcp_rule_id +aws ec2 revoke-security-group-ingress --group-id $my_cluster_security_group_id --security-group-rule-ids $my_udp_rule_id +---- +. From `TerminalB`, attempt to access the application again. ++ +[source,bash,subs="verbatim,attributes"] +---- +curl my-app +---- ++ +An example output is as follows. 
++ +[source,bash,subs="verbatim,attributes"] +---- +curl: (6) Could not resolve host: my-app +---- ++ +The attempt fails because the [.noloc]`Pod` is no longer able to access the [.noloc]`CoreDNS` [.noloc]`Pods`, which have the cluster security group associated to them. The cluster security group no longer has the security group rules that allow DNS communication from the security group associated to your [.noloc]`Pod`. ++ +If you attempt to access the application using the IP addresses returned for one of the [.noloc]`Pods` in a previous step, you still receive a response because all ports are allowed between [.noloc]`Pods` that have the security group associated to them and a name lookup isn't required. +. Once you've finished experimenting, you can remove the sample security group policy, application, and security group that you created. Run the following commands from `TerminalA`. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl delete namespace my-namespace +aws ec2 revoke-security-group-ingress --group-id $my_pod_security_group_id --security-group-rule-ids $my_inbound_self_rule_id +wait +sleep 45s +aws ec2 delete-security-group --group-id $my_pod_security_group_id +---- + + +[.topic] +[[pod-multiple-network-interfaces,pod-multiple-network-interfaces.title]] +=== Attach multiple network interfaces to [.noloc]`Pods` with [.noloc]`Multus` + +[abstract] +-- +Learn how to use Multus CNI to attach multiple network interfaces to a [.noloc]`Pod` in Amazon EKS for advanced networking scenarios, while leveraging the [.noloc]`Amazon VPC CNI` plugin for primary networking. +-- + +Multus CNI is a container network interface (CNI) plugin for Amazon EKS that enables attaching multiple network interfaces to a [.noloc]`Pod`. For more information, see the https://github.com/k8snetworkplumbingwg/multus-cni[Multus-CNI] documentation on [.noloc]`GitHub`. + +In Amazon EKS, each [.noloc]`Pod` has one network interface assigned by the Amazon VPC CNI plugin. With Multus, you can create a multi-homed [.noloc]`Pod` that has multiple interfaces. This is accomplished by Multus acting as a "meta-plugin"; a CNI plugin that can call multiple other CNI plugins. {aws} support for Multus comes configured with the Amazon VPC CNI plugin as the default delegate plugin. + +* Amazon EKS won't be building and publishing single root I/O virtualization (SR-IOV) and Data Plane Development Kit (DPDK) CNI plugins. However, you can achieve packet acceleration by connecting directly to Amazon EC2 Elastic Network Adapters (ENA) through Multus managed host-device and `ipvlan` plugins. +* Amazon EKS is supporting Multus, which provides a generic process that enables simple chaining of additional CNI plugins. Multus and the process of chaining is supported, but {aws} won't provide support for all compatible CNI plugins that can be chained, or issues that may arise in those CNI plugins that are unrelated to the chaining configuration. +* Amazon EKS is providing support and life cycle management for the Multus plugin, but isn't responsible for any IP address or additional management associated with the additional network interfaces. The IP address and management of the default network interface utilizing the Amazon VPC CNI plugin remains unchanged. +* Only the Amazon VPC CNI plugin is officially supported as the default delegate plugin. 
You need to modify the published Multus installation manifest to reconfigure the default delegate plugin to an alternate CNI if you choose not to use the Amazon VPC CNI plugin for primary networking. +* Multus is only supported when using the Amazon VPC CNI as the primary CNI. We do not support the Amazon VPC CNI when used for higher order interfaces, secondary or otherwise. +* To prevent the Amazon VPC CNI plugin from trying to manage additional network interfaces assigned to [.noloc]`Pods`, add the following tag to the network interface: ++ +*key*:: +: `node.k8s.amazonaws.com/no_manage` ++ +*value*:: +: `true` +* Multus is compatible with network policies, but the policy has to be enriched to include ports and IP addresses that may be part of additional network interfaces attached to [.noloc]`Pods`. + +For an implementation walk through, see the https://github.com/aws-samples/eks-install-guide-for-multus/blob/main/README.md[Multus Setup Guide] on [.noloc]`GitHub`. + + +[.topic] +[[alternate-cni-plugins,alternate-cni-plugins.title]] +== Alternate CNI plugins for Amazon EKS clusters + +[abstract] +-- +Learn how to use alternate network and security plugins on Amazon EKS to customize networking for your [.noloc]`Kubernetes` clusters on Amazon EC2 nodes. +-- + +The https://github.com/aws/amazon-vpc-cni-plugins[Amazon VPC CNI plugin for Kubernetes] is the only CNI plugin supported by Amazon EKS with Amazon EC2 nodes. Amazon EKS supports the core capabilities of Cilium and Calico for Amazon EKS Hybrid Nodes. Amazon EKS runs upstream [.noloc]`Kubernetes`, so you can install alternate compatible CNI plugins to Amazon EC2 nodes in your cluster. If you have Fargate nodes in your cluster, the [.noloc]`Amazon VPC CNI plugin for Kubernetes` is already on your Fargate nodes. It's the only CNI plugin you can use with Fargate nodes. An attempt to install an alternate CNI plugin on Fargate nodes fails. + +If you plan to use an alternate CNI plugin on Amazon EC2 nodes, we recommend that you obtain commercial support for the plugin or have the in-house expertise to troubleshoot and contribute fixes to the CNI plugin project. + +Amazon EKS maintains relationships with a network of partners that offer support for alternate compatible CNI plugins. For details about the versions, qualifications, and testing performed, see the following partner documentation. + +[cols="1,1,1", options="header"] +|=== +|Partner +|Product +|Documentation + + +|Tigera +|https://www.tigera.io/partners/aws/[Calico] +|https://docs.projectcalico.org/getting-started/kubernetes/managed-public-cloud/eks[Installation instructions] + +|Isovalent +|https://cilium.io[Cilium] +|https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/[Installation instructions] + +|Juniper +|https://www.juniper.net/us/en/products/sdn-and-orchestration/contrail/cloud-native-contrail-networking.html[Cloud-Native Contrail Networking (CN2)] +|https://www.juniper.net/documentation/us/en/software/cn-cloud-native23.2/cn-cloud-native-eks-install-and-lcm/index.html[Installation instructions] + +|VMware +|https://antrea.io/[Antrea] +|https://antrea.io/docs/main/docs/eks-installation[Installation instructions] +|=== + +Amazon EKS aims to give you a wide selection of options to cover all use cases. + + +[[alternate-network-policy-plugins,alternate-network-policy-plugins.title]] +=== Alternate compatible network policy plugins + +https://www.tigera.io/project-calico[Calico] is a widely adopted solution for container networking and security. 
Using [.noloc]`Calico` on EKS provides fully compliant network policy enforcement for your EKS clusters. Additionally, you can opt to use [.noloc]`Calico's` networking, which conserves IP addresses from your underlying VPC. https://www.tigera.io/tigera-products/calico-cloud/[Calico Cloud] enhances the features of [.noloc]`Calico Open Source`, providing advanced security and observability capabilities.
+
+Traffic to and from [.noloc]`Pods` with associated security groups isn't subject to [.noloc]`Calico` network policy enforcement and is limited to Amazon VPC security group enforcement only.
+
+If you use [.noloc]`Calico` network policy enforcement, we recommend that you set the environment variable `ANNOTATE_POD_IP` to `true` to avoid a known issue with [.noloc]`Kubernetes`. To use this feature, you must add `patch` permission for pods to the `aws-node` [.noloc]`ClusterRole`. Note that adding patch permissions to the `aws-node` [.noloc]`DaemonSet` increases the security scope for the plugin. For more information, see https://github.com/aws/amazon-vpc-cni-k8s/?tab=readme-ov-file#annotate_pod_ip-v193[ANNOTATE_POD_IP] in the VPC CNI repo on GitHub.
+
+=== Considerations for Amazon EKS Auto Mode
+
+Amazon EKS Auto Mode does not support alternate CNI plugins or network policy plugins. For more information, see <>.
+
+[.topic]
+[[aws-load-balancer-controller,aws-load-balancer-controller.title]]
+== Route internet traffic with {aws} Load Balancer Controller
+
+[abstract]
+--
+Learn how to configure and use the [.noloc]`{aws} Load Balancer Controller` to expose [.noloc]`Kubernetes` cluster apps to the internet with {aws} Elastic Load Balancing for [.noloc]`Kubernetes` [.noloc]`services` and [.noloc]`ingresses`.
+--
+
+The [.noloc]`{aws} Load Balancer Controller` manages {aws} Elastic Load Balancers for a [.noloc]`Kubernetes` cluster. You can use the controller to expose your cluster apps to the internet. The controller provisions {aws} load balancers that point to cluster Service or Ingress resources. In other words, the controller creates a single IP address or DNS name that points to multiple pods in your cluster.
+
+
+
+image::images/lbc-overview.png["Architecture diagram. Illustration of traffic coming from internet users, to Amazon Load Balancer. Amazon Load Balancer distributes traffic to pods in the cluster.",scaledwidth=50%]
+
+The controller watches for [.noloc]`Kubernetes` [.noloc]`Ingress` or [.noloc]`Service` resources. In response, it creates the appropriate {aws} Elastic Load Balancing resources. You can configure the specific behavior of the load balancers by applying annotations to the [.noloc]`Kubernetes` resources. For example, you can attach {aws} security groups to load balancers using annotations.
+
+The controller provisions the following resources:
+
+
+
+*[.noloc]`Kubernetes` `Ingress`*::
+The LBC creates an link:elasticloadbalancing/latest/application/introduction.html[{aws} Application Load Balancer (ALB),type="documentation"] when you create a [.noloc]`Kubernetes` `Ingress`. https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/[Review the annotations you can apply to an Ingress resource.]
+
+
+*[.noloc]`Kubernetes` service of the `LoadBalancer` type*::
+The LBC creates an link:elasticloadbalancing/latest/network/introduction.html[{aws} Network Load Balancer (NLB),type="documentation"] when you create a [.noloc]`Kubernetes` service of type `LoadBalancer`. 
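++
+For example, with the controller installed (version `2.5.0` or later, where it handles `type: LoadBalancer` services by default as noted below), a [.noloc]`Service` similar to the following minimal sketch results in an internet-facing NLB with IP targets. The name [.replaceable]`my-app`, the selector, and the ports are placeholder values:
++
+[source,yaml,subs="verbatim,attributes"]
+----
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-app
+  annotations:
+    # Optional controller annotations, shown for illustration; see the annotation reference that follows.
+    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
+    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
+spec:
+  type: LoadBalancer
+  selector:
+    app: my-app
+  ports:
+    - port: 80
+      targetPort: 8080
+----
++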
https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/annotations/[Review the annotations you can apply to a Service resource.] ++ +In the past, the [.noloc]`Kubernetes` network load balancer was used for _instance_ targets, but the LBC was used for _IP_ targets. With the [.noloc]`{aws} Load Balancer Controller` version `2.3.0` or later, you can create NLBs using either target type. For more information about NLB target types, see link:elasticloadbalancing/latest/network/load-balancer-target-groups.html#target-type[Target type,type="documentation"] in the User Guide for Network Load Balancers. + +The controller is an https://github.com/kubernetes-sigs/aws-load-balancer-controller[open-source project] managed on [.noloc]`GitHub`. + +Before deploying the controller, we recommend that you review the prerequisites and considerations in <> and <>. In those topics, you will deploy a sample app that includes an {aws} load balancer. + + +[[lbc-overview,lbc-overview.title]] +=== Install the controller + +You can use one of the following procedures to install the [.noloc]`{aws} Load Balancer Controller`: + + + +* If you are new to Amazon EKS we recommend that you use Helm for the installation because it simplifies the [.noloc]`{aws} Load Balancer Controller` installation. For more information, see <>. +* For advanced configurations, such as clusters with restricted network access to public container registries, use [.noloc]`Kubernetes` Manifests. For more information, see <>. + + +[[lbc-deprecated,lbc-deprecated.title]] +=== Migrate from deprecated controller versions + +* If you have deprecated versions of the [.noloc]`{aws} Load Balancer Controller` installed, see <>. +* Deprecated versions cannot be upgraded. They must be removed and a current version of the [.noloc]`{aws} Load Balancer Controller` installed. ++ +[[lbc-deprecated-list]] +* Deprecated versions include: ++ +** {aws} ALB Ingress Controller for [.noloc]`Kubernetes` ("Ingress Controller"), a predecessor to the [.noloc]`{aws} Load Balancer Controller`. +** Any `0.1.[.replaceable]``x``` version of the [.noloc]`{aws} Load Balancer Controller` + + +[[lbc-legacy,lbc-legacy.title]] +=== Legacy cloud provider + +[.noloc]`Kubernetes` includes a legacy cloud provider for {aws}. The legacy cloud provider is capable of provisioning {aws} load balancers, similar to the [.noloc]`{aws} Load Balancer Controller`. The legacy cloud provider creates Classic Load Balancers. If you do not install the [.noloc]`{aws} Load Balancer Controller`, [.noloc]`Kubernetes` will default to using the legacy cloud provider. You should install the [.noloc]`{aws} Load Balancer Controller` and avoid using the legacy cloud provider. + +[IMPORTANT] +==== + +In versions 2.5 and newer, the [.noloc]`{aws} Load Balancer Controller` becomes the default controller for [.noloc]`Kubernetes` _service_ resources with the `type: LoadBalancer` and makes an {aws} Network Load Balancer (NLB) for each service. It does this by making a mutating webhook for services, which sets the `spec.loadBalancerClass` field to `service.k8s.aws/nlb` for new services of `type: LoadBalancer`. You can turn off this feature and revert to using the https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/annotations/#legacy-cloud-provider[legacy Cloud Provider] as the default controller, by setting the helm chart value `enableServiceMutatorWebhook` to `false`. 
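+For example, if you installed the controller with the Helm chart described in the installation topic that follows, a command similar to the following sketch (which assumes the default release name and chart repository used in this guide) turns the webhook off:
+
+[source,shell,subs="verbatim,attributes"]
+----
+helm upgrade aws-load-balancer-controller eks/aws-load-balancer-controller \
+    -n kube-system \
+    --reuse-values \
+    --set enableServiceMutatorWebhook=false
+----
+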
The cluster won't provision new Classic Load Balancers for your services unless you turn off this feature. Existing Classic Load Balancers will continue to work. + +==== + + +[.topic] +[[lbc-helm,lbc-helm.title]] +=== Install [.noloc]`{aws} Load Balancer Controller` with [.noloc]`Helm` + +[abstract] +-- +Learn how to install the [.noloc]`{aws} Load Balancer Controller` on Amazon EKS using Helm to manage K8s load balancing with {aws} Cloud. Discover the prerequisites and steps for creating an IAM role, installing with Helm, and verifying the controller deployment. +-- + +[TIP] +==== +With Amazon EKS Auto Mode, you don't need to install or upgrade networking add-ons. Auto Mode includes pod networking and load balancing capabilities. + +For more information, see <>. +==== + +This topic describes how to install the [.noloc]`{aws} Load Balancer Controller` using Helm, a package manager for [.noloc]`Kubernetes`, and `eksctl`. The controller is installed with default options. For more information about the controller, including details on configuring it with annotations, see the https://kubernetes-sigs.github.io/aws-load-balancer-controller/[{aws} Load Balancer Controller Documentation] on [.noloc]`GitHub`. + +In the following steps, replace the [.replaceable]`example values` with your own values. + +[[lbc-prereqs,lbc-prereqs.title]] +==== Prerequisites + +Before starting this tutorial, you must install and configure the following tools and resources that you need to create and manage an Amazon EKS cluster. + + + +* An existing Amazon EKS cluster. To deploy one, see <>. +* An existing {aws} Identity and Access Management (IAM) [.noloc]`OpenID Connect` ([.noloc]`OIDC`) provider for your cluster. To determine whether you already have one, or to create one, see <>. +* Make sure that your [.noloc]`Amazon VPC CNI plugin for Kubernetes`, `kube-proxy`, and [.noloc]`CoreDNS` add-ons are at the minimum versions listed in <>. +* Familiarity with {aws} Elastic Load Balancing. For more information, see the link:elasticloadbalancing/latest/userguide/[Elastic Load Balancing User Guide,type="documentation"]. +* Familiarity with Kubernetes https://kubernetes.io/docs/concepts/services-networking/service/[service] and https://kubernetes.io/docs/concepts/services-networking/ingress/[ingress] resources. + + +* https://helm.sh/docs/helm/helm_install/[Helm] installed locally. + + +[[lbc-helm-iam,lbc-helm-iam.title]] +==== Step 1: Create IAM Role using `eksctl` + +[NOTE] +==== + +You only need to create an IAM Role for the [.noloc]`{aws} Load Balancer Controller` once per {aws-account}. Check if `AmazonEKSLoadBalancerControllerRole` exists in the link:iam[IAM Console,type="console"]. If this role exists, skip to <>. + +==== + +[NOTE] +==== + +Below example is referring to the [.noloc]`{aws} Load Balancer Controller` **v2.11.0** release version. For more information about all releases, see the https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/[{aws} Load Balancer Controller Release Page] on [.noloc]`GitHub`. + +==== + +. Download an IAM policy for the [.noloc]`{aws} Load Balancer Controller` that allows it to make calls to {aws} APIs on your behalf. 
++ +==== +[role="tablist"] +{aws}::: ++ +[source,shell,subs="verbatim,attributes"] +---- +curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.11.0/docs/install/iam_policy.json +---- + + +{aws} GovCloud (US)::: ++ +[source,shell,subs="verbatim,attributes"] +---- +curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.11.0/docs/install/iam_policy_us-gov.json +---- ++ +[source,shell,subs="verbatim,attributes"] +---- +mv iam_policy_us-gov.json iam_policy.json +---- +==== ++ +. Create an IAM policy using the policy downloaded in the previous step. ++ +[source,shell,subs="verbatim,attributes"] +---- +aws iam create-policy \ + --policy-name AWSLoadBalancerControllerIAMPolicy \ + --policy-document file://iam_policy.json +---- ++ +NOTE: If you view the policy in the {aws-management-console}, the console shows warnings for the *ELB* service, but not for the *ELB v2* service. This happens because some of the actions in the policy exist for *ELB v2*, but not for *ELB*. You can ignore the warnings for *ELB*. +. Replace [.replaceable]`my-cluster` with the name of your cluster, [.replaceable]`111122223333` with your account ID, and then run the command. If your cluster is in the {aws} GovCloud (US-East) or {aws} GovCloud (US-West) {aws} Regions, then replace `{arn-aws}` with `arn:aws-us-gov:`. ++ +[source,shell,subs="verbatim,attributes"] +---- +eksctl create iamserviceaccount \ + --cluster=my-cluster \ + --namespace=kube-system \ + --name=aws-load-balancer-controller \ + --role-name AmazonEKSLoadBalancerControllerRole \ + --attach-policy-arn={arn-aws}iam::111122223333:policy/AWSLoadBalancerControllerIAMPolicy \ + --approve +---- + + +[[lbc-helm-install,lbc-helm-install.title]] +==== Step 2: Install [.noloc]`{aws} Load Balancer Controller` + +. Add the `eks-charts` Helm chart repository. {aws} maintains https://github.com/aws/eks-charts[this repository] on GitHub. ++ +[source,shell,subs="verbatim,attributes"] +---- +helm repo add eks https://aws.github.io/eks-charts +---- +. Update your local repo to make sure that you have the most recent charts. ++ +[source,shell,subs="verbatim,attributes"] +---- +helm repo update eks +---- +. Install the [.noloc]`{aws} Load Balancer Controller`. ++ +If you're deploying the controller to Amazon EC2 nodes that have https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[restricted access to the Amazon EC2 instance metadata service (IMDS)], or if you're deploying to Fargate or Amazon EKS Hybrid Nodes, then add the following flags to the `helm` command that follows: ++ +*** `--set region=[.replaceable]``region-code``` +*** `--set vpcId=[.replaceable]``vpc-xxxxxxxx``` ++ +Replace [.replaceable]`my-cluster` with the name of your cluster. In the following command, `aws-load-balancer-controller` is the [.noloc]`Kubernetes` service account that you created in a previous step. ++ +For more information about configuring the helm chart, see https://github.com/aws/eks-charts/blob/master/stable/aws-load-balancer-controller/values.yaml[values.yaml] on GitHub. ++ +[source,shell,subs="verbatim,attributes"] +---- +helm install aws-load-balancer-controller eks/aws-load-balancer-controller \ + -n kube-system \ + --set clusterName=my-cluster \ + --set serviceAccount.create=false \ + --set serviceAccount.name=aws-load-balancer-controller +---- + + +[IMPORTANT] +==== +The deployed chart doesn't receive security updates automatically. 
You need to manually upgrade to a newer chart when it becomes available. When upgrading, change [.replaceable]`install` to `upgrade` in the previous command. +==== + +The `helm install` command automatically installs the custom resource definitions ([.noloc]`CRDs`) for the controller. The `helm upgrade` command does not. If you use `helm upgrade,` you must manually install the [.noloc]`CRDs`. Run the following command to install the [.noloc]`CRDs`: + +[source,shell,subs="verbatim,attributes"] +---- +wget https://raw.githubusercontent.com/aws/eks-charts/master/stable/aws-load-balancer-controller/crds/crds.yaml +kubectl apply -f crds.yaml +---- + + +[[lbc-helm-verify,lbc-helm-verify.title]] +==== Step 3: Verify that the controller is installed +. Verify that the controller is installed. ++ +[source,shell,subs="verbatim,attributes"] +---- +kubectl get deployment -n kube-system aws-load-balancer-controller +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME READY UP-TO-DATE AVAILABLE AGE +aws-load-balancer-controller 2/2 2 2 84s +---- ++ +You receive the previous output if you deployed using Helm. If you deployed using the [.noloc]`Kubernetes` manifest, you only have one replica. +. Before using the controller to provision {aws} resources, your cluster must meet specific requirements. For more information, see <> and <>. + +// GDC Must Fix + +[.topic] +[[lbc-manifest,lbc-manifest.title]] +=== Install [.noloc]`{aws} Load Balancer Controller` with manifests + +[abstract] +-- +Install the [.noloc]`{aws} Load Balancer Controller` add-on for Amazon EKS using [.noloc]`Kubernetes` manifests to provision Elastic Load Balancing resources. Configure IAM role and install `cert-manager` before applying controller manifest. +-- + +[TIP] +==== +With Amazon EKS Auto Mode, you don't need to install or upgrade networking add-ons. Auto Mode includes pod networking and load balancing capabilities. + +For more information, see <>. +==== + +This topic describes how to install the controller by downloading and applying [.noloc]`Kubernetes` manifests. You can view the full https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/[documentation] for the controller on [.noloc]`GitHub`. + +In the following steps, replace the [.replaceable]`example values` with your own values. + +[[lbc-manifest-prereqs,lbc-manifest-prereqs.title]] +==== Prerequisites + +Before starting this tutorial, you must install and configure the following tools and resources that you need to create and manage an Amazon EKS cluster. + + + +* An existing Amazon EKS cluster. To deploy one, see <>. +* An existing {aws} Identity and Access Management (IAM) [.noloc]`OpenID Connect` ([.noloc]`OIDC`) provider for your cluster. To determine whether you already have one, or to create one, see <>. +* Make sure that your [.noloc]`Amazon VPC CNI plugin for Kubernetes`, `kube-proxy`, and [.noloc]`CoreDNS` add-ons are at the minimum versions listed in <>. +* Familiarity with {aws} Elastic Load Balancing. For more information, see the link:elasticloadbalancing/latest/userguide/[Elastic Load Balancing User Guide,type="documentation"]. +* Familiarity with Kubernetes https://kubernetes.io/docs/concepts/services-networking/service/[service] and https://kubernetes.io/docs/concepts/services-networking/ingress/[ingress] resources. + + +[[lbc-iam,lbc-iam.title]] +==== Step 1: Configure IAM + +[NOTE] +==== + +You only need to create a role for the [.noloc]`{aws} Load Balancer Controller` one per {aws} account. 
Check if `AmazonEKSLoadBalancerControllerRole` exists in the link:iam[IAM Console,type="console"]. If this role exists, skip to <>. + +==== + +[NOTE] +==== + +Below example is referring to the [.noloc]`{aws} Load Balancer Controller` **v2.11.0** release version. For more inforamtion about all releases, see the https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/[{aws} Load Balancer Controller Release Page] on [.noloc]`GitHub`. + +==== + +. Download an IAM policy for the [.noloc]`{aws} Load Balancer Controller` that allows it to make calls to {aws} APIs on your behalf. ++ +==== +[role="tablist"] +{aws}::: ++ +[source,shell,subs="verbatim,attributes"] +---- +curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.11.0/docs/install/iam_policy.json +---- + + +{aws} GovCloud (US)::: ++ +[source,shell,subs="verbatim,attributes"] +---- +curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.11.0/docs/install/iam_policy_us-gov.json +---- ++ +[source,shell,subs="verbatim,attributes"] +---- +mv iam_policy_us-gov.json iam_policy.json +---- +==== +. Create an IAM policy using the policy downloaded in the previous step. ++ +[source,shell,subs="verbatim,attributes"] +---- +aws iam create-policy \ + --policy-name AWSLoadBalancerControllerIAMPolicy \ + --policy-document file://iam_policy.json +---- ++ +NOTE: If you view the policy in the {aws-management-console}, the console shows warnings for the *ELB* service, but not for the *ELB v2* service. This happens because some of the actions in the policy exist for *ELB v2*, but not for *ELB*. You can ignore the warnings for *ELB*. + +==== +[role="tablist"] +eksctl::: +.. Replace [.replaceable]`my-cluster` with the name of your cluster, [.replaceable]`111122223333` with your account ID, and then run the command. If your cluster is in the {aws} GovCloud (US-East) or {aws} GovCloud (US-West) {aws} Regions, then replace `{arn-aws}` with `arn:aws-us-gov:`. ++ +[source,shell,subs="verbatim,attributes"] +---- +eksctl create iamserviceaccount \ + --cluster=my-cluster \ + --namespace=kube-system \ + --name=aws-load-balancer-controller \ + --role-name AmazonEKSLoadBalancerControllerRole \ + --attach-policy-arn={arn-aws}iam::111122223333:policy/AWSLoadBalancerControllerIAMPolicy \ + --approve +---- + + +{aws} CLI and kubectl::: +.. Retrieve your cluster's [.noloc]`OIDC` provider ID and store it in a variable. ++ +[source,bash,subs="verbatim,attributes"] +---- +oidc_id=$(aws eks describe-cluster --name my-cluster --query "cluster.identity.oidc.issuer" --output text | cut -d '/' -f 5) +---- +.. Determine whether an IAM [.noloc]`OIDC` provider with your cluster's ID is already in your account. You need [.noloc]`OIDC` configured for both the cluster and IAM. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws iam list-open-id-connect-providers | grep $oidc_id | cut -d "/" -f4 +---- ++ +If output is returned, then you already have an IAM [.noloc]`OIDC` provider for your cluster. If no output is returned, then you must create an IAM [.noloc]`OIDC` provider for your cluster. For more information, see <>. +.. Copy the following contents to your device. Replace [.replaceable]`111122223333` with your account ID. Replace [.replaceable]`region-code` with the {aws} Region that your cluster is in. Replace [.replaceable]`EXAMPLED539D4633E53DE1B71EXAMPLE` with the output returned in the previous step. 
If your cluster is in the {aws} GovCloud (US-East) or {aws} GovCloud (US-West) {aws} Regions, then replace `{arn-aws}` with `arn:aws-us-gov:`. After replacing the text, run the modified command to create the `load-balancer-role-trust-policy.json` file. ++ +[source,json,subs="verbatim,attributes"] +---- +cat >load-balancer-role-trust-policy.json <aws-load-balancer-controller-service-account.yaml <>. ++ +[source,bash,subs="verbatim,attributes"] +---- +quay.io/jetstack/cert-manager-cainjector:v1.13.5 +quay.io/jetstack/cert-manager-controller:v1.13.5 +quay.io/jetstack/cert-manager-webhook:v1.13.5 +---- +.. Replace `quay.io` in the manifest for the three images with your own registry name. The following command assumes that your private repository's name is the same as the source repository. Replace [.replaceable]`111122223333.dkr.ecr.region-code.amazonaws.com` with your private registry. ++ +[source,shell,subs="verbatim,attributes"] +---- +sed -i.bak -e 's|quay.io|111122223333.dkr.ecr.region-code.amazonaws.com|' ./cert-manager.yaml +---- +.. Apply the manifest. ++ +[source,shell,subs="verbatim,attributes"] +---- +kubectl apply \ + --validate=false \ + -f ./cert-manager.yaml +---- +==== + + +[[lbc-install,lbc-install.title]] +==== Step 3: Install [.noloc]`{aws} Load Balancer Controller` +. Download the controller specification. For more information about the controller, see the https://kubernetes-sigs.github.io/aws-load-balancer-controller/[documentation] on [.noloc]`GitHub`. ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -Lo v2_11_0_full.yaml https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/download/v2.11.0/v2_11_0_full.yaml +---- +. Make the following edits to the file. ++ +.. If you downloaded the `v2_11_0_full.yaml` file, run the following command to remove the `ServiceAccount` section in the manifest. If you don't remove this section, the required annotation that you made to the service account in a previous step is overwritten. Removing this section also preserves the service account that you created in a previous step if you delete the controller. ++ +[source,shell,subs="verbatim,attributes"] +---- +sed -i.bak -e '690,698d' ./v2_11_0_full.yaml +---- ++ +If you downloaded a different file version, then open the file in an editor and remove the following lines. ++ +[source,yaml,subs="verbatim,attributes"] +---- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: aws-load-balancer-controller + name: aws-load-balancer-controller + namespace: kube-system +--- +---- +.. Replace `your-cluster-name` in the `Deployment` `spec` section of the file with the name of your cluster by replacing [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,shell,subs="verbatim,attributes"] +---- +sed -i.bak -e 's|your-cluster-name|my-cluster|' ./v2_11_0_full.yaml +---- +.. If your nodes don't have access to the Amazon EKS Amazon ECR image repositories, then you need to pull the following image and push it to a repository that your nodes have access to. For more information on how to pull, tag, and push an image to your own repository, see <>. ++ +[source,bash,subs="verbatim,attributes"] +---- +public.ecr.aws/eks/aws-load-balancer-controller:v2.11.0 +---- ++ +Add your registry's name to the manifest. The following command assumes that your private repository's name is the same as the source repository and adds your private registry's name to the file. 
Replace [.replaceable]`111122223333.dkr.ecr.region-code.amazonaws.com` with your registry. This line assumes that you named your private repository the same as the source repository. If not, change the `eks/aws-load-balancer-controller` text after your private registry name to your repository name. ++ +[source,shell,subs="verbatim,attributes"] +---- +sed -i.bak -e 's|public.ecr.aws/eks/aws-load-balancer-controller|111122223333.dkr.ecr.region-code.amazonaws.com/eks/aws-load-balancer-controller|' ./v2_11_0_full.yaml +---- +.. (Required only for Fargate or Restricted IMDS) ++ +If you're deploying the controller to Amazon EC2 nodes that have https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[restricted access to the Amazon EC2 instance metadata service (IMDS)], or if you're deploying to Fargate or Amazon EKS Hybrid Nodes, then add the `following parameters` under `- args:`. ++ +[source,yaml,subs="verbatim,attributes"] +---- +[...] +spec: + containers: + - args: + - --cluster-name=your-cluster-name + - --ingress-class=alb + - --aws-vpc-id=vpc-xxxxxxxx + - --aws-region=region-code + + +[...] +---- +. Apply the file. ++ +[source,shell,subs="verbatim,attributes"] +---- +kubectl apply -f v2_11_0_full.yaml +---- +. Download the `IngressClass` and `IngressClassParams` manifest to your cluster. ++ +[source,shell,subs="verbatim,attributes"] +---- +curl -Lo v2_11_0_ingclass.yaml https://github.com/kubernetes-sigs/aws-load-balancer-controller/releases/download/v2.11.0/v2_11_0_ingclass.yaml +---- +. Apply the manifest to your cluster. ++ +[source,shell,subs="verbatim,attributes"] +---- +kubectl apply -f v2_11_0_ingclass.yaml +---- + + +[[lbc-verify,lbc-verify.title]] +==== Step 4: Verify that the controller is installed +. Verify that the controller is installed. ++ +[source,shell,subs="verbatim,attributes"] +---- +kubectl get deployment -n kube-system aws-load-balancer-controller +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME READY UP-TO-DATE AVAILABLE AGE +aws-load-balancer-controller 2/2 2 2 84s +---- ++ +You receive the previous output if you deployed using Helm. If you deployed using the [.noloc]`Kubernetes` manifest, you only have one replica. +. Before using the controller to provision {aws} resources, your cluster must meet specific requirements. For more information, see <> and <>. + + +[.topic] +[[lbc-remove,lbc-remove.title]] +=== Migrate apps from deprecated ALB [.noloc]`Ingress Controller` + +[abstract] +-- +Learn how to migrate from the deprecated ALB Ingress Controller to the latest [.noloc]`{aws} Load Balancer Controller` release, ensuring smooth transition and uninterrupted load balancing capabilities. +-- + +This topic describes how to migrate from deprecated controller versions. More specifically, it describes how to remove deprecated versions of the [.noloc]`{aws} Load Balancer Controller`. + + + +* Deprecated versions cannot be upgraded. You must remove them first, and then install a current version. ++ +[[lbc-deprecated-list]] +* Deprecated versions include: ++ +** {aws} ALB Ingress Controller for [.noloc]`Kubernetes` ("Ingress Controller"), a predecessor to the [.noloc]`{aws} Load Balancer Controller`. 
+** Any `0.1.[.replaceable]``x``` version of the [.noloc]`{aws} Load Balancer Controller` + + +[[lbc-remove-desc,lbc-remove-desc.title]] +==== Remove the deprecated controller version + +[NOTE] +==== + +You may have installed the deprecated version using Helm or manually with [.noloc]`Kubernetes` manifests. Complete the procedure using the tool that you originally installed it with. + +==== +. If you installed the `incubator/aws-alb-ingress-controller` Helm chart, uninstall it. ++ +[source,shell,subs="verbatim,attributes"] +---- +helm delete aws-alb-ingress-controller -n kube-system +---- +. If you have version `0.1.[.replaceable]``x``` of the `eks-charts/aws-load-balancer-controller` chart installed, uninstall it. The upgrade from `0.1.[.replaceable]``x``` to version `1.0.0` doesn't work due to incompatibility with the webhook API version. ++ +[source,shell,subs="verbatim,attributes"] +---- +helm delete aws-load-balancer-controller -n kube-system +---- +. Check to see if the controller is currently installed. ++ +[source,shell,subs="verbatim,attributes"] +---- +kubectl get deployment -n kube-system alb-ingress-controller +---- ++ +This is the output if the controller isn't installed. ++ ++ +This is the output if the controller is installed. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAME READY UP-TO-DATE AVAILABLE AGE +alb-ingress-controller 1/1 1 1 122d +---- +. Enter the following commands to remove the controller. ++ +[source,shell,subs="verbatim,attributes"] +---- +kubectl delete -f https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/v1.1.8/docs/examples/alb-ingress-controller.yaml +kubectl delete -f https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/v1.1.8/docs/examples/rbac-role.yaml +---- + + +[[lbc-migrate,lbc-migrate.title]] +==== Migrate to [.noloc]`{aws} Load Balancer Controller` + +To migrate from the ALB Ingress Controller for [.noloc]`Kubernetes` to the [.noloc]`{aws} Load Balancer Controller`, you need to: + +. Remove the ALB Ingress Controller (see above). +. <> +. Add an additional policy to the IAM Role used by the [.noloc]`{aws} Load Balancer Controller`. This policy permits the LBC to manage resources created by the ALB Ingress Controller for [.noloc]`Kubernetes`. +. Download the IAM policy. This policy permits the [.noloc]`{aws} Load Balancer Controller` to manage resources created by the ALB Ingress Controller for [.noloc]`Kubernetes`. You can also https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy_v1_to_v2_additional.json[view the policy]. ++ +[source,shell,subs="verbatim,attributes"] +---- +curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.11.0/docs/install/iam_policy_v1_to_v2_additional.json +---- +. If your cluster is in the {aws} GovCloud (US-East) or {aws} GovCloud (US-West) {aws} Regions, then replace `{arn-aws}` with `arn:aws-us-gov:`.. ++ +[source,shell,subs="verbatim,attributes"] +---- +sed -i.bak -e 's|{arn-aws}|arn:aws-us-gov:|' iam_policy_v1_to_v2_additional.json +---- +. Create the IAM policy and note the ARN that is returned. ++ +[source,shell,subs="verbatim,attributes"] +---- +aws iam create-policy \ + --policy-name AWSLoadBalancerControllerAdditionalIAMPolicy \ + --policy-document file://iam_policy_v1_to_v2_additional.json +---- +. Attach the IAM policy to the IAM role used by the [.noloc]`{aws} Load Balancer Controller`. 
Replace [.replaceable]`your-role-name` with the name of the role, such as `AmazonEKSLoadBalancerControllerRole`. ++ +If you created the role using `eksctl`, then to find the role name that was created, open the link:cloudformation[{aws} CloudFormation console,type="console"] and select the *eksctl-[.replaceable]`my-cluster`-addon-iamserviceaccount-kube-system-aws-load-balancer-controller* stack. Select the *Resources* tab. The role name is in the *Physical ID* column. If your cluster is in the {aws} GovCloud (US-East) or {aws} GovCloud (US-West) {aws} Regions, then replace `{arn-aws}` with `arn:aws-us-gov:`. ++ +[source,shell,subs="verbatim,attributes"] +---- +aws iam attach-role-policy \ + --role-name your-role-name \ + --policy-arn {arn-aws}iam::111122223333:policy/AWSLoadBalancerControllerAdditionalIAMPolicy +---- + + +[.topic] +[[managing-coredns,managing-coredns.title]] +== Manage CoreDNS for DNS in Amazon EKS clusters + +[abstract] +-- +Learn how to manage the [.noloc]`CoreDNS` Amazon EKS add-on for DNS service discovery in [.noloc]`Kubernetes` clusters with configuration updates and version upgrades. +-- + +[TIP] +==== +With Amazon EKS Auto Mode, you don't need to install or upgrade networking add-ons. Auto Mode includes pod networking and load balancing capabilities. + +For more information, see <>. +==== + +[.noloc]`CoreDNS` is a flexible, extensible DNS server that can serve as the [.noloc]`Kubernetes` cluster DNS. When you launch an Amazon EKS cluster with at least one node, two replicas of the [.noloc]`CoreDNS` image are deployed by default, regardless of the number of nodes deployed in your cluster. The [.noloc]`CoreDNS` [.noloc]`Pods` provide name resolution for all [.noloc]`Pods` in the cluster. The [.noloc]`CoreDNS` [.noloc]`Pods` can be deployed to Fargate nodes if your cluster includes a Fargate Profile with a namespace that matches the namespace for the [.noloc]`CoreDNS` `deployment`. For more information on Fargate Profiles, see <>. For more information about [.noloc]`CoreDNS`, see https://kubernetes.io/docs/tasks/administer-cluster/coredns/[Using CoreDNS for Service Discovery] in the [.noloc]`Kubernetes` documentation. + +[[coredns-versions,coredns-versions.title]] +=== [.noloc]`CoreDNS` versions + +The following table lists the latest version of the Amazon EKS add-on type for each [.noloc]`Kubernetes` version. + +[options="header"] +|=== +| Kubernetes version | [.noloc]`CoreDNS` version +| 1.31 | v1.11.4-eksbuild.2 +| 1.30 | v1.11.4-eksbuild.2 +| 1.29 | v1.11.4-eksbuild.2 +| 1.28 | v1.10.1-eksbuild.17 +| 1.27 | v1.10.1-eksbuild.17 +| 1.26 | v1.9.3-eksbuild.21 +| 1.25 | v1.9.3-eksbuild.21 +| 1.24 | v1.9.3-eksbuild.21 +| 1.23 | v1.8.7-eksbuild.20 +|=== + +[IMPORTANT] +==== + +If you're self-managing this add-on, the versions in the table might not be the same as the available self-managed versions. For more information about updating the self-managed type of this add-on, see <>. + +==== + +[[coredns-upgrade,coredns-upgrade.title]] +=== Important [.noloc]`CoreDNS` upgrade considerations + +* To improve the stability and availability of the [.noloc]`CoreDNS` [.noloc]`Deployment`, versions `v1.9.3-eksbuild.6` and later and `v1.10.1-eksbuild.3` are deployed with a `PodDisruptionBudget`. If you've deployed an existing `PodDisruptionBudget`, your upgrade to these versions might fail. 
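++
+Before you upgrade, you can check whether a `PodDisruptionBudget` already exists in the `kube-system` namespace with a command such as the following:
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl get poddisruptionbudgets -n kube-system
+----
++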
If the upgrade fails, completing one of the following tasks should resolve the issue:
++
+** When doing the upgrade of the Amazon EKS add-on, choose to override the existing settings as your conflict resolution option. If you've made other custom settings to the [.noloc]`Deployment`, make sure to back up your settings before upgrading so that you can reapply your other custom settings after the upgrade.
+** Remove your existing `PodDisruptionBudget` and try the upgrade again.
+* In EKS add-on versions `v1.9.3-eksbuild.3` and later and `v1.10.1-eksbuild.6` and later, the [.noloc]`CoreDNS` [.noloc]`Deployment` sets the `readinessProbe` to use the `/ready` endpoint. This endpoint is enabled in the `Corefile` configuration file for [.noloc]`CoreDNS`.
++
+If you use a custom `Corefile`, you must add the `ready` plugin to the config, so that the `/ready` endpoint is active in [.noloc]`CoreDNS` for the probe to use.
+* In EKS add-on versions `v1.9.3-eksbuild.7` and later and `v1.10.1-eksbuild.4` and later, you can change the `PodDisruptionBudget`. You can edit the add-on and change these settings in the *Optional configuration settings* using the fields in the following example. This example shows the default `PodDisruptionBudget`.
++
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "podDisruptionBudget": {
+        "enabled": true,
+        "maxUnavailable": 1
+    }
+}
+----
+// Not using [.noloc]`Kubernetes` here because the _ causes issues with the rendering.
++
+You can set `maxUnavailable` or `minAvailable`, but you can't set both in a single `PodDisruptionBudget`. For more information about `PodDisruptionBudgets`, see https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget[Specifying a PodDisruptionBudget] in the _Kubernetes documentation_.
++
+Note that if you set `enabled` to `false`, the `PodDisruptionBudget` isn't removed. After you set this field to `false`, you must delete the `PodDisruptionBudget` object. Similarly, if you edit the add-on to use an older version of the add-on (downgrade the add-on) after upgrading to a version with a `PodDisruptionBudget`, the `PodDisruptionBudget` isn't removed. To delete the `PodDisruptionBudget`, you can run the following command:
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl delete poddisruptionbudget coredns -n kube-system
+----
+* In EKS add-on versions `v1.10.1-eksbuild.5` and later, the default toleration is changed from `node-role.kubernetes.io/master:NoSchedule` to `node-role.kubernetes.io/control-plane:NoSchedule` to comply with KEP 2067. For more information about KEP 2067, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint#renaming-the-node-rolekubernetesiomaster-node-taint[KEP-2067: Rename the kubeadm "master" label and taint] in the _Kubernetes Enhancement Proposals (KEPs)_ on [.noloc]`GitHub`.
++
+In EKS add-on versions `v1.8.7-eksbuild.8` and later and `v1.9.3-eksbuild.9` and later, both tolerations are set to be compatible with every [.noloc]`Kubernetes` version.
+* In EKS add-on versions `v1.9.3-eksbuild.11` and `v1.10.1-eksbuild.7` and later, the [.noloc]`CoreDNS` [.noloc]`Deployment` sets a default value for `topologySpreadConstraints`. The default value ensures that the [.noloc]`CoreDNS` [.noloc]`Pods` are spread across the Availability Zones if there are nodes in multiple Availability Zones available. You can set a custom value that will be used instead of the default value. 
The default value follows: ++ +[source,yaml,subs="verbatim,attributes"] +---- +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + k8s-app: kube-dns +---- + + +[[coredns-upgrade-1.11,coredns-upgrade-1.11.title]] +==== [.noloc]`CoreDNS` `v1.11` upgrade considerations + +* In EKS add-on versions `v1.11.1-eksbuild.4` and later, the container image is based on a https://gallery.ecr.aws/eks-distro-build-tooling/eks-distro-minimal-base[minimal base image] maintained by Amazon EKS Distro, which contains minimal packages and doesn't have shells. For more information, see https://distro.eks.amazonaws.com/[Amazon EKS Distro]. The usage and troubleshooting of the [.noloc]`CoreDNS` image remains the same. + + +[.topic] +[[coredns-add-on-create,coredns-add-on-create.title]] +=== Create the [.noloc]`CoreDNS` Amazon EKS add-on + +Create the [.noloc]`CoreDNS` Amazon EKS add-on. You must have a cluster before you create the add-on. For more information, see <>. + +. See which version of the add-on is installed on your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe deployment coredns --namespace kube-system | grep coredns: | cut -d : -f 3 +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +v1.10.1-eksbuild.13 +---- +. See which type of the add-on is installed on your cluster. Depending on the tool that you created your cluster with, you might not currently have the Amazon EKS add-on type installed on your cluster. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name coredns --query addon.addonVersion --output text +---- ++ +If a version number is returned, you have the Amazon EKS type of the add-on installed on your cluster and don't need to complete the remaining steps in this procedure. If an error is returned, you don't have the Amazon EKS type of the add-on installed on your cluster. Complete the remaining steps of this procedure to install it. +. Save the configuration of your currently installed add-on. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get deployment coredns -n kube-system -o yaml > aws-k8s-coredns-old.yaml +---- +. Create the add-on using the {aws} CLI. If you want to use the {aws-management-console} or `eksctl` to create the add-on, see <> and specify `coredns` for the add-on name. Copy the command that follows to your device. Make the following modifications to the command, as needed, and then run the modified command. ++ +** Replace [.replaceable]`my-cluster` with the name of your cluster. +** Replace [.replaceable]`v1.11.3-eksbuild.1` with the latest version listed in the <> for your cluster version. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks create-addon --cluster-name my-cluster --addon-name coredns --addon-version v1.11.3-eksbuild.1 +---- ++ +If you've applied custom settings to your current add-on that conflict with the default settings of the Amazon EKS add-on, creation might fail. If creation fails, you receive an error that can help you resolve the issue. Alternatively, you can add `--resolve-conflicts OVERWRITE` to the previous command. This allows the add-on to overwrite any existing custom settings. Once you've created the add-on, you can update it with your custom settings. +. 
Confirm that the latest version of the add-on for your cluster's [.noloc]`Kubernetes` version was added to your cluster. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name coredns --query addon.addonVersion --output text +---- ++ +It might take several seconds for add-on creation to complete. ++ +An example output is as follows. ++ +[source,json,subs="verbatim,attributes"] +---- +v1.11.3-eksbuild.1 +---- +. If you made custom settings to your original add-on, before you created the Amazon EKS add-on, use the configuration that you saved in a previous step to update the Amazon EKS add-on with your custom settings. For instructions to update the add-on, see <>. + + +[.topic] +[[coredns-add-on-update,coredns-add-on-update.title]] +=== Update the [.noloc]`CoreDNS` Amazon EKS add-on + +Update the Amazon EKS type of the add-on. If you haven't added the Amazon EKS add-on to your cluster, either <> or see <>. + +Before you begin, review the upgrade considerations. For more information, see <>. + +. See which version of the add-on is installed on your cluster. Replace [.replaceable]`my-cluster` with your cluster name. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name coredns --query "addon.addonVersion" --output text +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +v1.10.1-eksbuild.13 +---- ++ +If the version returned is the same as the version for your cluster's [.noloc]`Kubernetes` version in the <>, then you already have the latest version installed on your cluster and don't need to complete the rest of this procedure. If you receive an error, instead of a version number in your output, then you don't have the Amazon EKS type of the add-on installed on your cluster. You need to <> before you can update it with this procedure. +. Save the configuration of your currently installed add-on. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get deployment coredns -n kube-system -o yaml > aws-k8s-coredns-old.yaml +---- +. Update your add-on using the {aws} CLI. If you want to use the {aws-management-console} or `eksctl` to update the add-on, see <>. Copy the command that follows to your device. Make the following modifications to the command, as needed, and then run the modified command. ++ +** Replace [.replaceable]`my-cluster` with the name of your cluster. +** Replace [.replaceable]`v1.11.3-eksbuild.1` with the latest version listed in the <> for your cluster version. +** The `--resolve-conflicts[.replaceable]``PRESERVE``` option preserves existing configuration values for the add-on. If you've set custom values for add-on settings, and you don't use this option, Amazon EKS overwrites your values with its default values. If you use this option, then we recommend testing any field and value changes on a non-production cluster before updating the add-on on your production cluster. If you change this value to `OVERWRITE`, all settings are changed to Amazon EKS default values. If you've set custom values for any settings, they might be overwritten with Amazon EKS default values. If you change this value to `none`, Amazon EKS doesn't change the value of any settings, but the update might fail. If the update fails, you receive an error message to help you resolve the conflict. 
+** If you're not updating a configuration setting, remove `--configuration-values '{[.replaceable]``"replicaCount":3``}'` from the command. If you're updating a configuration setting, replace [.replaceable]`"replicaCount":3` with the setting that you want to set. In this example, the number of replicas of [.noloc]`CoreDNS` is set to `3`. The value that you specify must be valid for the configuration schema. If you don't know the configuration schema, run `aws eks describe-addon-configuration --addon-name coredns --addon-version [.replaceable]``v1.11.3-eksbuild.1```, replacing [.replaceable]`v1.11.3-eksbuild.1` with the version number of the add-on that you want to see the configuration for. The schema is returned in the output. If you have any existing custom configuration, want to remove it all, and set the values for all settings back to Amazon EKS defaults, remove [.replaceable]`"replicaCount":3` from the command, so that you have empty `{}`. For more information about [.noloc]`CoreDNS` settings, see https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/[Customizing DNS Service] in the [.noloc]`Kubernetes` documentation. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks update-addon --cluster-name my-cluster --addon-name coredns --addon-version v1.11.3-eksbuild.1 \ + --resolve-conflicts PRESERVE --configuration-values '{"replicaCount":3}' +---- ++ +It might take several seconds for the update to complete. +. Confirm that the add-on version was updated. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name coredns +---- ++ +It might take several seconds for the update to complete. ++ +An example output is as follows. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "addon": { + "addonName": "coredns", + "clusterName": "my-cluster", + "status": "ACTIVE", + "addonVersion": "v1.11.3-eksbuild.1", + "health": { + "issues": [] + }, + "addonArn": "{arn-aws}eks:region:111122223333:addon/my-cluster/coredns/d2c34f06-1111-2222-1eb0-24f64ce37fa4", + "createdAt": "2023-03-01T16:41:32.442000+00:00", + "modifiedAt": "2023-03-01T18:16:54.332000+00:00", + "tags": {}, + "configurationValues": "{\"replicaCount\":3}" + } +} +---- + + +[.topic] +[[coredns-add-on-self-managed-update,coredns-add-on-self-managed-update.title]] +=== Update the [.noloc]`CoreDNS` Amazon EKS self-managed add-on + +[IMPORTANT] +==== + +We recommend adding the Amazon EKS type of the add-on to your cluster instead of using the self-managed type of the add-on. If you're not familiar with the difference between the types, see <>. For more information about adding an Amazon EKS add-on to your cluster, see <>. If you're unable to use the Amazon EKS add-on, we encourage you to submit an issue about why you can't to the https://github.com/aws/containers-roadmap/issues[Containers roadmap GitHub repository]. + +==== + +Before you begin, review the upgrade considerations. For more information, see <>. + +. Confirm that you have the self-managed type of the add-on installed on your cluster. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name coredns --query addon.addonVersion --output text +---- ++ +If an error message is returned, you have the self-managed type of the add-on installed on your cluster. Complete the remaining steps in this procedure. 
If a version number is returned, you have the Amazon EKS type of the add-on installed on your cluster. To update the Amazon EKS type of the add-on, use the procedure in <>, rather than using this procedure. If you're not familiar with the differences between the add-on types, see <>. +. See which version of the container image is currently installed on your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe deployment coredns -n kube-system | grep Image | cut -d ":" -f 3 +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +v1.8.7-eksbuild.2 +---- +. If your current [.noloc]`CoreDNS` version is `v1.5.0` or later, but earlier than the version listed in the <> table, then skip this step. If your current version is earlier than `1.5.0`, then you need to modify the `ConfigMap` for [.noloc]`CoreDNS` to use the forward add-on, rather than the proxy add-on. ++ +.. Open the `ConfigMap` with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit configmap coredns -n kube-system +---- +.. Replace `proxy` in the following line with `forward`. Save the file and exit the editor. ++ +[source,bash,subs="verbatim,attributes"] +---- +proxy . /etc/resolv.conf +---- +. If you originally deployed your cluster on [.noloc]`Kubernetes` `1.17` or earlier, then you may need to remove a discontinued line from your [.noloc]`CoreDNS` manifest. ++ +IMPORTANT: You must complete this step before updating to [.noloc]`CoreDNS` version `1.7.0`, but it's recommended that you complete this step even if you're updating to an earlier version. ++ +.. Check to see if your [.noloc]`CoreDNS` manifest has the line. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get configmap coredns -n kube-system -o jsonpath='{$.data.Corefile}' | grep upstream +---- ++ +If no output is returned, your manifest doesn't have the line and you can skip to the next step to update [.noloc]`CoreDNS`. If output is returned, then you need to remove the line. +.. Edit the `ConfigMap` with the following command, removing the line in the file that has the word `upstream` in it. Do not change anything else in the file. Once the line is removed, save the changes. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit configmap coredns -n kube-system -o yaml +---- +. Retrieve your current [.noloc]`CoreDNS` image version: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe deployment coredns -n kube-system | grep Image +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +602401143452.dkr.ecr.region-code.amazonaws.com/eks/coredns:v1.8.7-eksbuild.2 +---- +. If you're updating to [.noloc]`CoreDNS` `1.8.3` or later, then you need to add the `endpointslices` permission to the `system:coredns` [.noloc]`Kubernetes` `clusterrole`. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit clusterrole system:coredns -n kube-system +---- ++ +Add the following lines under the existing permissions lines in the `rules` section of the file. ++ +[source,yaml,subs="verbatim,attributes"] +---- +[...] +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch +[...] +---- +. Update the [.noloc]`CoreDNS` add-on by replacing [.replaceable]`602401143452` and [.replaceable]`region-code` with the values from the output returned in a previous step. 
Replace [.replaceable]`v1.11.3-eksbuild.1` with the [.noloc]`CoreDNS` version listed in the <> for your [.noloc]`Kubernetes` version.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl set image deployment.apps/coredns -n kube-system coredns=602401143452.dkr.ecr.region-code.amazonaws.com/eks/coredns:v1.11.3-eksbuild.1
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+deployment.apps/coredns image updated
+----
+. Check the container image version again to confirm that it was updated to the version that you specified in the previous step.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl describe deployment coredns -n kube-system | grep Image | cut -d ":" -f 3
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+v1.11.3-eksbuild.1
+----
+
+
+[.topic]
+[[coredns-autoscaling,coredns-autoscaling.title]]
+=== Scale [.noloc]`CoreDNS` [.noloc]`Pods` for high DNS traffic
+
+[abstract]
+--
+Learn how the Amazon EKS add-on for [.noloc]`CoreDNS` autoscales to handle increased load on DNS pods, improving application availability and cluster scalability.
+--
+
+When you launch an Amazon EKS cluster with at least one node, a [.noloc]`Deployment` of two replicas of the [.noloc]`CoreDNS` image is deployed by default, regardless of the number of nodes deployed in your cluster. The [.noloc]`CoreDNS` Pods provide name resolution for all Pods in the cluster. Applications use name resolution to connect to pods and services in the cluster, as well as to services outside the cluster. As the number of requests for name resolution (queries) from pods increases, the [.noloc]`CoreDNS` pods can get overwhelmed, slow down, and reject requests that they can`'t handle.
+
+To handle the increased load on the [.noloc]`CoreDNS` pods, consider an autoscaling system for [.noloc]`CoreDNS`. Amazon EKS can manage the autoscaling of the [.noloc]`CoreDNS` Deployment in the EKS Add-on version of [.noloc]`CoreDNS`. This [.noloc]`CoreDNS` autoscaler continuously monitors the cluster state, including the number of nodes and CPU cores. Based on that information, the controller dynamically adapts the number of replicas of the [.noloc]`CoreDNS` deployment in an EKS cluster. This feature works for [.noloc]`CoreDNS` `v1.9` and EKS release version `1.25` and later. For more information about which versions are compatible with [.noloc]`CoreDNS` Autoscaling, see the following section.
+
+We recommend using this feature in conjunction with other https://aws.github.io/aws-eks-best-practices/cluster-autoscaling/[EKS Cluster Autoscaling best practices] to improve overall application availability and cluster scalability.
+
+[[coredns-autoscaling-prereqs,coredns-autoscaling-prereqs.title]]
+==== Prerequisites
+
+For Amazon EKS to scale your [.noloc]`CoreDNS` deployment, there are three prerequisites:
+
+
+
+* You must be using the _EKS Add-on_ version of [.noloc]`CoreDNS`.
+* Your cluster must be running at least the minimum cluster versions and platform versions.
+* Your cluster must be running at least the minimum version of the EKS Add-on of [.noloc]`CoreDNS`.
+
+
+[[coredns-autoscaling-cluster-version,coredns-autoscaling-cluster-version.title]]
+===== Minimum cluster version
+
+Autoscaling of [.noloc]`CoreDNS` is done by a new component in the cluster control plane, managed by Amazon EKS.
Because of this, you must upgrade your cluster to an EKS release that supports the minimum platform version that has the new component.
+
+You need an Amazon EKS cluster. To deploy one, see <>. The cluster must be [.noloc]`Kubernetes` version `1.25` or later, and it must be running one of the [.noloc]`Kubernetes` versions and platform versions listed in the following table, or a later version. You can check your current [.noloc]`Kubernetes` version by replacing [.replaceable]`my-cluster` in the following command with the name of your cluster and then running the modified command:
+
+[source,bash,subs="verbatim,attributes"]
+----
+aws eks describe-cluster --name my-cluster --query cluster.version --output text
+----
+
+[cols="1,1", options="header"]
+|===
+|Kubernetes version
+|Platform version
+
+
+|`1.29.3`
+|`eks.7`
+
+|`1.28.8`
+|`eks.13`
+
+|`1.27.12`
+|`eks.17`
+
+|`1.26.15`
+|`eks.18`
+
+|`1.25.16`
+|`eks.19`
+|===
+
+[NOTE]
+====
+
+Every platform version of later [.noloc]`Kubernetes` versions is also supported, for example [.noloc]`Kubernetes` version `1.30` from `eks.1` onward.
+
+====
+
+[[coredns-autoscaling-coredns-version,coredns-autoscaling-coredns-version.title]]
+===== Minimum EKS Add-on version
+
+[cols="1,1,1,1,1,1", options="header"]
+|===
+|Kubernetes version
+|1.29
+|1.28
+|1.27
+|1.26
+|1.25
+
+
+|Minimum EKS Add-on version
+|`v1.11.1-eksbuild.9`
+|`v1.10.1-eksbuild.11`
+|`v1.10.1-eksbuild.11`
+|`v1.9.3-eksbuild.15`
+|`v1.9.3-eksbuild.15`
+|===
+
+
+[[coredns-autoscaling-console,coredns-autoscaling-console.title]]
+.Configuring [.noloc]`CoreDNS` autoscaling in the {aws-management-console}
+[%collapsible]
+====
+. Ensure that your cluster is at or above the minimum cluster version.
++
+Amazon EKS upgrades clusters between platform versions of the same [.noloc]`Kubernetes` version automatically, and you can`'t start this process yourself. Instead, you can upgrade your cluster to the next [.noloc]`Kubernetes` version, and the cluster is then upgraded to that [.noloc]`Kubernetes` version and the latest platform version. For example, if you upgrade from `1.25` to `1.26`, the cluster will upgrade to `1.26.15 eks.18`.
++
+New [.noloc]`Kubernetes` versions sometimes introduce significant changes. Therefore, we recommend that you test the behavior of your applications by using a separate cluster of the new [.noloc]`Kubernetes` version before you update your production clusters.
++
+To upgrade a cluster to a new [.noloc]`Kubernetes` version, follow the procedure in <>.
+. Ensure that you have the EKS Add-on for [.noloc]`CoreDNS`, not the self-managed [.noloc]`CoreDNS` Deployment.
++
+Depending on the tool that you created your cluster with, you might not currently have the Amazon EKS add-on type installed on your cluster. To see which type of the add-on is installed on your cluster, you can run the following command. Replace `my-cluster` with the name of your cluster.
++
+[source,shell,subs="verbatim,attributes"]
+----
+aws eks describe-addon --cluster-name my-cluster --addon-name coredns --query addon.addonVersion --output text
+----
++
+If a version number is returned, you have the Amazon EKS type of the add-on installed on your cluster and you can continue with the next step. If an error is returned, you don't have the Amazon EKS type of the add-on installed on your cluster. Complete the remaining steps of the procedure <> to replace the self-managed version with the Amazon EKS add-on.
+. 
Ensure that your EKS Add-on for [.noloc]`CoreDNS` is at a version the same or higher than the minimum EKS Add-on version.
++
+See which version of the add-on is installed on your cluster. You can check in the {aws-management-console} or run the following command:
++
+[source,shell,subs="verbatim,attributes"]
+----
+kubectl describe deployment coredns --namespace kube-system | grep coredns: | cut -d : -f 3
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+v1.10.1-eksbuild.13
+----
++
+Compare this version with the minimum EKS Add-on version in the previous section. If needed, upgrade the EKS Add-on to a higher version by following the procedure <>.
+. Add the autoscaling configuration to the *Optional configuration settings* of the EKS Add-on.
++
+.. Open the link:eks/home#/clusters[Amazon EKS console,type="console"].
+.. In the left navigation pane, select *Clusters*, and then select the name of the cluster that you want to configure the add-on for.
+.. Choose the *Add-ons* tab.
+.. Select the box in the top right of the [.noloc]`CoreDNS` add-on box and then choose *Edit*.
+.. On the *Configure [.noloc]`CoreDNS`* page:
++
+... Select the *Version* that you'd like to use. We recommend that you keep the same version as the previous step, and update the version and configuration in separate actions.
+... Expand the *Optional configuration settings*.
+... Enter the JSON key `"autoScaling":` with a value of a nested JSON object that has a key `"enabled":` and a value of `true` in *Configuration values*. The resulting text must be a valid JSON object. If this key and value are the only data in the text box, surround the key and value with curly braces `{ }`. The following example shows that autoscaling is enabled:
++
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "autoScaling": {
+        "enabled": true
+    }
+}
+----
+... (Optional) You can provide minimum and maximum values that autoscaling can scale the number of [.noloc]`CoreDNS` pods to.
++
+The following example shows that autoscaling is enabled and that all of the optional keys have values. We recommend that the minimum number of [.noloc]`CoreDNS` pods is always greater than 2 to provide resilience for the DNS service in the cluster.
++
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "autoScaling": {
+        "enabled": true,
+        "minReplicas": 2,
+        "maxReplicas": 10
+    }
+}
+----
+.. To apply the new configuration by replacing the [.noloc]`CoreDNS` pods, choose *Save changes*.
++
+Amazon EKS applies changes to the EKS Add-ons by using a _rollout_ of the [.noloc]`Kubernetes` Deployment for CoreDNS. You can track the status of the rollout in the *Update history* of the add-on in the {aws-management-console} and with `kubectl rollout status deployment/coredns --namespace kube-system`.
++
+`kubectl rollout` has the following commands:
++
+[source,shell,subs="verbatim,attributes"]
+----
+kubectl rollout
+
+history -- View rollout history
+pause -- Mark the provided resource as paused
+restart -- Restart a resource
+resume -- Resume a paused resource
+status -- Show the status of the rollout
+undo -- Undo a previous rollout
+----
++
+If the rollout takes too long, Amazon EKS will undo the rollout, and a message with the type of *Addon Update* and a status of *Failed* will be added to the *Update history* of the add-on. To investigate any issues, start from the history of the rollout, and run `kubectl logs` on a [.noloc]`CoreDNS` pod to see the logs of [.noloc]`CoreDNS`.
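++
+For example, you can list the [.noloc]`CoreDNS` pods and view their recent log lines with commands like the following. This is a minimal sketch that assumes the [.noloc]`CoreDNS` pods carry the usual `k8s-app=kube-dns` label; adjust the selector if your pods are labeled differently.
++
+[source,bash,subs="verbatim,attributes"]
+----
+# List the CoreDNS pods in the kube-system namespace.
+kubectl get pods -n kube-system -l k8s-app=kube-dns
+
+# View recent log lines from the pods that match the label selector.
+kubectl logs -n kube-system -l k8s-app=kube-dns --tail=50
+----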
+. If the new entry in the *Update history* has a status of *Successful*, then the rollout has completed and the add-on is using the new configuration in all of the [.noloc]`CoreDNS` pods. As you change the number of nodes and CPU cores of nodes in the cluster, Amazon EKS scales the number of replicas of the [.noloc]`CoreDNS` deployment.
+
+====
+
+[[coredns-autoscaling-cli,coredns-autoscaling-cli.title]]
+.Configuring [.noloc]`CoreDNS` autoscaling in the {aws} Command Line Interface
+[%collapsible]
+====
+. Ensure that your cluster is at or above the minimum cluster version.
++
+Amazon EKS upgrades clusters between platform versions of the same [.noloc]`Kubernetes` version automatically, and you can`'t start this process yourself. Instead, you can upgrade your cluster to the next [.noloc]`Kubernetes` version, and the cluster is then upgraded to that [.noloc]`Kubernetes` version and the latest platform version. For example, if you upgrade from `1.25` to `1.26`, the cluster will upgrade to `1.26.15 eks.18`.
++
+New [.noloc]`Kubernetes` versions sometimes introduce significant changes. Therefore, we recommend that you test the behavior of your applications by using a separate cluster of the new [.noloc]`Kubernetes` version before you update your production clusters.
++
+To upgrade a cluster to a new [.noloc]`Kubernetes` version, follow the procedure in <>.
+. Ensure that you have the EKS Add-on for [.noloc]`CoreDNS`, not the self-managed [.noloc]`CoreDNS` Deployment.
++
+Depending on the tool that you created your cluster with, you might not currently have the Amazon EKS add-on type installed on your cluster. To see which type of the add-on is installed on your cluster, you can run the following command. Replace `my-cluster` with the name of your cluster.
++
+[source,shell,subs="verbatim,attributes"]
+----
+aws eks describe-addon --cluster-name my-cluster --addon-name coredns --query addon.addonVersion --output text
+----
++
+If a version number is returned, you have the Amazon EKS type of the add-on installed on your cluster. If an error is returned, you don't have the Amazon EKS type of the add-on installed on your cluster. Complete the remaining steps of the procedure <> to replace the self-managed version with the Amazon EKS add-on.
+. Ensure that your EKS Add-on for [.noloc]`CoreDNS` is at a version the same or higher than the minimum EKS Add-on version.
++
+See which version of the add-on is installed on your cluster. You can check in the {aws-management-console} or run the following command:
++
+[source,shell,subs="verbatim,attributes"]
+----
+kubectl describe deployment coredns --namespace kube-system | grep coredns: | cut -d : -f 3
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+v1.10.1-eksbuild.13
+----
++
+Compare this version with the minimum EKS Add-on version in the previous section. If needed, upgrade the EKS Add-on to a higher version by following the procedure <>.
+. Add the autoscaling configuration to the *Optional configuration settings* of the EKS Add-on.
++
+Run the following {aws} CLI command. Replace `my-cluster` with the name of your cluster.
++
+[source,shell,subs="verbatim,attributes"]
+----
+aws eks update-addon --cluster-name my-cluster --addon-name coredns \
+    --resolve-conflicts PRESERVE --configuration-values '{"autoScaling":{"enabled":true}}'
+----
++
+Amazon EKS applies changes to the EKS Add-ons by using a _rollout_ of the [.noloc]`Kubernetes` Deployment for CoreDNS.
You can track the status of the rollout in the *Update history* of the add-on in the {aws-management-console} and with `kubectl rollout status deployment/coredns --namespace kube-system`.
++
+`kubectl rollout` has the following commands:
++
+[source,shell,subs="verbatim,attributes"]
+----
+kubectl rollout
+
+history -- View rollout history
+pause -- Mark the provided resource as paused
+restart -- Restart a resource
+resume -- Resume a paused resource
+status -- Show the status of the rollout
+undo -- Undo a previous rollout
+----
++
+If the rollout takes too long, Amazon EKS will undo the rollout, and a message with the type of *Addon Update* and a status of *Failed* will be added to the *Update history* of the add-on. To investigate any issues, start from the history of the rollout, and run `kubectl logs` on a [.noloc]`CoreDNS` pod to see the logs of [.noloc]`CoreDNS`.
+. (Optional) You can provide minimum and maximum values that autoscaling can scale the number of [.noloc]`CoreDNS` pods to.
++
+The following example shows that autoscaling is enabled and that all of the optional keys have values. We recommend that the minimum number of [.noloc]`CoreDNS` pods is always greater than 2 to provide resilience for the DNS service in the cluster.
++
+[source,shell,subs="verbatim,attributes"]
+----
+aws eks update-addon --cluster-name my-cluster --addon-name coredns \
+    --resolve-conflicts PRESERVE --configuration-values '{"autoScaling":{"enabled":true,"minReplicas":2,"maxReplicas":10}}'
+----
+. Check the status of the update to the add-on by running the following command:
++
+[source,shell,subs="verbatim,attributes"]
+----
+aws eks describe-addon --cluster-name my-cluster --addon-name coredns
+----
++
+If you see this line: `"status": "ACTIVE"`, then the rollout has completed and the add-on is using the new configuration in all of the [.noloc]`CoreDNS` pods. As you change the number of nodes and CPU cores of nodes in the cluster, Amazon EKS scales the number of replicas of the [.noloc]`CoreDNS` deployment.
+
+====
+
+[.topic]
+[[coredns-metrics,coredns-metrics.title]]
+=== Monitor [.noloc]`Kubernetes` DNS resolution with [.noloc]`CoreDNS` metrics
+
+[abstract]
+--
+Learn how to collect [.noloc]`CoreDNS` metrics in Amazon EKS using Prometheus or CloudWatch Agent, enabling monitoring and observability for your [.noloc]`Kubernetes` DNS resolution.
+--
+
+[.noloc]`CoreDNS` as an EKS add-on exposes the metrics from [.noloc]`CoreDNS` on port `9153` in the Prometheus format in the `kube-dns` service. You can use Prometheus, the Amazon CloudWatch agent, or any other compatible system to scrape (collect) these metrics.
+
+For an example _scrape configuration_ that is compatible with both Prometheus and the CloudWatch agent, see link:AmazonCloudWatch/latest/monitoring/ContainerInsights-Prometheus-Setup-configure.html[CloudWatch agent configuration for Prometheus,type="documentation"] in the _Amazon CloudWatch User Guide_.
+
+[.topic]
+[[managing-kube-proxy,managing-kube-proxy.title]]
+== Manage `kube-proxy` in Amazon EKS clusters
+
+[abstract]
+--
+Learn how to manage the `kube-proxy` add-on on your Amazon EKS cluster to manage network rules and enable network communication to your Pods.
+--
+
+[TIP]
+====
+With Amazon EKS Auto Mode, you don't need to install or upgrade networking add-ons. Auto Mode includes pod networking and load balancing capabilities.
+
+For more information, see <>.
+==== + +//GDC: Need DF to review + + +We recommend adding the Amazon EKS type of the add-on to your cluster instead of using the self-managed type of the add-on. If you're not familiar with the difference between the types, see <>. For more information about adding an Amazon EKS add-on to your cluster, see <>. If you're unable to use the Amazon EKS add-on, we encourage you to submit an issue about why you can't to the https://github.com/aws/containers-roadmap/issues[Containers roadmap GitHub repository]. + +The `kube-proxy` add-on is deployed on each Amazon EC2 node in your Amazon EKS cluster. It maintains network rules on your nodes and enables network communication to your [.noloc]`Pods`. The add-on isn't deployed to Fargate nodes in your cluster. For more information, see https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/[kube-proxy] in the [.noloc]`Kubernetes` documentation. + +=== Install as Amazon EKS Add-on + + +[[kube-proxy-versions,kube-proxy-versions.title]] +=== `kube-proxy` versions + +The following table lists the latest version of the Amazon EKS add-on type for each [.noloc]`Kubernetes` version. + +[options="header"] +|=== +| Kubernetes version | `kube-proxy` version +| 1.31 | v1.31.3-eksbuild.2 +| 1.30 | v1.30.7-eksbuild.2 +| 1.29 | v1.29.11-eksbuild.2 +| 1.28 | v1.28.15-eksbuild.4 +| 1.27 | v1.27.16-eksbuild.14 +| 1.26 | v1.26.15-eksbuild.19 +| 1.25 | v1.25.16-eksbuild.22 +| 1.24 | v1.24.17-eksbuild.19 +| 1.23 | v1.23.17-eksbuild.20 +|=== + +[NOTE] +==== + +An earlier version of the documentation was incorrect. `kube-proxy` versions `v1.28.5`, `v1.27.9`, and `v1.26.12` aren't available. + +If you're self-managing this add-on, the versions in the table might not be the same as the available self-managed versions. + +==== + +[[managing-kube-proxy-images,managing-kube-proxy-images.title]] +=== `kube-proxy` container image migration + +There are two types of the `kube-proxy` container image available for each Amazon EKS cluster version: + + + +* *Default* – This image type is based on a Debian-based Docker image that is maintained by the [.noloc]`Kubernetes` upstream community. +* *Minimal* – This image type is based on a https://gallery.ecr.aws/eks-distro-build-tooling/eks-distro-minimal-base-iptables[minimal base image] maintained by Amazon EKS Distro, which contains minimal packages and doesn't have shells. For more information, see https://distro.eks.amazonaws.com/[Amazon EKS Distro]. + +The following table lists the latest available self-managed `kube-proxy` container image version for each Amazon EKS cluster version. + +// GDC Update + +[options="header"] +|=== +| Version | kube-proxy (default type) | kube-proxy (minimal type) +| 1.31 | Only minimal type is available | v1.31.2-minimal-eksbuild.3 +| 1.30 | Only minimal type is available | v1.30.6-minimal-eksbuild.3 +| 1.29 | Only minimal type is available | v1.29.10-minimal-eksbuild.3 +| 1.28 | Only minimal type is available | v1.28.15-minimal-eksbuild.4 +| 1.27 | Only minimal type is available | v1.27.16-minimal-eksbuild.14 +| 1.26 | Only minimal type is available | v1.26.15-minimal-eksbuild.19 +| 1.25 | Only minimal type is available | v1.25.16-minimal-eksbuild.22 +| 1.24 | v1.24.10-eksbuild.2 | v1.24.17-minimal-eksbuild.19 +| 1.23 | v1.23.16-eksbuild.2 | v1.23.17-minimal-eksbuild.20 +|=== + + +* The default image type isn't available for [.noloc]`Kubernetes` version `1.25` and later. You must use the minimal image type. 
+* When you <>, you specify a valid Amazon EKS add-on version, which might not be a version listed in this table. This is because <> versions don't always match container image versions specified when updating the self-managed type of this add-on. When you update the self-managed type of this add-on, you specify a valid container image version listed in this table. + + +[.topic] +[[kube-proxy-add-on-self-managed-update,kube-proxy-add-on-self-managed-update.title]] +=== Update the Kubernetes `kube-proxy` self-managed add-on + +[IMPORTANT] +==== + +We recommend adding the Amazon EKS type of the add-on to your cluster instead of using the self-managed type of the add-on. If you're not familiar with the difference between the types, see <>. For more information about adding an Amazon EKS add-on to your cluster, see <>. If you're unable to use the Amazon EKS add-on, we encourage you to submit an issue about why you can't to the https://github.com/aws/containers-roadmap/issues[Containers roadmap GitHub repository]. + +==== + +[[managing-kube-proxy-prereqs,managing-kube-proxy-prereqs.title]] +==== Prerequisites + +* An existing Amazon EKS cluster. To deploy one, see <>. + + +[[managing-kube-proxy-considerations,managing-kube-proxy-considerations.title]] +==== Considerations + +* `Kube-proxy` on an Amazon EKS cluster has the same https://kubernetes.io/releases/version-skew-policy/#kube-proxy[compatibility and skew policy as Kubernetes]. Learn how to <>. +. Confirm that you have the self-managed type of the add-on installed on your cluster. Replace [.replaceable]`my-cluster` with the name of your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-addon --cluster-name my-cluster --addon-name kube-proxy --query addon.addonVersion --output text +---- ++ +If an error message is returned, you have the self-managed type of the add-on installed on your cluster. The remaining steps in this topic are for updating the self-managed type of the add-on. If a version number is returned, you have the Amazon EKS type of the add-on installed on your cluster. To update it, use the procedure in <>, rather than using the procedure in this topic. If you're not familiar with the differences between the add-on types, see <>. +. See which version of the container image is currently installed on your cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl describe daemonset kube-proxy -n kube-system | grep Image +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +Image: 602401143452.dkr.ecr.region-code.amazonaws.com/eks/kube-proxy:v1.29.1-eksbuild.2 +---- ++ +In the example output, [.replaceable]`v1.29.1-eksbuild.2` is the version installed on the cluster. +. Update the `kube-proxy` add-on by replacing [.replaceable]`602401143452` and [.replaceable]`region-code` with the values from your output in the previous step. Replace [.replaceable]`v1.30.6-eksbuild.3` with the `kube-proxy` version listed in the <> table. ++ +IMPORTANT: The manifests for each image type are different and not compatible between the _default_ or _minimal_ image types. You must use the same image type as the previous image, so that the entrypoint and arguments match. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl set image daemonset.apps/kube-proxy -n kube-system kube-proxy=602401143452.dkr.ecr.region-code.amazonaws.com/eks/kube-proxy:v1.30.6-eksbuild.3 +---- ++ +An example output is as follows. 
++
+[source,bash,subs="verbatim,attributes"]
+----
+daemonset.apps/kube-proxy image updated
+----
+. Confirm that the new version is now installed on your cluster.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl describe daemonset kube-proxy -n kube-system | grep Image | cut -d ":" -f 3
+----
++
+An example output is as follows.
++
+[source,bash,subs="verbatim,attributes"]
+----
+v1.30.6-eksbuild.3
+----
+. If you're using `x86` and `Arm` nodes in the same cluster and your cluster was deployed before August 17, 2020, then edit your `kube-proxy` manifest to include a node selector for multiple hardware architectures with the following command. This is a one-time operation. After you've added the selector to your manifest, you don't need to add it each time you update the add-on. If your cluster was deployed on or after August 17, 2020, then `kube-proxy` is already multi-architecture capable.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl edit -n kube-system daemonset/kube-proxy
+----
++
+Add the following node selector to the file in the editor and then save the file. For an example of where to include this text in the editor, see the https://github.com/aws/amazon-vpc-cni-k8s/blob/release-1.11/config/master/aws-k8s-cni.yaml#L265-#L269[CNI manifest] file on [.noloc]`GitHub`. This enables [.noloc]`Kubernetes` to pull the correct hardware image based on the node's hardware architecture.
++
+[source,yaml,subs="verbatim,attributes"]
+----
+- key: "kubernetes.io/arch"
+  operator: In
+  values:
+  - amd64
+  - arm64
+----
+. If your cluster was originally created with [.noloc]`Kubernetes` version `1.14` or later, then you can skip this step because `kube-proxy` already includes this `Affinity Rule`. If you originally created an Amazon EKS cluster with [.noloc]`Kubernetes` version `1.13` or earlier and intend to use Fargate nodes in your cluster, then edit your `kube-proxy` manifest to include a `NodeAffinity` rule to prevent `kube-proxy` [.noloc]`Pods` from scheduling on Fargate nodes. This is a one-time edit. Once you've added the `Affinity Rule` to your manifest, you don't need to add it each time that you update the add-on. Edit your `kube-proxy` [.noloc]`DaemonSet`.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl edit -n kube-system daemonset/kube-proxy
+----
++
+Add the following `Affinity Rule` to the `spec` section of the [.noloc]`DaemonSet` in the file in the editor and then save the file. For an example of where to include this text in the editor, see the https://github.com/aws/amazon-vpc-cni-k8s/blob/release-1.11/config/master/aws-k8s-cni.yaml#L270-#L273[CNI manifest] file on [.noloc]`GitHub`.
++
+[source,yaml,subs="verbatim,attributes"]
+----
+- key: eks.amazonaws.com/compute-type
+  operator: NotIn
+  values:
+  - fargate
+----
diff --git a/latest/ug/networking/eks-networking.adoc b/latest/ug/networking/eks-networking.adoc
new file mode 100644
index 00000000..75fcb437
--- /dev/null
+++ b/latest/ug/networking/eks-networking.adoc
@@ -0,0 +1,40 @@
+//!!NODE_ROOT
+[[eks-networking,eks-networking.title]]
+= Configure networking for Amazon EKS clusters
+:doctype: book
+:sectnums:
+:toc: left
+:icons: font
+:experimental:
+:idprefix:
+:idseparator: -
+:sourcedir: .
+:info_doctype: chapter +:info_title: Configure networking for Amazon EKS clusters +:info_titleabbrev: Configure networking +:info_abstract: Learn how to configure networking for your Amazon EKS cluster using a VPC, subnets, \ + security groups, and networking add-ons to ensure secure and efficient \ + communication. + +include::../attributes.txt[] + +[abstract] +-- +Learn how to configure networking for your Amazon EKS cluster using a VPC, subnets, security groups, and networking add-ons to ensure secure and efficient communication. +-- + +Your Amazon EKS cluster is created in a VPC. Pod networking is provided by the Amazon VPC Container Network Interface (CNI) plugin for nodes that run on {aws} infrastructure. If you are running nodes on your own infrastructure, see <>. This chapter includes the following topics for learning more about networking for your cluster. + +[.topiclist] +[[Topic List]] + +include::network-reqs.adoc[leveloffset=+1] + + +include::creating-a-vpc.adoc[leveloffset=+1] + + +include::sec-group-reqs.adoc[leveloffset=+1] + + +include::eks-networking-add-ons.adoc[leveloffset=+1] diff --git a/latest/ug/networking/images b/latest/ug/networking/images new file mode 120000 index 00000000..5e675731 --- /dev/null +++ b/latest/ug/networking/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/latest/ug/networking/network-policies-troubleshooting.adoc b/latest/ug/networking/network-policies-troubleshooting.adoc new file mode 100644 index 00000000..13d78926 --- /dev/null +++ b/latest/ug/networking/network-policies-troubleshooting.adoc @@ -0,0 +1,261 @@ +//!!NODE_ROOT
+[.topic] +[[network-policies-troubleshooting,network-policies-troubleshooting.title]] += Troubleshooting [.noloc]`Kubernetes` network policies For Amazon EKS +:info_titleabbrev: Troubleshooting + +include::../attributes.txt[] + +[abstract] +-- +Learn how to troubleshoot and investigate network connections that use network policies. +-- + +You can troubleshoot and investigate network connections that use network policies by reading the <> and by running tools from the <>. + +[[network-policies-troubleshooting-flowlogs,network-policies-troubleshooting-flowlogs.title]] +== Network policy logs + +Whether connections are allowed or denied by a network policies is logged in _flow logs_. The network policy logs on each node include the flow logs for every pod that has a network policy. Network policy logs are stored at `/var/log/aws-routed-eni/network-policy-agent.log`. The following example is from a `network-policy-agent.log` file: + +[source,bash,subs="verbatim,attributes"] +---- +{"level":"info","timestamp":"2023-05-30T16:05:32.573Z","logger":"ebpf-client","msg":"Flow Info: ","Src +IP":"192.168.87.155","Src Port":38971,"Dest IP":"64.6.160","Dest +Port":53,"Proto":"UDP","Verdict":"ACCEPT"} +---- + +Network policy logs are disabled by default. To enable the network policy logs, follow these steps: + +[NOTE] +==== + +Network policy logs require an additional 1 vCPU for the `aws-network-policy-agent` container in the VPC CNI `aws-node` daemonset manifest. + +==== + +[[cni-network-policy-flowlogs-addon,cni-network-policy-flowlogs-addon.title]] +=== Amazon EKS add-on + +*{aws-management-console}*:: + +.. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +.. In the left navigation pane, select *Clusters*, and then select the name of the cluster that you want to configure the Amazon VPC CNI add-on for. +.. Choose the *Add-ons* tab. +.. Select the box in the top right of the add-on box and then choose *Edit*. +.. On the *Configure [.replaceable]`name of addon`* page: ++ +... Select a `v1.14.0-eksbuild.3` or later version in the *Version* dropdown list. +... Expand the *Optional configuration settings*. +... Enter the top-level JSON key `"nodeAgent":` and value is an object with a key `"enablePolicyEventLogs":` and value of `"true"` in *Configuration values*. The resulting text must be a valid JSON object. The following example shows network policy and the network policy logs are enabled, and the network policy logs are sent to CloudWatch Logs: ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "enableNetworkPolicy": "true", + "nodeAgent": { + "enablePolicyEventLogs": "true" + } +} +---- + +The following screenshot shows an example of this scenario. + +image::images/console-cni-config-network-policy-logs.png[{aws-management-console} showing the VPC CNI add-on with network policy and CloudWatch Logs in the optional configuration.,scaledwidth=80%] + + +{aws} CLI:: +.. Run the following {aws} CLI command. Replace `my-cluster` with the name of your cluster and replace the IAM role ARN with the role that you are using. 
++ +[source,shell,subs="verbatim,attributes"] +---- +aws eks update-addon --cluster-name my-cluster --addon-name vpc-cni --addon-version v1.14.0-eksbuild.3 \ + --service-account-role-arn {arn-aws}iam::123456789012:role/AmazonEKSVPCCNIRole \ + --resolve-conflicts PRESERVE --configuration-values '{"nodeAgent": {"enablePolicyEventLogs": "true"}}' +---- + + +[[cni-network-policy-flowlogs-selfmanaged,cni-network-policy-flowlogs-selfmanaged.title]] +=== Self-managed add-on + +Helm:: + +If you have installed the [.noloc]`Amazon VPC CNI plugin for Kubernetes` through `helm`, you can update the configuration to write the network policy logs. + +.. Run the following command to enable network policy. ++ +[source,shell,subs="verbatim,attributes"] +---- +helm upgrade --set nodeAgent.enablePolicyEventLogs=true aws-vpc-cni --namespace kube-system eks/aws-vpc-cni +---- + + +[.noloc]`kubectl`:: + +If you have installed the [.noloc]`Amazon VPC CNI plugin for Kubernetes` through `kubectl`, you can update the configuration to write the network policy logs. + +.. Open the `aws-node` `DaemonSet` in your editor. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl edit daemonset -n kube-system aws-node +---- +.. Replace the `false` with `true` in the command argument `--enable-policy-event-logs=false` in the `args:` in the `aws-network-policy-agent` container in the VPC CNI `aws-node` daemonset manifest. ++ +[source,yaml,subs="verbatim,attributes"] +---- + - args: + - --enable-policy-event-logs=true +---- + + +[[network-policies-cloudwatchlogs,network-policies-cloudwatchlogs.title]] +== Send network policy logs to Amazon CloudWatch Logs + +You can monitor the network policy logs using services such as Amazon CloudWatch Logs. You can use the following methods to send the network policy logs to CloudWatch Logs. + +For EKS clusters, the policy logs will be located under `/aws/eks/[.replaceable]``cluster-name``/cluster/` and for self-managed K8S clusters, the logs will be placed under `/aws/k8s-cluster/cluster/`. + +[[network-policies-cwl-agent,network-policies-cwl-agent.title]] +=== Send network policy logs with [.noloc]`Amazon VPC CNI plugin for Kubernetes` + +If you enable network policy, a second container is add to the `aws-node` pods for a _node agent_. This node agent can send the network policy logs to CloudWatch Logs. + +[NOTE] +==== + +Only the network policy logs are sent by the node agent. Other logs made by the VPC CNI aren't included. + +==== + +[[cni-network-policy-cwl-agent-prereqs,cni-network-policy-cwl-agent-prereqs.title]] +==== Prerequisites + +* Add the following permissions as a stanza or separate policy to the IAM role that you are using for the VPC CNI. ++ +[source,json,subs="verbatim,attributes"] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "logs:DescribeLogGroups", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Resource": "*" + } + ] +} +---- + + +[[cni-network-policy-cwl-agent-addon,cni-network-policy-cwl-agent-addon.title]] +==== Amazon EKS add-on + +*{aws-management-console}*:: + +.. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +.. In the left navigation pane, select *Clusters*, and then select the name of the cluster that you want to configure the Amazon VPC CNI add-on for. +.. Choose the *Add-ons* tab. +.. Select the box in the top right of the add-on box and then choose *Edit*. +.. On the *Configure [.replaceable]`name of addon`* page: ++ +... 
Select a `v1.14.0-eksbuild.3` or later version in the *Version* dropdown list.
+... Expand the *Optional configuration settings*.
+... Enter the top-level JSON key `"nodeAgent":` whose value is an object with a key `"enableCloudWatchLogs":` and a value of `"true"` in *Configuration values*. The resulting text must be a valid JSON object. The following example shows that network policy and the network policy logs are enabled, and that the logs are sent to CloudWatch Logs:
++
+[source,json,subs="verbatim,attributes"]
+----
+{
+    "enableNetworkPolicy": "true",
+    "nodeAgent": {
+        "enablePolicyEventLogs": "true",
+        "enableCloudWatchLogs": "true"
+    }
+}
+----
++
+The following screenshot shows an example of this scenario.
++
+image::images/console-cni-config-network-policy-logs-cwl.png[{aws-management-console} showing the VPC CNI add-on with network policy and CloudWatch Logs in the optional configuration.,scaledwidth=80%]
+
+
+{aws} CLI::
+.. Run the following {aws} CLI command. Replace `my-cluster` with the name of your cluster and replace the IAM role ARN with the role that you are using.
++
+[source,shell,subs="verbatim,attributes"]
+----
+aws eks update-addon --cluster-name my-cluster --addon-name vpc-cni --addon-version v1.14.0-eksbuild.3 \
+    --service-account-role-arn {arn-aws}iam::123456789012:role/AmazonEKSVPCCNIRole \
+    --resolve-conflicts PRESERVE --configuration-values '{"nodeAgent": {"enablePolicyEventLogs": "true", "enableCloudWatchLogs": "true"}}'
+----
+
+
+[[cni-network-policy-cwl-agent-selfmanaged,cni-network-policy-cwl-agent-selfmanaged.title]]
+==== Self-managed add-on
+
+Helm::
+
+If you have installed the [.noloc]`Amazon VPC CNI plugin for Kubernetes` through `helm`, you can update the configuration to send network policy logs to CloudWatch Logs.
+
+.. Run the following command to enable network policy logs and send them to CloudWatch Logs.
++
+[source,shell,subs="verbatim,attributes"]
+----
+helm upgrade --set nodeAgent.enablePolicyEventLogs=true --set nodeAgent.enableCloudWatchLogs=true aws-vpc-cni --namespace kube-system eks/aws-vpc-cni
+----
+
+
+[.noloc]`kubectl`::
+.. Open the `aws-node` `DaemonSet` in your editor.
++
+[source,bash,subs="verbatim,attributes"]
+----
+kubectl edit daemonset -n kube-system aws-node
+----
+.. Replace the `false` with `true` in the two command arguments `--enable-policy-event-logs=false` and `--enable-cloudwatch-logs=false` in the `args:` in the `aws-network-policy-agent` container in the VPC CNI `aws-node` daemonset manifest.
++
+[source,yaml,subs="verbatim,attributes"]
+----
+    - args:
+        - --enable-policy-event-logs=true
+        - --enable-cloudwatch-logs=true
+----
+
+
+[[network-policies-cwl-fluentbit,network-policies-cwl-fluentbit.title]]
+=== Send network policy logs with a [.noloc]`Fluent Bit` daemonset
+
+If you are using [.noloc]`Fluent Bit` in a daemonset to send logs from your nodes, you can add configuration to include the network policy logs from network policies. You can use the following example configuration:
+
+[source,toml,subs="verbatim,attributes"]
+----
+    [INPUT]
+        Name              tail
+        Tag               eksnp.*
+        Path              /var/log/aws-routed-eni/network-policy-agent*.log
+        Parser            json
+        DB                /var/log/aws-routed-eni/flb_npagent.db
+        Mem_Buf_Limit     5MB
+        Skip_Long_Lines   On
+        Refresh_Interval  10
+----
+
+
+[[network-policies-ebpf-sdk,network-policies-ebpf-sdk.title]]
+== Included [.noloc]`eBPF` SDK
+
+The [.noloc]`Amazon VPC CNI plugin for Kubernetes` installs the [.noloc]`eBPF` SDK collection of tools on the nodes.
You can use the [.noloc]`eBPF` SDK tools to identify issues with network policies. For example, the following command lists the programs that are running on the node. + +[source,bash,subs="verbatim,attributes"] +---- +sudo /opt/cni/bin/aws-eks-na-cli ebpf progs +---- + +To run this command, you can use any method to connect to the node. diff --git a/latest/ug/networking/network-policy-stars-demo.adoc b/latest/ug/networking/network-policy-stars-demo.adoc new file mode 100644 index 00000000..95778b62 --- /dev/null +++ b/latest/ug/networking/network-policy-stars-demo.adoc @@ -0,0 +1,189 @@ +//!!NODE_ROOT
+[.topic] +[[network-policy-stars-demo,network-policy-stars-demo.title]] += Stars demo of network policy for Amazon EKS +:info_titleabbrev: Stars policy demo + +include::../attributes.txt[] + +[abstract] +-- +This demo creates a front-end, back-end, and client service on your Amazon EKS cluster. The demo also creates a management graphical user interface that shows the available ingress and egress paths between each service. +-- + +This demo creates a front-end, back-end, and client service on your Amazon EKS cluster. The demo also creates a management graphical user interface that shows the available ingress and egress paths between each service. We recommend that you complete the demo on a cluster that you don't run production workloads on. + +Before you create any network policies, all services can communicate bidirectionally. After you apply the network policies, you can see that the client can only communicate with the front-end service, and the back-end only accepts traffic from the front-end. + +. Apply the front-end, back-end, client, and management user interface services: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/create_resources.files/namespace.yaml +kubectl apply -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/create_resources.files/management-ui.yaml +kubectl apply -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/create_resources.files/backend.yaml +kubectl apply -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/create_resources.files/frontend.yaml +kubectl apply -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/create_resources.files/client.yaml +---- +. View all [.noloc]`Pods` on the cluster. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get pods -A +---- ++ +An example output is as follows. ++ +In your output, you should see pods in the namespaces shown in the following output. The [.replaceable]`NAMES` of your pods and the number of pods in the `READY` column are different than those in the following output. Don't continue until you see pods with similar names and they all have `Running` in the `STATUS` column. ++ +[source,bash,subs="verbatim,attributes"] +---- +NAMESPACE NAME READY STATUS RESTARTS AGE +[...] +client client-xlffc 1/1 Running 0 5m19s +[...] +management-ui management-ui-qrb2g 1/1 Running 0 5m24s +stars backend-sz87q 1/1 Running 0 5m23s +stars frontend-cscnf 1/1 Running 0 5m21s +[...] +---- +. To connect to the management user interface, connect to the `EXTERNAL-IP` of the service running on your cluster: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get service/management-ui -n management-ui +---- +. Open the a browser to the location from the previous step. You should see the management user interface. The *C* node is the client service, the *F* node is the front-end service, and the *B* node is the back-end service. Each node has full communication access to all other nodes, as indicated by the bold, colored lines. ++ +image::images/stars-default.png[Open network policy,scaledwidth=100%] +. 
Apply the following network policy in both the `stars` and `client` namespaces to isolate the services from each other: ++ +[source,yaml,subs="verbatim,attributes"] +---- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: default-deny +spec: + podSelector: + matchLabels: {} +---- ++ +You can use the following commands to apply the policy to both namespaces: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -n stars -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/apply_network_policies.files/default-deny.yaml +kubectl apply -n client -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/apply_network_policies.files/default-deny.yaml +---- +. Refresh your browser. You see that the management user interface can no longer reach any of the nodes, so they don't show up in the user interface. +. Apply the following different network policies to allow the management user interface to access the services. Apply this policy to allow the UI: ++ +[source,yaml,subs="verbatim,attributes"] +---- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + namespace: stars + name: allow-ui +spec: + podSelector: + matchLabels: {} + ingress: + - from: + - namespaceSelector: + matchLabels: + role: management-ui +---- ++ +Apply this policy to allow the client: ++ +[source,yaml,subs="verbatim,attributes"] +---- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + namespace: client + name: allow-ui +spec: + podSelector: + matchLabels: {} + ingress: + - from: + - namespaceSelector: + matchLabels: + role: management-ui +---- ++ +You can use the following commands to apply both policies: ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/apply_network_policies.files/allow-ui.yaml +kubectl apply -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/apply_network_policies.files/allow-ui-client.yaml +---- +. Refresh your browser. You see that the management user interface can reach the nodes again, but the nodes cannot communicate with each other. ++ +image::images/stars-no-traffic.png[UI access network policy,scaledwidth=100%] +. Apply the following network policy to allow traffic from the front-end service to the back-end service: ++ +[source,yaml,subs="verbatim,attributes"] +---- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + namespace: stars + name: backend-policy +spec: + podSelector: + matchLabels: + role: backend + ingress: + - from: + - podSelector: + matchLabels: + role: frontend + ports: + - protocol: TCP + port: 6379 +---- +. Refresh your browser. You see that the front-end can communicate with the back-end. ++ +image::images/stars-front-end-back-end.png[Front-end to back-end policy,scaledwidth=100%] +. Apply the following network policy to allow traffic from the client to the front-end service: ++ +[source,yaml,subs="verbatim,attributes"] +---- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + namespace: stars + name: frontend-policy +spec: + podSelector: + matchLabels: + role: frontend + ingress: + - from: + - namespaceSelector: + matchLabels: + role: client + ports: + - protocol: TCP + port: 80 +---- +. Refresh your browser. You see that the client can communicate to the front-end service. The front-end service can still communicate to the back-end service. 
++ +image::images/stars-final.png[Final network policy,scaledwidth=100%] +. (Optional) When you are done with the demo, you can delete its resources. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl delete -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/create_resources.files/client.yaml +kubectl delete -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/create_resources.files/frontend.yaml +kubectl delete -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/create_resources.files/backend.yaml +kubectl delete -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/create_resources.files/management-ui.yaml +kubectl delete -f https://eksworkshop.com/beginner/120_network-policies/calico/stars_policy_demo/create_resources.files/namespace.yaml +---- ++ +Even after deleting the resources, there can still be network policy endpoints on the nodes that might interfere in unexpected ways with networking in your cluster. The only sure way to remove these rules is to reboot the nodes or terminate all of the nodes and recycle them. To terminate all nodes, either set the Auto Scaling Group desired count to 0, then back up to the desired number, or just terminate the nodes. diff --git a/latest/ug/networking/network-reqs.adoc b/latest/ug/networking/network-reqs.adoc new file mode 100644 index 00000000..fdebdfae --- /dev/null +++ b/latest/ug/networking/network-reqs.adoc @@ -0,0 +1,290 @@ +//!!NODE_ROOT
+[.topic] +[[network-reqs,network-reqs.title]] += View Amazon EKS networking requirements for VPC and subnets +:info_doctype: section +:info_title: View Amazon EKS networking requirements for VPC and subnets +:info_titleabbrev: VPC and subnet requirements +:info_abstract: Learn how to configure the VPC and subnets to meet networking \ + requirements for creating Amazon EKS clusters with sufficient IP addresses, subnet \ + types, and availability zones. Understand IP family usage by component and shared \ + subnet considerations. + +include::../attributes.txt[] + +[abstract] +-- +Learn how to configure the VPC and subnets to meet networking requirements for creating Amazon EKS clusters with sufficient IP addresses, subnet types, and availability zones. Understand IP family usage by component and shared subnet considerations. +-- + +When you create a cluster, you specify a link:vpc/latest/userguide/configure-your-vpc.html[VPC,type="documentation"] and at least two subnets that are in different Availability Zones. This topic provides an overview of Amazon EKS specific requirements and considerations for the VPC and subnets that you use with your cluster. If you don't have a VPC to use with Amazon EKS, see <>. If you're creating a local or extended cluster on {aws} Outposts, see <> instead of this topic. The content in this topic applies for Amazon EKS clusters with hybrid nodes. For additional networking requirements for hybrid nodes, see <>. + +[[network-requirements-vpc,network-requirements-vpc.title]] +== VPC requirements and considerations + +When you create a cluster, the VPC that you specify must meet the following requirements and considerations: + + + +* The VPC must have a sufficient number of IP addresses available for the cluster, any nodes, and other [.noloc]`Kubernetes` resources that you want to create. If the VPC that you want to use doesn't have a sufficient number of IP addresses, try to increase the number of available IP addresses. ++ +You can do this by updating the cluster configuration to change which subnets and security groups the cluster uses. You can update from the {aws-management-console}, the latest version of the {aws} CLI, {aws} CloudFormation, and `eksctl` version `v0.164.0-rc.0` or later. You might need to do this to provide subnets with more available IP addresses to successfully upgrade a cluster version. ++ +[IMPORTANT] +==== + +All subnets that you add must be in the same set of AZs as originally provided when you created the cluster. New subnets must satisfy all of the other requirements, for example they must have sufficient IP addresses. + +For example, assume that you made a cluster and specified four subnets. In the order that you specified them, the first subnet is in the `us-west-2a` Availability Zone, the second and third subnets are in `us-west-2b` Availability Zone, and the fourth subnet is in `us-west-2c` Availability Zone. If you want to change the subnets, you must provide at least one subnet in each of the three Availability Zones, and the subnets must be in the same VPC as the original subnets. + +==== ++ +If you need more IP addresses than the CIDR blocks in the VPC have, you can add additional CIDR blocks by link:vpc/latest/userguide/working-with-vpcs.html#add-ipv4-cidr[associating additional Classless Inter-Domain Routing (CIDR) blocks,type="documentation"] with your VPC. You can associate private ([.noloc]`RFC 1918`) and public (non-[.noloc]`RFC 1918`) CIDR blocks to your VPC either before or after you create your cluster. 
It can take a cluster up to five hours for a CIDR block that you associated with a VPC to be recognized. ++ +You can conserve IP address utilization by using a transit gateway with a shared services VPC. For more information, see link:vpc/latest/tgw/transit-gateway-isolated-shared.html[Isolated VPCs with shared services,type="documentation"] and link:containers/eks-vpc-routable-ip-address-conservation[Amazon EKS VPC routable IP address conservation patterns in a hybrid network,type="blog"]. +* If you want [.noloc]`Kubernetes` to assign `IPv6` addresses to [.noloc]`Pods` and services, associate an `IPv6` CIDR block with your VPC. For more information, see link:vpc/latest/userguide/working-with-vpcs.html#vpc-associate-ipv6-cidr[Associate an IPv6 CIDR block with your VPC,type="documentation"] in the Amazon VPC User Guide. You cannot use `IPv6` addresses with Pods and services running on hybrid nodes and you cannot use hybrid nodes with clusters configured with the `IPv6` IP address family. +* The VPC must have `DNS` hostname and `DNS` resolution support. Otherwise, nodes can't register to your cluster. For more information, see link:vpc/latest/userguide/vpc-dns.html[DNS attributes for your VPC,type="documentation"] in the Amazon VPC User Guide. +* The VPC might require VPC endpoints using {aws} PrivateLink. For more information, see <>. + +If you created a cluster with [.noloc]`Kubernetes` `1.14` or earlier, Amazon EKS added the following tag to your VPC: + +[cols="1,1", options="header"] +|=== +|Key +|Value + + +|``kubernetes.io/cluster/[.replaceable]`my-cluster``` +|`owned` +|=== + +This tag was only used by Amazon EKS. You can remove the tag without impacting your services. It's not used with clusters that are version `1.15` or later. + +[[network-requirements-subnets,network-requirements-subnets.title]] +== Subnet requirements and considerations + +When you create a cluster, Amazon EKS creates 2–4 link:AWSEC2/latest/UserGuide/using-eni.html[elastic network interfaces,type="documentation"] in the subnets that you specify. These network interfaces enable communication between your cluster and your VPC. These network interfaces also enable [.noloc]`Kubernetes` features such as `kubectl exec` and `kubectl logs`. Each Amazon EKS created network interface has the text `Amazon EKS [.replaceable]``cluster-name``` in its description. + +Amazon EKS can create its network interfaces in any subnet that you specify when you create a cluster. You can change which subnets Amazon EKS creates its network interfaces in after your cluster is created. When you update the [.noloc]`Kubernetes` version of a cluster, Amazon EKS deletes the original network interfaces that it created, and creates new network interfaces. These network interfaces might be created in the same subnets as the original network interfaces or in different subnets than the original network interfaces. To control which subnets network interfaces are created in, you can limit the number of subnets you specify to only two when you create a cluster or update the subnets after creating the cluster. + +[[cluster-subnets,cluster-subnets.title]] +=== Subnet requirements for clusters + +The link:vpc/latest/userguide/configure-subnets.html#subnet-types[subnets,type="documentation"] that you specify when you create or update a cluster must meet the following requirements: + + + +* The subnets must each have at least six IP addresses for use by Amazon EKS. However, we recommend at least 16 IP addresses. 
+* The subnets must be in at least two different Availability Zones. +* The subnets can't reside in {aws} Outposts or {aws} Wavelength. However, if you have them in your VPC, you can deploy self-managed nodes and [.noloc]`Kubernetes` resources to these types of subnets. For more information about self-managed nodes, see <>. +* The subnets can be a public or private. However, we recommend that you specify private subnets, if possible. A public subnet is a subnet with a route table that includes a route to an link:vpc/latest/userguide/VPC_Internet_Gateway.html[internet gateway,type="documentation"], whereas a private subnet is a subnet with a route table that doesn't include a route to an internet gateway. +* The subnets can't reside in the following Availability Zones: ++ +[cols="1,1,1", options="header"] +|=== +|{aws} Region +|Region name +|Disallowed Availability Zone IDs + + +|`us-east-1` +|US East (N. Virginia) +|`use1-az3` + +|`us-west-1` +|US West (N. California) +|`usw1-az2` + +|`ca-central-1` +|Canada (Central) +|`cac1-az3` +|=== + + +[[network-requirements-ip-table]] +=== IP address family usage by component + +The following table contains the IP address family used by each component of Amazon EKS. You can use a network address translation (NAT) or other compatibility system to connect to these components from source IP addresses in families with the [.noloc]`"No"` value for a table entry. + +Functionality can differ depending on the [.noloc]`IP family` (`ipFamily`) setting of the cluster. This setting changes the type of IP addresses used for the [.noloc]`CIDR` block that [.noloc]`Kubernetes` assigns to [.noloc]`Services`. A cluster with the setting value of [.noloc]`IPv4` is referred to as an _IPv4 cluster_, and a cluster with the setting value of [.noloc]`IPv6` is referred to as an _IPv6 cluster_. + +[cols="1,1,1,1", options="header"] +|=== +|Component +|IPv4 addresses +|IPv6 addresses +|Dual stack addresses + + +|EKS API public endpoint +|Yes^1,3^ +|Yes^1,3^ +|Yes^1,3^ + +|EKS API VPC endpoint +|Yes +|No +|No + +|EKS Auth API public endpoint (EKS Pod Identity) +|Yes^1^ +|Yes^1^ +|Yes^1^ + +|EKS Auth API VPC endpoint (EKS Pod Identity) +|Yes^1^ +|Yes^1^ +|Yes^1^ + +|`IPv4` [.noloc]`Kubernetes` cluster public endpoint^2^ +|Yes +|No +|No + +|`IPv4` [.noloc]`Kubernetes` cluster private endpoint^2^ +|Yes +|No +|No + +|`IPv6` [.noloc]`Kubernetes` cluster public endpoint^2^ +|Yes^1,4^ +|Yes^1,4^ +|Yes^4^ + +|`IPv6` [.noloc]`Kubernetes` cluster private endpoint^2^ +|Yes^1,4^ +|Yes^1,4^ +|Yes^4^ + +|[.noloc]`Kubernetes` cluster subnets +|Yes^2^ +|No +|Yes^2^ + +|Node Primary IP addresses +|Yes^2^ +|No +|Yes^2^ + +|Cluster [.noloc]`CIDR` range for [.noloc]`Service` IP addresses +|Yes^2^ +|Yes^2^ +|No + +|[.noloc]`Pod` IP addresses from the VPC CNI +|Yes^2^ +|Yes^2^ +|No + +|IRSA [.noloc]`OIDC` Issuer URLs +|Yes^1,3^ +|Yes^1,3^ +|Yes^1,3^ +|=== + +[NOTE] +==== + +^1^ The endpoint is dual stack with both `IPv4` and `IPv6` addresses. Your applications outside of {aws}, your nodes for the cluster, and your pods inside the cluster can reach this endpoint by either `IPv4` or `IPv6`. + +^2^ You choose between an `IPv4` cluster and `IPv6` cluster in the [.noloc]`IP family` (`ipFamily`) setting of the cluster when you create a cluster and this can't be changed. Instead, you must choose a different setting when you create another cluster and migrate your workloads. + +^3^ The dual-stack endpoint was introduced in August 2024. 
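If you want the {aws} CLI to call the dual-stack endpoints, one approach (a sketch; verify the setting name against the guide referenced next) is to turn on the shared configuration setting for dual-stack endpoints:

[source,bash,subs="verbatim,attributes"]
----
# Opt the default CLI profile into dual-stack (IPv4 and IPv6) service endpoints.
aws configure set use_dualstack_endpoint true
----
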
To use the dual-stack endpoints with the {aws} CLI, see the link:sdkref/latest/guide/feature-endpoints.html[Dual-stack and FIPS endpoints,type="documentation"] configuration in the _{aws} SDKs and Tools Reference Guide_. The following lists the new endpoints: + +*EKS API public endpoint*:: +`eks.[.replaceable]``region``.api.aws` + +*IRSA [.noloc]`OIDC` Issuer URLs*:: +`oidc-eks.[.replaceable]``region``.api.aws` + +^4^ The dual-stack cluster endpoint was introduced in October 2024. EKS creates the following endpoint for new clusters that are made after this date and that select `IPv6` in the IP family (ipFamily) setting of the cluster: + +*EKS cluster public/private endpoint*:: +`eks-cluster.[.replaceable]``region``.api.aws` + +==== + +[[node-subnet-reqs,node-subnet-reqs.title]] +=== Subnet requirements for nodes + +You can deploy nodes and [.noloc]`Kubernetes` resources to the same subnets that you specify when you create your cluster. However, this isn't necessary. This is because you can also deploy nodes and [.noloc]`Kubernetes` resources to subnets that you didn't specify when you created the cluster. If you deploy nodes to different subnets, Amazon EKS doesn't create cluster network interfaces in those subnets. Any subnet that you deploy nodes and [.noloc]`Kubernetes` resources to must meet the following requirements: + + + +* The subnets must have enough available IP addresses to deploy all of your nodes and [.noloc]`Kubernetes` resources to. +* If you want [.noloc]`Kubernetes` to assign `IPv6` addresses to [.noloc]`Pods` and services, then you must have one `IPv6` CIDR block and one `IPv4` CIDR block that are associated with your subnet. For more information, see link:vpc/latest/userguide/working-with-subnets.html#subnet-associate-ipv6-cidr[Associate an IPv6 CIDR block with your subnet,type="documentation"] in the Amazon VPC User Guide. The route tables that are associated with the subnets must include routes to `IPv4` and `IPv6` addresses. For more information, see link:vpc/latest/userguide/VPC_Route_Tables.html#route-table-routes[Routes,type="documentation"] in the Amazon VPC User Guide. Pods are assigned only an `IPv6` address. However the network interfaces that Amazon EKS creates for your cluster and your nodes are assigned an `IPv4` and an `IPv6` address. +* If you need inbound access from the internet to your [.noloc]`Pods`, make sure to have at least one public subnet with enough available IP addresses to deploy load balancers and ingresses to. You can deploy load balancers to public subnets. Load balancers can load balance to [.noloc]`Pods` in private or public subnets. We recommend deploying your nodes to private subnets, if possible. +* If you plan to deploy nodes to a public subnet, the subnet must auto-assign `IPv4` public addresses or `IPv6` addresses. If you deploy nodes to a private subnet that has an associated `IPv6` CIDR block, the private subnet must also auto-assign `IPv6` addresses. If you used the {aws} CloudFormation template provided by Amazon EKS to deploy your VPC after March 26, 2020, this setting is enabled. If you used the templates to deploy your VPC before this date or you use your own VPC, you must enable this setting manually. For the template, see <>. 
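A sketch of enabling the `IPv4` setting manually is shown here; the subnet ID is a placeholder, and `--assign-ipv6-address-on-creation` is the equivalent option for subnets with an `IPv6` CIDR block.
+
[source,bash,subs="verbatim,attributes"]
----
# Turn on automatic public IPv4 address assignment for a public subnet (placeholder ID).
aws ec2 modify-subnet-attribute --subnet-id subnet-1234567890abcdef0 --map-public-ip-on-launch
----
+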
For more information, see link:vpc/latest/userguide/working-with-subnets.html#subnet-public-ip[Modify the public IPv4 addressing attribute for your subnet,type="documentation"] and link:vpc/latest/userguide/working-with-subnets.html#subnet-ipv6[Modify the IPv6 addressing attribute for your subnet,type="documentation"] in the link:vpc/latest/userguide/[Amazon VPC User Guide,type="documentation"]. +* If the subnet that you deploy a node to is a private subnet and its route table doesn't include a route to a network address translation link:vpc/latest/userguide/vpc-nat.html[(NAT) device,type="documentation"] (`IPv4`) or an link:vpc/latest/userguide/egress-only-internet-gateway.html[egress-only gateway,type="documentation"] (`IPv6`), add VPC endpoints using {aws} PrivateLink to your VPC. VPC endpoints are needed for all the {aws} services that your nodes and [.noloc]`Pods` need to communicate with. Examples include Amazon ECR, Elastic Load Balancing, Amazon CloudWatch, {aws} Security Token Service, and Amazon Simple Storage Service (Amazon S3). The endpoint must include the subnet that the nodes are in. Not all {aws} services support VPC endpoints. For more information, see link:vpc/latest/privatelink/what-is-privatelink.html[What is {aws} PrivateLink?,type="documentation"] and link:vpc/latest/privatelink/aws-services-privatelink-support.html[{aws} services that integrate with {aws} PrivateLink,type="documentation"]. For a list of more Amazon EKS requirements, see <>. +* If you want to deploy load balancers to a subnet, the subnet must have the following tag: ++ +** Private subnets ++ +[cols="1,1", options="header"] +|=== +|Key +|Value + + +|``kubernetes.io/role/internal-elb`` +|`1` +|=== +** Public subnets ++ +[cols="1,1", options="header"] +|=== +|Key +|Value + + +|``kubernetes.io/role/elb`` +|`1` +|=== + +When a [.noloc]`Kubernetes` cluster that's version `1.18` and earlier was created, Amazon EKS added the following tag to all of the subnets that were specified. + +[cols="1,1", options="header"] +|=== +|Key +|Value + + +|``kubernetes.io/cluster/[.replaceable]`my-cluster``` +|`shared` +|=== + +When you create a new [.noloc]`Kubernetes` cluster now, Amazon EKS doesn't add the tag to your subnets. If the tag was on subnets that were used by a cluster that was previously a version earlier than `1.19`, the tag wasn't automatically removed from the subnets when the cluster was updated to a newer version. Version `2.1.1` or earlier of the {aws} Load Balancer Controller requires this tag. If you are using a newer version of the Load Balancer Controller, you can remove the tag without interrupting your services. For more information about the controller, see <>. + +If you deployed a VPC by using `eksctl` or any of the Amazon EKS {aws} CloudFormation VPC templates, the following applies: + + + +* *On or after March 26, 2020* – Public `IPv4` addresses are automatically assigned by public subnets to new nodes that are deployed to public subnets. +* *Before March 26, 2020* – Public `IPv4` addresses aren't automatically assigned by public subnets to new nodes that are deployed to public subnets. + +This change impacts new node groups that are deployed to public subnets in the following ways: + + + +* *link:eks/latest/userguide/create-managed-node-group.html[Managed node groups,type="documentation"]* – If the node group is deployed to a public subnet on or after April 22, 2020, automatic assignment of public IP addresses must be enabled for the public subnet. 
For more information, see link:vpc/latest/userguide/vpc-ip-addressing.html#subnet-public-ip[Modifying the public IPv4 addressing attribute for your subnet,type="documentation"].
* *link:eks/latest/userguide/launch-workers.html[Linux,type="documentation"], link:eks/latest/userguide/launch-windows-workers.html[Windows,type="documentation"], or link:eks/latest/userguide/arm-ami.html[Arm,type="documentation"] self-managed node groups* – If the node group is deployed to a public subnet on or after March 26, 2020, automatic assignment of public IP addresses must be enabled for the public subnet. Otherwise, the nodes must be launched with a public IP address instead. For more information, see link:vpc/latest/userguide/vpc-ip-addressing.html#subnet-public-ip[Modifying the public IPv4 addressing attribute for your subnet,type="documentation"] or link:vpc/latest/userguide/vpc-ip-addressing.html#vpc-public-ip[Assigning a public IPv4 address during instance launch,type="documentation"].


[[network-requirements-shared,network-requirements-shared.title]]
== Shared subnet requirements and considerations

You can use _VPC sharing_ to share subnets with other {aws} accounts within the same organization in {aws} Organizations. You can create Amazon EKS clusters in shared subnets, with the following considerations:



* The owner of the VPC subnet must share a subnet with a participant account before that account can create an Amazon EKS cluster in it.
* You can't launch resources using the default security group for the VPC because it belongs to the owner. Additionally, participants can't launch resources using security groups that are owned by other participants or the owner.
* In a shared subnet, the participant and the owner separately control the security groups within each respective account. The subnet owner can see security groups that are created by the participants but cannot perform any actions on them. If the subnet owner wants to remove or modify these security groups, the participant that created the security group must take the action.
* If a cluster is created by a participant, the following considerations apply:
+
** The cluster IAM role and node IAM roles must be created in that account. For more information, see <> and <>.
** All nodes must be created by the same participant, including managed node groups.
* The shared VPC owner cannot view, update, or delete a cluster that a participant creates in the shared subnet. This is in addition to the differences in access to other VPC resources between the owner and participants. For more information, see link:vpc/latest/userguide/vpc-sharing.html#vpc-share-limitations[Responsibilities and permissions for owners and participants,type="documentation"] in the _Amazon VPC User Guide_.
* If you use the _custom networking_ feature of the [.noloc]`Amazon VPC CNI plugin for Kubernetes`, you need to use the Availability Zone ID mappings listed in the owner account to create each `ENIConfig`. For more information, see <>.

For more information about VPC subnet sharing, see link:vpc/latest/userguide/vpc-sharing.html#vpc-share-limitations[Share your VPC with other accounts,type="documentation"] in the _Amazon VPC User Guide_. diff --git a/latest/ug/networking/sec-group-reqs.adoc b/latest/ug/networking/sec-group-reqs.adoc new file mode 100644 index 00000000..7eeed60a --- /dev/null +++ b/latest/ug/networking/sec-group-reqs.adoc @@ -0,0 +1,152 @@ +//!!NODE_ROOT
+[.topic] +[[sec-group-reqs,sec-group-reqs.title]] += View Amazon EKS security group requirements for clusters +:info_doctype: section +:info_title: View Amazon EKS security group requirements for clusters +:info_titleabbrev: Security group requirements +:info_abstract: Learn how to manage security groups for Amazon EKS clusters, including default \ + rules, restricting traffic, and required outbound access for nodes to function \ + properly with your cluster. Understand key security group considerations for secure \ + operation of your Kubernetes cluster on {aws}. + +include::../attributes.txt[] + +[abstract] +-- +Learn how to manage security groups for Amazon EKS clusters, including default rules, restricting traffic, and required outbound access for nodes to function properly with your cluster. Understand key security group considerations for secure operation of your [.noloc]`Kubernetes` cluster on {aws}. +-- + +This topic describes the security group requirements of an Amazon EKS cluster. + +[[security-group-default-rules,security-group-default-rules.title]] +== Default cluster security group + +When you create a cluster, Amazon EKS creates a security group that's named `eks-cluster-sg-[.replaceable]``my-cluster``-[.replaceable]``uniqueID```. This security group has the following default rules: + +[cols="1,1,1,1,1", options="header"] +|=== +|Rule type +|Protocol +|Ports +|Source +|Destination + + +|Inbound +|All +|All +|Self +| + +|Outbound +|All +|All +| +|0.0.0.0/0(`IPv4`) or ::/0 (`IPv6`) +|=== + +[IMPORTANT] +==== + +If your cluster doesn't need the outbound rule, you can remove it. If you remove it, you must still have the minimum rules listed in <>. If you remove the inbound rule, Amazon EKS recreates it whenever the cluster is updated. + +==== + +Amazon EKS adds the following tags to the security group. If you remove the tags, Amazon EKS adds them back to the security group whenever your cluster is updated. + +[cols="1,1", options="header"] +|=== +|Key +|Value + + +|`kubernetes.io/cluster/[.replaceable]``my-cluster``` +|`owned` + +|``aws:eks:cluster-name`` +|[.replaceable]`my-cluster` + +|`Name` +|`eks-cluster-sg-[.replaceable]``my-cluster``-[.replaceable]``uniqueid``` +|=== + +Amazon EKS automatically associates this security group to the following resources that it also creates: + + + +* 2–4 elastic network interfaces (referred to for the rest of this document as _network interface_) that are created when you create your cluster. +* Network interfaces of the nodes in any managed node group that you create. + +The default rules allow all traffic to flow freely between your cluster and nodes, and allows all outbound traffic to any destination. When you create a cluster, you can (optionally) specify your own security groups. If you do, then Amazon EKS also associates the security groups that you specify to the network interfaces that it creates for your cluster. However, it doesn't associate them to any node groups that you create. + +You can determine the ID of your cluster security group in the {aws-management-console} under the cluster's *Networking* section. Or, you can do so by running the following {aws} CLI command. 
+ +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --name my-cluster --query cluster.resourcesVpcConfig.clusterSecurityGroupId +---- + + +[[security-group-restricting-cluster-traffic,security-group-restricting-cluster-traffic.title]] +== Restricting cluster traffic + +If you need to limit the open ports between the cluster and nodes, you can remove the <> and add the following minimum rules that are required for the cluster. If you remove the <>, Amazon EKS recreates it whenever the cluster is updated. + +[cols="1,1,1,1", options="header"] +|=== +|Rule type +|Protocol +|Port +|Destination + + +|Outbound +|TCP +|443 +|Cluster security group + +|Outbound +|TCP +|10250 +|Cluster security group + +|Outbound (DNS) +|TCP and UDP +|53 +|Cluster security group +|=== + +You must also add rules for the following traffic: + + + +* Any protocol and ports that you expect your nodes to use for inter-node communication. +* Outbound internet access so that nodes can access the Amazon EKS APIs for cluster introspection and node registration at launch time. If your nodes don't have internet access, review <> for additional considerations. +* Node access to pull container images from Amazon ECR or other container registries APIs that they need to pull images from, such as [.noloc]`DockerHub`. For more information, see link:general/latest/gr/aws-ip-ranges.html[{aws} IP address ranges,type="documentation"] in the {aws} General Reference. +* Node access to Amazon S3. +* Separate rules are required for `IPv4` and `IPv6` addresses. +* If you are using hybrid nodes, you must add an additional security group to your cluster to allow communication with your on-premises nodes and pods. For more information, see <>. + +If you're considering limiting the rules, we recommend that you thoroughly test all of your [.noloc]`Pods` before you apply your changed rules to a production cluster. + +If you originally deployed a cluster with [.noloc]`Kubernetes` `1.14` and a platform version of `eks.3` or earlier, then consider the following: + + + +* You might also have control plane and node security groups. When these groups were created, they included the restricted rules listed in the previous table. These security groups are no longer required and can be removed. However, you need to make sure your cluster security group contains the rules that those groups contain. +* If you deployed the cluster using the API directly or you used a tool such as the {aws} CLI or {aws} CloudFormation to create the cluster and you didn't specify a security group at cluster creation, then the default security group for the VPC was applied to the cluster network interfaces that Amazon EKS created. + +== Shared security groups + +Amazon EKS supports shared security groups. + +* *Security Group VPC Associations* associate security groups with multiple VPCs in the same account and region. +** Learn how to link:vpc/latest/userguide/security-group-assoc.html["Associate security groups with multiple VPCs",type="documentation"] in the _Amazon VPC User Guide_. +* *Shared security groups* enable you to share security groups with other {aws} accounts. The accounts must be in the same {aws} organization. +** Learn how to link:vpc/latest/userguide/security-group-sharing.html["Share security groups with organizations",type="documentation"] in the _Amazon VPC User Guide_. +* Security groups are always limited to a single {aws} region. 
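Whichever of these options you use, you can audit the rules that a security group currently contains before you rely on it. The following is a sketch; the group ID is a placeholder that you can look up with the `describe-cluster` command shown earlier in this topic.

[source,bash,subs="verbatim,attributes"]
----
# List all inbound and outbound rules for a security group (placeholder group ID).
aws ec2 describe-security-group-rules \
    --filters "Name=group-id,Values=sg-1234567890abcdef0" \
    --query 'SecurityGroupRules[*].[IsEgress,IpProtocol,FromPort,ToPort,CidrIpv4,ReferencedGroupInfo.GroupId]' \
    --output table
----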
+

+=== Considerations for Amazon EKS

* Amazon EKS applies the same requirements to shared and multi-VPC security groups as it does to standard security groups. diff --git a/latest/ug/nodes/al2023.adoc b/latest/ug/nodes/al2023.adoc new file mode 100644 index 00000000..0f44be2f --- /dev/null +++ b/latest/ug/nodes/al2023.adoc @@ -0,0 +1,49 @@ +//!!NODE_ROOT
+[.topic] +[[al2023,al2023.title]] += Upgrade from Amazon Linux 2 to Amazon Linux 2023 +:info_titleabbrev: Upgrade from AL2 to AL2023 + +include::../attributes.txt[] + +[abstract] +-- +AL2023 is a new Linux-based operating system designed to provide a secure, stable, and high-performance environment for your cloud applications. +-- + +The Amazon EKS optimized AMIs are available in two families based on AL2 and AL2023. AL2023 is a new Linux-based operating system designed to provide a secure, stable, and high-performance environment for your cloud applications. It's the next generation of Amazon Linux from Amazon Web Services and is available across all supported Amazon EKS versions, including versions `1.23` and `1.24` in extended support. + +AL2023 offers several improvements over AL2. For a full comparison, see link:linux/al2023/ug/compare-with-al2.html[Comparing AL2 and Amazon Linux 2023,type="documentation"] in the _Amazon Linux 2023 User Guide_. Several packages have been added, upgraded, and removed from AL2. It's highly recommended to test your applications with AL2023 before upgrading. For a list of all package changes in AL2023, see link:linux/al2023/release-notes/compare-packages.html[Package changes in Amazon Linux 2023,type="documentation"] in the _Amazon Linux 2023 Release Notes_. + +In addition to these changes, you should be aware of the following: + +* AL2023 introduces a new node initialization process `nodeadm` that uses a YAML configuration schema. If you're using self-managed node groups or an AMI with a launch template, you'll now need to provide additional cluster metadata explicitly when creating a new node group. An https://awslabs.github.io/amazon-eks-ami/nodeadm/[example] of the minimum required parameters is as follows, where `apiServerEndpoint`, `certificateAuthority`, and service `cidr` are now required: ++ +[source,yaml,subs="verbatim,attributes"] +---- +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + name: my-cluster + apiServerEndpoint: https://example.com + certificateAuthority: Y2VydGlmaWNhdGVBdXRob3JpdHk= + cidr: 10.100.0.0/16 +---- ++ +In AL2, the metadata from these parameters was discovered from the Amazon EKS `DescribeCluster` API call. With AL2023, this behavior has changed since the additional API call risks throttling during large node scale ups. This change doesn't affect you if you're using managed node groups without a launch template or if you're using [.noloc]`Karpenter`. For more information on `certificateAuthority` and service `cidr`, see ` link:eks/latest/APIReference/API_DescribeCluster.html[DescribeCluster,type="documentation"]` in the _Amazon EKS API Reference_. +* [.noloc]`Docker` isn't supported in AL2023 for all supported Amazon EKS versions. Support for [.noloc]`Docker` has ended and been removed with Amazon EKS version `1.24` or greater in AL2. For more information on deprecation, see <>. +* Amazon VPC CNI version `1.16.2` or greater is required for AL2023. +* AL2023 requires `IMDSv2` by default. `IMDSv2` has several benefits that help improve security posture. It uses a session-oriented authentication method that requires the creation of a secret token in a simple HTTP PUT request to start the session. A session's token can be valid for anywhere between 1 second and 6 hours. 
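As a quick illustration of that flow, the following sketch (run from a node or any Amazon EC2 instance) requests a session token and then uses it to read a metadata value; the six-hour TTL is just an example.
+
[source,bash,subs="verbatim,attributes"]
----
# Start an IMDSv2 session by requesting a token with a 6 hour (21600 second) TTL.
TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" \
  -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")

# Use the token to read instance metadata, for example the instance ID.
curl -s -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/instance-id
----
+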
For more information on how to transition from `IMDSv1` to `IMDSv2`, see link:AWSEC2/latest/UserGuide/instance-metadata-transition-to-version-2.html[Transition to using Instance Metadata Service Version 2,type="documentation"] and link:security/get-the-full-benefits-of-imdsv2-and-disable-imdsv1-across-your-aws-infrastructure[Get the full benefits of IMDSv2 and disable IMDSv1 across your {aws} infrastructure,type="blog"]. If you would like to use `IMDSv1`, you can still do so by manually overriding the settings using instance metadata option launch properties.
+
NOTE: For `IMDSv2`, the default hop count for managed node groups is set to 1. This means that containers won't have access to the node's credentials using IMDS. If you require container access to the node's credentials, you can still do so by manually overriding the `HttpPutResponseHopLimit` in a link:AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-metadataoptions.html[custom Amazon EC2 launch template,type="documentation"], increasing it to 2. Alternatively, you can use <> to provide credentials instead of `IMDSv2`.
* AL2023 features the next generation of unified control group hierarchy (`cgroupv2`). `cgroupv2` is used by `systemd` and to implement the container runtime. While AL2023 still includes code that can make the system run using `cgroupv1`, this isn't a recommended or supported configuration. This configuration will be completely removed in a future major release of Amazon Linux.
* `eksctl` version `0.176.0` or greater is required for AL2023 support.

For previously existing managed node groups, you can either perform an in-place upgrade or a blue/green upgrade depending on how you're using a launch template:

* If you're using a custom AMI with a managed node group, you can perform an in-place upgrade by swapping the AMI ID in the launch template. You should ensure that your applications and any user data transfer over to AL2023 before performing this upgrade strategy.
* If you're using managed node groups with either the standard launch template or with a custom launch template that doesn't specify the AMI ID, you're required to upgrade using a blue/green strategy. A blue/green upgrade is typically more complex and involves creating an entirely new node group where you would specify AL2023 as the AMI type. The new node group will then need to be carefully configured to ensure that all custom data from the AL2 node group is compatible with the new OS. Once the new node group has been tested and validated with your applications, [.noloc]`Pods` can be migrated from the old node group to the new node group. Once the migration is completed, you can delete the old node group.

If you're using [.noloc]`Karpenter` and want to use AL2023, you'll need to set the `EC2NodeClass` `amiFamily` field to AL2023. By default, Drift is enabled in [.noloc]`Karpenter`. This means that once the `amiFamily` field has been changed, [.noloc]`Karpenter` will automatically update your worker nodes to the latest AMI when available. diff --git a/latest/ug/nodes/auto-get-logs.adoc b/latest/ug/nodes/auto-get-logs.adoc new file mode 100644 index 00000000..be4fac09 --- /dev/null +++ b/latest/ug/nodes/auto-get-logs.adoc @@ -0,0 +1,128 @@ +//!!NODE_ROOT
+[.topic]
[[auto-get-logs,auto-get-logs.title]]
= Retrieve node logs for a managed node using kubectl and S3
:info_titleabbrev: Get node logs

include::../attributes.txt[]

[abstract]
--
Learn how to retrieve node logs for an Amazon EKS managed node that has the node monitoring agent.
--

Learn how to retrieve node logs for an Amazon EKS managed node that has the node monitoring agent.

== Prerequisites

Make sure you have the following:

* An existing Amazon EKS cluster with the node monitoring agent. For more information, see <>.
* The `kubectl` command-line tool installed and configured to communicate with your cluster.
* The {aws} CLI installed and logged in with sufficient permissions to create S3 buckets and objects.
* A recent version of Python 3 installed.
* The {aws} SDK for Python (Boto3) installed.

== Step 1: Create S3 bucket destination (optional)

If you don't already have an S3 bucket to store the logs, create one. Use the following {aws} CLI command. The bucket defaults to the `private` access control list. Replace [.replaceable]`bucket-name` with your chosen unique bucket name.

[source,bash,subs="verbatim,attributes,quotes"]
----
aws s3api create-bucket --bucket [.replaceable]`bucket-name`
----

== Step 2: Create pre-signed S3 URL for HTTP Put

Amazon EKS returns the node logs by doing an HTTP PUT operation to a URL you specify. In this tutorial, we will generate a pre-signed S3 HTTP PUT URL.

The logs will be returned as a gzip tarball, with the `.tar.gz` extension.

[NOTE]
====
You must use the {aws} API or an SDK to create the pre-signed S3 upload URL for EKS to upload the log file. You cannot create a pre-signed S3 upload URL using the {aws} CLI.
====

. Determine where in the bucket you want to store the logs. For example, you might use `2024-11-12/logs1.tar.gz` as the key.
. Save the following Python code to the file `presign-upload.py`. Replace `<bucket-name>` and `<key>`. The key should end with `.tar.gz`.
+
[source,python,subs="verbatim,attributes"]
----
import boto3; print(boto3.client('s3').generate_presigned_url(
    ClientMethod='put_object',
    Params={'Bucket': '<bucket-name>', 'Key': '<key>'},
    ExpiresIn=1000
))
----
. Run the script with
+
[source,cli]
----
python presign-upload.py
----
. Note the URL output. Use this value in the next step as the [.replaceable]`http-put-destination`.


For more information, see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-presigned-urls.html#generating-a-presigned-url-to-upload-a-file[Generate a presigned URL to upload a file] in the {aws} Boto3 SDK for Python Documentation.

== Step 3: Create NodeDiagnostic resource

Identify the name of the node you want to collect logs from.

Create a `NodeDiagnostic` manifest that uses the name of the node as the resource's name, and provides an HTTP PUT URL destination.

[source,yaml,subs="verbatim,attributes,quotes"]
----
apiVersion: eks.amazonaws.com/v1alpha1
kind: NodeDiagnostic
metadata:
  name: [.replaceable]`node-name`
spec:
  logCapture:
    destination: [.replaceable]`http-put-destination`
----

Apply the manifest to the cluster. 
+ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f nodediagnostic.yaml +---- + +You can check on the Status of the collection by describing the +`NodeDiagnostic` resource: + +* A status of `Success` or `SuccessWithErrors` indicates that the task + completed and the logs uploaded to the provided destination + (`SuccessWithErrors` indicates that some logs might be missing) +* If the status is Failure, confirm the upload URL is well-formed and not expired. + +[source,bash,subs="verbatim,attributes,quotes"] +---- +kubectl describe nodediagnostics.eks.amazonaws.com/[.replaceable]`node-name` +---- + +== Step 4: Download logs from S3 + +Wait approximately one minute before attempting to download the logs. Then, use the S3 CLI to download the logs. + +[source,bash,subs="verbatim,attributes,quotes"] +---- +# Once NodeDiagnostic shows Success status, download the logs +aws s3 cp s3://[.replaceable]`bucket-name`/[.replaceable]`key` ./node-logs.tar.gz +---- + +== Step 5: Clean up NodeDiagnostic resource + +* `NodeDiagnostic` resources do not get automatically deleted. You +should clean these up on your own after you have obtained your log +artifacts + +[source,bash,subs="verbatim,attributes,quotes"] +---- +# Delete the NodeDiagnostic resource +kubectl delete nodediagnostics.eks.amazonaws.com/[.replaceable]`node-name` +---- diff --git a/latest/ug/nodes/bottlerocket-compliance-support.adoc b/latest/ug/nodes/bottlerocket-compliance-support.adoc new file mode 100644 index 00000000..2977db31 --- /dev/null +++ b/latest/ug/nodes/bottlerocket-compliance-support.adoc @@ -0,0 +1,18 @@ +//!!NODE_ROOT
+include::../attributes.txt[]
[.topic]
[[bottlerocket-compliance-support,bottlerocket-compliance-support.title]]
= Meet compliance requirements with [.noloc]`Bottlerocket`
:info_titleabbrev: Compliance support

[abstract]
--
[.noloc]`Bottlerocket` complies with recommendations defined by various organizations.
--

[.noloc]`Bottlerocket` complies with recommendations defined by various organizations:

* There is a https://www.cisecurity.org/benchmark/bottlerocket[CIS Benchmark] defined for [.noloc]`Bottlerocket`. In its default configuration, the [.noloc]`Bottlerocket` image has most of the controls required by the CIS Level 1 configuration profile. You can implement the controls required for a CIS Level 2 configuration profile. For more information, see link:containers/validating-amazon-eks-optimized-bottlerocket-ami-against-the-cis-benchmark[Validating Amazon EKS optimized Bottlerocket AMI against the CIS Benchmark,type="blog"] on the {aws} blog.
* The optimized feature set and reduced attack surface mean that [.noloc]`Bottlerocket` instances require less configuration to satisfy PCI DSS requirements. The https://www.cisecurity.org/benchmark/bottlerocket[CIS Benchmark for Bottlerocket] is an excellent resource for hardening guidance, and supports your requirements for secure configuration standards under PCI DSS requirement 2.2. You can also leverage https://opensearch.org/blog/technical-post/2022/07/bottlerocket-k8s-fluent-bit/[Fluent Bit] to support your requirements for operating system level audit logging under PCI DSS requirement 10.2. {aws} publishes new (patched) [.noloc]`Bottlerocket` instances periodically to help you meet PCI DSS requirement 6.2 (for v3.2.1) and requirement 6.3.3 (for v4.0).
* [.noloc]`Bottlerocket` is a HIPAA-eligible feature authorized for use with regulated workloads for both Amazon EC2 and Amazon EKS. For more information, see the link:pdfs/whitepapers/latest/architecting-hipaa-security-and-compliance-on-amazon-eks/architecting-hipaa-security-and-compliance-on-amazon-eks.pdf[Architecting for HIPAA Security and Compliance on Amazon EKS,type="documentation"] whitepaper.
* [.noloc]`Bottlerocket` AMIs are available that are preconfigured to use FIPS 140-3 validated cryptographic modules. This includes the Amazon Linux 2023 Kernel Crypto API Cryptographic Module and the {aws}-LC Cryptographic Module. For more information on selecting FIPS-enabled variants, see <>. diff --git a/latest/ug/nodes/choosing-instance-type.adoc b/latest/ug/nodes/choosing-instance-type.adoc new file mode 100644 index 00000000..9b1fa846 --- /dev/null +++ b/latest/ug/nodes/choosing-instance-type.adoc @@ -0,0 +1,117 @@ +//!!NODE_ROOT
+include::../attributes.txt[]
+
[.topic]
[[choosing-instance-type,choosing-instance-type.title]]
= Choose an optimal Amazon EC2 node instance type
:info_doctype: section
:info_title: Choose an optimal Amazon EC2 node instance type
:info_titleabbrev: Amazon EC2 instance types
:keywords: choose, select, instance, type, family, group, max-pods, max pods, maximum pods
:info_abstract: Each Amazon EC2 instance type offers different compute, memory, storage, and network \
  capabilities.

[abstract]
--
Each Amazon EC2 instance type offers different compute, memory, storage, and network capabilities.
--

Amazon EC2 provides a wide selection of instance types for worker nodes. Each instance type offers different compute, memory, storage, and network capabilities. Each instance is also grouped in an instance family based on these capabilities. For a list, see link:AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes[Available instance types,type="documentation"] in the _Amazon EC2 User Guide_. Amazon EKS releases several variations of Amazon EC2 AMIs to enable support. To make sure that the instance type you select is compatible with Amazon EKS, consider the following criteria.



* Amazon EKS AMIs don't currently support the `g5g` and `mac` families.
* [.noloc]`Arm` and non-accelerated Amazon EKS AMIs don't support the `g3`, `g4`, `inf`, and `p` families.
* Accelerated Amazon EKS AMIs don't support the `a`, `c`, `hpc`, `m`, and `t` families.
* For Arm-based instances, Amazon Linux 2023 (AL2023) only supports instance types that use [.noloc]`Graviton2` or later processors. AL2023 doesn't support `A1` instances.

When choosing between instance types that are supported by Amazon EKS, consider the following capabilities of each type.



*Number of instances in a node group*::
In general, fewer, larger instances are better, especially if you have a lot of [.noloc]`Daemonsets`. Each instance requires API calls to the API server, so the more instances you have, the more load on the API server.


*Operating system*::
Review the supported instance types for link:AWSEC2/latest/UserGuide/instance-types.html[Linux,type="documentation"], link:AWSEC2/latest/WindowsGuide/instance-types.html[Windows,type="documentation"], and link:bottlerocket/faqs/[Bottlerocket,type="marketing"]. Before creating [.noloc]`Windows` instances, review <>.


*Hardware architecture*::
Do you need [.noloc]`x86` or [.noloc]`Arm`? Before deploying [.noloc]`Arm` instances, review <>. Do you need instances built on the [.noloc]`Nitro System` ( link:AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances[Linux,type="documentation"] or link:AWSEC2/latest/WindowsGuide/instance-types.html#ec2-nitro-instances[Windows,type="documentation"]) or that have link:AWSEC2/latest/WindowsGuide/accelerated-computing-instances.html[Accelerated,type="documentation"] capabilities? If you need accelerated capabilities, you can only use [.noloc]`Linux` with Amazon EKS.


*Maximum number of [.noloc]`Pods`*::
Since each [.noloc]`Pod` is assigned its own IP address, the number of IP addresses supported by an instance type is a factor in determining the number of [.noloc]`Pods` that can run on the instance. To manually determine how many [.noloc]`Pods` an instance type supports, see <>.
+
NOTE: If you're using an Amazon EKS optimized Amazon Linux 2 AMI that's `v20220406` or newer, you can use a new instance type without upgrading to the latest AMI. For these AMIs, the AMI auto-calculates the necessary `max-pods` value if it isn't listed in the https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt[eni-max-pods.txt] file. Instance types that are currently in preview may not be supported by Amazon EKS by default. Values of `max-pods` for such types still need to be added to `eni-max-pods.txt` in our AMI.
+
link:ec2/nitro/[{aws} Nitro System,type="marketing"] instance types optionally support significantly more IP addresses than non-Nitro System instance types. However, not all IP addresses assigned for an instance are available to [.noloc]`Pods`. To assign a significantly larger number of IP addresses to your instances, you must have version `1.9.0` or later of the Amazon VPC CNI add-on installed in your cluster and configured appropriately. For more information, see <>. To assign the largest number of IP addresses to your instances, you must have version `1.10.1` or later of the Amazon VPC CNI add-on installed in your cluster and deploy the cluster with the `IPv6` family.


*IP family*::
You can use any supported instance type when using the `IPv4` family for a cluster, which allows your cluster to assign private `IPv4` addresses to your [.noloc]`Pods` and Services. But if you want to use the `IPv6` family for your cluster, then you must use link:ec2/nitro/[{aws} Nitro System,type="marketing"] instance types or bare metal instance types. Only `IPv4` is supported for [.noloc]`Windows` instances. Your cluster must be running version `1.10.1` or later of the Amazon VPC CNI add-on. For more information about using `IPv6`, see <>.


*Version of the Amazon VPC CNI add-on that you're running*::
The latest version of the https://github.com/aws/amazon-vpc-cni-k8s[Amazon VPC CNI plugin for Kubernetes] supports https://github.com/aws/amazon-vpc-cni-k8s/blob/master/pkg/vpc/vpc_ip_resource_limit.go[these instance types]. You may need to update your Amazon VPC CNI add-on version to take advantage of the latest supported instance types. For more information, see <>. The latest version supports the latest features for use with Amazon EKS. Earlier versions don't support all features. You can view features supported by different versions in the https://github.com/aws/amazon-vpc-cni-k8s/blob/master/CHANGELOG.md[Changelog] on [.noloc]`GitHub`.


*{aws} Region that you're creating your nodes in*::
Not all instance types are available in all {aws} Regions.


*Whether you're using security groups for [.noloc]`Pods`*::
If you're using security groups for [.noloc]`Pods`, only specific instance types are supported. For more information, see <>.


[[determine-max-pods,determine-max-pods.title]]
== Amazon EKS recommended maximum [.noloc]`Pods` for each Amazon EC2 instance type

Since each [.noloc]`Pod` is assigned its own IP address, the number of IP addresses supported by an instance type is a factor in determining the number of [.noloc]`Pods` that can run on the instance. Amazon EKS provides a script that you can download and run to determine the Amazon EKS recommended maximum number of [.noloc]`Pods` to run on each instance type. The script uses hardware attributes of each instance, and configuration options, to determine the maximum [.noloc]`Pods` number. You can use the number returned in these steps to enable capabilities such as <> and <>. If you're using a managed node group with multiple instance types, use a value that would work for all instance types.

. 
Download a script that you can use to calculate the maximum number of [.noloc]`Pods` for each instance type. ++ +[source,bash,subs="verbatim,attributes"] +---- +curl -O https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/templates/al2/runtime/max-pods-calculator.sh +---- +. Mark the script as executable on your computer. ++ +[source,bash,subs="verbatim,attributes"] +---- +chmod +x max-pods-calculator.sh +---- +. Run the script, replacing [.replaceable]`m5.large` with the instance type that you plan to deploy and [.replaceable]`1.9.0-eksbuild.1` with your Amazon VPC CNI add-on version. To determine your add-on version, see the update procedures in <>. ++ +[source,bash,subs="verbatim,attributes"] +---- +./max-pods-calculator.sh --instance-type m5.large --cni-version 1.9.0-eksbuild.1 +---- ++ +An example output is as follows. ++ +[source,bash,subs="verbatim,attributes"] +---- +29 +---- ++ +You can add the following options to the script to see the maximum [.noloc]`Pods` supported when using optional capabilities. ++ +** `--cni-custom-networking-enabled` – Use this option when you want to assign IP addresses from a different subnet than your instance's. For more information, see <>. Adding this option to the previous script with the same example values yields `20`. +** `--cni-prefix-delegation-enabled` – Use this option when you want to assign significantly more IP addresses to each elastic network interface. This capability requires an Amazon Linux instance that run on the Nitro System and version `1.9.0` or later of the Amazon VPC CNI add-on. For more information, see <>. Adding this option to the previous script with the same example values yields `110`. + +You can also run the script with the `--help` option to see all available options. + +[NOTE] +==== + +The max [.noloc]`Pods` calculator script limits the return value to `110` based on https://github.com/kubernetes/community/blob/master/sig-scalability/configs-and-limits/thresholds.md[Kubernetes scalability thresholds] and recommended settings. If your instance type has greater than 30 vCPUs, this limit jumps to `250`, a number based on internal Amazon EKS scalability team testing. For more information, see the link:containers/amazon-vpc-cni-increases-pods-per-node-limits[Amazon VPC CNI plugin increases pods per node limits,type="blog"] blog post. + +==== + +== Considerations for EKS Auto Mode + +EKS Auto Mode limits the number of pods on nodes to the lower of: + +* 110 pods hard cap +* The result of the max pods calculation described above. diff --git a/latest/ug/nodes/create-managed-node-group.adoc b/latest/ug/nodes/create-managed-node-group.adoc new file mode 100644 index 00000000..a508021a --- /dev/null +++ b/latest/ug/nodes/create-managed-node-group.adoc @@ -0,0 +1,252 @@ +//!!NODE_ROOT
+[.topic] +[[create-managed-node-group,create-managed-node-group.title]] += Create a managed node group for your cluster +:info_titleabbrev: Create + +include::../attributes.txt[] + +[abstract] +-- +This topic describes how you can launch Amazon EKS managed node groups of nodes that register with your Amazon EKS cluster. +-- + +This topic describes how you can launch Amazon EKS managed node groups of nodes that register with your Amazon EKS cluster. After the nodes join the cluster, you can deploy [.noloc]`Kubernetes` applications to them. + +If this is your first time launching an Amazon EKS managed node group, we recommend that you instead follow one of our guides in <>. These guides provide walkthroughs for creating an Amazon EKS cluster with nodes. + +[IMPORTANT] +==== + +* Amazon EKS nodes are standard Amazon EC2 instances. You're billed based on the normal Amazon EC2 prices. For more information, see link:ec2/pricing/[Amazon EC2 Pricing,type="marketing"]. +* You can't create managed nodes in an {aws} Region where you have {aws} Outposts or {aws} Wavelength enabled. You can create self-managed nodes instead. For more information, see <>, <>, and <>. You can also create a self-managed Amazon Linux node group on an Outpost. For more information, see <>. +* If you don't <> for the `bootstrap.sh` file included with Amazon EKS optimized Linux or Bottlerocket, managed node groups enforce a maximum number on the value of `maxPods`. For instances with less than 30 vCPUs, the maximum number is `110`. For instances with greater than 30 vCPUs, the maximum number jumps to `250`. These numbers are based on https://github.com/kubernetes/community/blob/master/sig-scalability/configs-and-limits/thresholds.md[Kubernetes scalability thresholds] and recommended settings by internal Amazon EKS scalability team testing. For more information, see the link:containers/amazon-vpc-cni-increases-pods-per-node-limits[Amazon VPC CNI plugin increases pods per node limits,type="blog"] blog post. + +==== + +* An existing Amazon EKS cluster. To deploy one, see <>. +* An existing IAM role for the nodes to use. To create one, see <>. If this role doesn't have either of the policies for the VPC CNI, the separate role that follows is required for the VPC CNI pods. +* (Optional, but recommended) The [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-on configured with its own IAM role that has the necessary IAM policy attached to it. For more information, see <>. +* Familiarity with the considerations listed in <>. Depending on the instance type you choose, there may be additional prerequisites for your cluster and VPC. +* To add a [.noloc]`Windows` managed node group, you must first enable [.noloc]`Windows` support for your cluster. For more information, see <>. + +You can create a managed node group with either of the following: + +* <> +* <> + +== `eksctl` [[eksctl_create_managed_nodegroup]] + +*Create a managed node group with eksctl* + +This procedure requires `eksctl` version `{eksctl-min-version}` or later. You can check your version with the following command: + +[source,bash,subs="verbatim,attributes"] +---- +eksctl version +---- + +For instructions on how to install or upgrade `eksctl`, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. + +. (Optional) If the *AmazonEKS_CNI_Policy* managed IAM policy is attached to your <>, we recommend assigning it to an IAM role that you associate to the [.noloc]`Kubernetes` `aws-node` service account instead. For more information, see <>. +. 
Create a managed node group with or without using a custom launch template. Manually specifying a launch template allows for greater customization of a node group. For example, it can allow deploying a custom AMI or providing arguments to the `boostrap.sh` script in an Amazon EKS optimized AMI. For a complete list of every available option and default, enter the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create nodegroup --help +---- ++ +In the following command, replace [.replaceable]`my-cluster` with the name of your cluster and replace [.replaceable]`my-mng` with the name of your node group. The node group name can't be longer than 63 characters. It must start with letter or digit, but can also include hyphens and underscores for the remaining characters. ++ +[IMPORTANT] +==== +If you don't use a custom launch template when first creating a managed node group, don't use one at a later time for the node group. If you didn't specify a custom launch template, the system auto-generates a launch template that we don't recommend that you modify manually. Manually modifying this auto-generated launch template might cause errors. +==== + +*Without a launch template* + +`eksctl` creates a default Amazon EC2 launch template in your account and deploys the node group using a launch template that it creates based on options that you specify. Before specifying a value for `--node-type`, see <>. + +Replace [.replaceable]`ami-family` with an allowed keyword. For more information, see https://eksctl.io/usage/custom-ami-support/#setting-the-node-ami-family[Setting the node AMI Family] in the `eksctl` documentation. Replace [.replaceable]`my-key` with the name of your Amazon EC2 key pair or public key. This key is used to SSH into your nodes after they launch. + +NOTE: For [.noloc]`Windows`, this command doesn't enable SSH. Instead, it associates your Amazon EC2 key pair with the instance and allows you to RDP into the instance. + +If you don't already have an Amazon EC2 key pair, you can create one in the {aws-management-console}. For [.noloc]`Linux` information, see link:AWSEC2/latest/UserGuide/ec2-key-pairs.html[Amazon EC2 key pairs and Linux instances,type="documentation"] in the _Amazon EC2 User Guide_. For [.noloc]`Windows` information, see link:AWSEC2/latest/WindowsGuide/ec2-key-pairs.html[Amazon EC2 key pairs and Windows instances,type="documentation"] in the _Amazon EC2 User Guide_. + +We recommend blocking [.noloc]`Pod` access to IMDS if the following conditions are true: + +* You plan to assign IAM roles to all of your [.noloc]`Kubernetes` service accounts so that [.noloc]`Pods` only have the minimum permissions that they need. + +* No [.noloc]`Pods` in the cluster require access to the Amazon EC2 instance metadata service (IMDS) for other reasons, such as retrieving the current {aws} Region. + +For more information, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[Restrict access to the instance profile assigned to the worker node]. + +If you want to block [.noloc]`Pod` access to IMDS, then add the `--disable-pod-imds` option to the following command. 
+ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create nodegroup \ + --cluster my-cluster \ + --region region-code \ + --name my-mng \ + --node-ami-family ami-family \ + --node-type m5.large \ + --nodes 3 \ + --nodes-min 2 \ + --nodes-max 4 \ + --ssh-access \ + --ssh-public-key my-key +---- + +Your instances can optionally assign a significantly higher number of IP addresses to [.noloc]`Pods`, assign IP addresses to [.noloc]`Pods` from a different CIDR block than the instance's, and be deployed to a cluster without internet access. For more information, see <>, <>, and <> for additional options to add to the previous command. + +Managed node groups calculates and applies a single value for the maximum number of [.noloc]`Pods` that can run on each node of your node group, based on instance type. If you create a node group with different instance types, the smallest value calculated across all instance types is applied as the maximum number of [.noloc]`Pods` that can run on every instance type in the node group. Managed node groups calculates the value using the script referenced in <>. + +*With a launch template* + +The launch template must already exist and must meet the requirements specified in <>. +We recommend blocking [.noloc]`Pod` access to IMDS if the following conditions are true: + +* You plan to assign IAM roles to all of your [.noloc]`Kubernetes` service accounts so that [.noloc]`Pods` only have the minimum permissions that they need. + +* No [.noloc]`Pods` in the cluster require access to the Amazon EC2 instance metadata service (IMDS) for other reasons, such as retrieving the current {aws} Region. + +For more information, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[Restrict access to the instance profile assigned to the worker node]. + +If you want to block [.noloc]`Pod` access to IMDS, then specify the necessary settings in the launch template. + +[loweralpha] +.. Copy the following contents to your device. Replace the [.replaceable]`example values` and then run the modified command to create the `eks-nodegroup.yaml` file. Several settings that you specify when deploying without a launch template are moved into the launch template. If you don't specify a `version`, the template's default version is used. ++ +[source,yaml,subs="verbatim,attributes"] +---- +cat >eks-nodegroup.yaml <>, <>, <>, and <> for additional options to add to the config file. ++ +If you didn't specify an AMI ID in your launch template, managed node groups calculates and applies a single value for the maximum number of [.noloc]`Pods` that can run on each node of your node group, based on instance type. If you create a node group with different instance types, the smallest value calculated across all instance types is applied as the maximum number of [.noloc]`Pods` that can run on every instance type in the node group. Managed node groups calculates the value using the script referenced in <>. ++ +If you specified an AMI ID in your launch template, specify the maximum number of [.noloc]`Pods` that can run on each node of your node group if you're using <> or want to <>. For more information, see <>. + +.. Deploy the nodegroup with the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +eksctl create nodegroup --config-file eks-nodegroup.yaml +---- + +== {aws-management-console} [[console_create_managed_nodegroup]] + +*Create a managed node group using the {aws-management-console}* + +. 
Wait for your cluster status to show as `ACTIVE`. You can't create a managed node group for a cluster that isn't already `ACTIVE`.
. Open the link:eks/home#/clusters[Amazon EKS console,type="console"].
. Choose the name of the cluster that you want to create a managed node group in.
. Select the *Compute* tab.
. Choose *Add node group*.
. On the *Configure node group* page, fill out the parameters accordingly, and then choose *Next*.
+
** *Name* – Enter a unique name for your managed node group. The node group name can't be longer than 63 characters. It must start with a letter or digit, but can also include hyphens and underscores for the remaining characters.
** *Node IAM role* – Choose the node instance role to use with your node group. For more information, see <>.

+
[IMPORTANT]
====
**** You can't use the same role that is used to create any clusters.
**** We recommend using a role that's not currently in use by any self-managed node group, and that you don't plan to use with a new self-managed node group. For more information, see <>.
====

*** *Use launch template* – (Optional) Choose whether you want to use an existing launch template. Select a *Launch Template Name*. Then, select a *Launch template version*. If you don't select a version, then Amazon EKS uses the template's default version. Launch templates allow for more customization of your node group, such as allowing you to deploy a custom AMI, assign a significantly higher number of IP addresses to [.noloc]`Pods`, assign IP addresses to [.noloc]`Pods` from a different CIDR block than the instance's, enable the `containerd` runtime for your instances, and deploy nodes to a cluster without outbound internet access. For more information, see <>, <>, <>, and <>.
+
The launch template must meet the requirements in <>. If you don't use your own launch template, the Amazon EKS API creates a default Amazon EC2 launch template in your account and deploys the node group using the default launch template.
+
If you implement <>, assign necessary permissions directly to every [.noloc]`Pod` that requires access to {aws} services, and no [.noloc]`Pods` in your cluster require access to IMDS for other reasons, such as retrieving the current {aws} Region, then you can also disable access to IMDS for [.noloc]`Pods` that don't use host networking in a launch template. For more information, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[Restrict access to the instance profile assigned to the worker node].
*** *[.noloc]`Kubernetes` labels* – (Optional) You can choose to apply [.noloc]`Kubernetes` labels to the nodes in your managed node group.
*** *[.noloc]`Kubernetes` taints* – (Optional) You can choose to apply [.noloc]`Kubernetes` taints to the nodes in your managed node group. The available options in the *Effect* menu are `*NoSchedule*`, `*NoExecute*`, and `*PreferNoSchedule*`. For more information, see <>.
*** *Tags* – (Optional) You can choose to tag your Amazon EKS managed node group. These tags don't propagate to other resources in the node group, such as Auto Scaling groups or instances. For more information, see <>.
. On the *Set compute and scaling configuration* page, fill out the parameters accordingly, and then choose *Next*.
+
*** *AMI type* – Select an AMI type. If you are deploying Arm instances, be sure to review the considerations in <> before deploying. 
+
If you specified a launch template on the previous page, and specified an AMI in the launch template, then you can't select a value. The value from the template is displayed. The AMI specified in the template must meet the requirements in <>.
*** *Capacity type* – Select a capacity type. For more information about choosing a capacity type, see <>. You can't mix different capacity types within the same node group. If you want to use both capacity types, create separate node groups, each with its own capacity and instance types. See <> for information on provisioning and scaling GPU-accelerated worker nodes.
*** *Instance types* – By default, one or more instance types are specified. To remove a default instance type, select the `X` on the right side of the instance type. Choose the instance types to use in your managed node group. For more information, see <>.
+
The console displays a set of commonly used instance types. If you need to create a managed node group with an instance type that's not displayed, then use `eksctl`, the {aws} CLI, {aws} CloudFormation, or an SDK to create the node group. If you specified a launch template on the previous page, then you can't select a value because the instance type must be specified in the launch template. The value from the launch template is displayed. If you selected *Spot* for *Capacity type*, then we recommend specifying multiple instance types to enhance availability.
*** *Disk size* – Enter the disk size (in GiB) to use for your node's root volume.
+
If you specified a launch template on the previous page, then you can't select a value because it must be specified in the launch template.
*** *Desired size* – Specify the current number of nodes that the managed node group should maintain at launch.
+
NOTE: Amazon EKS doesn't automatically scale your node group in or out. However, you can configure the [.noloc]`Kubernetes` Cluster Autoscaler to do this for you. For more information, see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md[Cluster Autoscaler on {aws}].
*** *Minimum size* – Specify the minimum number of nodes that the managed node group can scale in to.
*** *Maximum size* – Specify the maximum number of nodes that the managed node group can scale out to.
*** *Node group update configuration* – (Optional) You can select the number or percentage of nodes to be updated in parallel. These nodes will be unavailable during the update. For *Maximum unavailable*, select one of the following options and specify a *Value*:
+
**** *Number* – Select and specify the number of nodes in your node group that can be updated in parallel.
**** *Percentage* – Select and specify the percentage of nodes in your node group that can be updated in parallel. This is useful if you have a large number of nodes in your node group.
*** *Node auto repair configuration* – (Optional) If you activate the *Enable node auto repair* checkbox, Amazon EKS automatically replaces nodes when it detects issues. For more information, see <>.
. On the *Specify networking* page, fill out the parameters accordingly, and then choose *Next*.
+
*** *Subnets* – Choose the subnets to launch your managed nodes into.
++ +[IMPORTANT] +==== +If you are running a stateful application across multiple Availability Zones that is backed by Amazon EBS volumes and using the [.noloc]`Kubernetes` https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md[Cluster Autoscaler], you should configure multiple node groups, each scoped to a single Availability Zone. In addition, you should enable the `--balance-similar-node-groups` feature. +==== ++ +[IMPORTANT] +==== +**** If you choose a public subnet, and your cluster has only the public API server endpoint enabled, then the subnet must have `MapPublicIPOnLaunch` set to `true` for the instances to successfully join a cluster. If the subnet was created using `eksctl` or the <> on or after March 26, 2020, then this setting is already set to `true`. If the subnets were created with `eksctl` or the {aws} CloudFormation templates before March 26, 2020, then you need to change the setting manually. For more information, see link:vpc/latest/userguide/vpc-ip-addressing.html#subnet-public-ip[Modifying the public IPv4 addressing attribute for your subnet,type="documentation"]. +**** If you use a launch template and specify multiple network interfaces, Amazon EC2 won't auto-assign a public `IPv4` address, even if `MapPublicIpOnLaunch` is set to `true`. For nodes to join the cluster in this scenario, you must either enable the cluster's private API server endpoint, or launch nodes in a private subnet with outbound internet access provided through an alternative method, such as a NAT Gateway. For more information, see link:AWSEC2/latest/UserGuide/using-instance-addressing.html[Amazon EC2 instance IP addressing,type="documentation"] in the _Amazon EC2 User Guide_. +==== + +*** *Configure SSH access to nodes* (Optional). Enabling SSH allows you to connect to your instances and gather diagnostic information if there are issues. We highly recommend enabling remote access when you create a node group. You can't enable remote access after the node group is created. ++ +If you chose to use a launch template, then this option isn't shown. To enable remote access to your nodes, specify a key pair in the launch template and ensure that the proper port is open to the nodes in the security groups that you specify in the launch template. For more information, see <>. ++ +NOTE: For [.noloc]`Windows`, this command doesn't enable SSH. Instead, it associates your Amazon EC2 key pair with the instance and allows you to RDP into the instance. +*** For *SSH key pair* (Optional), choose an Amazon EC2 SSH key to use. For [.noloc]`Linux` information, see link:AWSEC2/latest/UserGuide/ec2-key-pairs.html[Amazon EC2 key pairs and Linux instances,type="documentation"] in the _Amazon EC2 User Guide_. For [.noloc]`Windows` information, see link:AWSEC2/latest/WindowsGuide/ec2-key-pairs.html[Amazon EC2 key pairs and Windows instances,type="documentation"] in the _Amazon EC2 User Guide_. If you chose to use a launch template, then you can't select one. When an Amazon EC2 SSH key is provided for node groups using [.noloc]`Bottlerocket` AMIs, the administrative container is also enabled. For more information, see https://github.com/bottlerocket-os/bottlerocket#admin-container[Admin container] on [.noloc]`GitHub`. +*** For *Allow SSH remote access from*, if you want to limit access to specific instances, then select the security groups that are associated to those instances. 
If you don't select specific security groups, then SSH access is allowed from anywhere on the internet (`0.0.0.0/0`). +. On the *Review and create* page, review your managed node group configuration and choose *Create*. ++ +If nodes fail to join the cluster, then see <> in the Troubleshooting chapter. +. Watch the status of your nodes and wait for them to reach the `Ready` status. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl get nodes --watch +---- +. (GPU nodes only) If you chose a GPU instance type and an Amazon EKS optimized accelerated AMI, then you must apply the https://github.com/NVIDIA/k8s-device-plugin[NVIDIA device plugin for Kubernetes] as a [.noloc]`DaemonSet` on your cluster. Replace [.replaceable]`vX.X.X` with your desired https://github.com/NVIDIA/k8s-device-plugin/releases[NVIDIA/k8s-device-plugin] version before running the following command. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl apply -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/vX.X.X/deployments/static/nvidia-device-plugin.yml +---- + +== Install Kubernetes add-ons +Now that you have a working Amazon EKS cluster with nodes, you're ready to start installing [.noloc]`Kubernetes` add-ons and deploying applications to your cluster. The following documentation topics help you to extend the functionality of your cluster. + +* The link:IAM/latest/UserGuide/id_roles.html#iam-term-principal[IAM principal,type="documentation"] that created the cluster is the only principal that can make calls to the [.noloc]`Kubernetes` API server with `kubectl` or the {aws-management-console}. If you want other IAM principals to have access to your cluster, then you need to add them. For more information, see <> and <>. +* We recommend blocking [.noloc]`Pod` access to IMDS if the following conditions are true: ++ +** You plan to assign IAM roles to all of your [.noloc]`Kubernetes` service accounts so that [.noloc]`Pods` only have the minimum permissions that they need. +** No [.noloc]`Pods` in the cluster require access to the Amazon EC2 instance metadata service (IMDS) for other reasons, such as retrieving the current {aws} Region. + ++ +For more information, see https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node[Restrict access to the instance profile assigned to the worker node]. +* Configure the [.noloc]`Kubernetes` https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md[Cluster Autoscaler] to automatically adjust the number of nodes in your node groups. +* Deploy a <> to your cluster. +* <> with important tools for managing your cluster. diff --git a/latest/ug/nodes/delete-fargate-profile.adoc b/latest/ug/nodes/delete-fargate-profile.adoc new file mode 100644 index 00000000..cac678fe --- /dev/null +++ b/latest/ug/nodes/delete-fargate-profile.adoc @@ -0,0 +1,54 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[delete-fargate-profile,delete-fargate-profile.title]] += Delete a Fargate profile +:info_titleabbrev: Delete profiles + +[abstract] +-- +When you delete a Fargate profile, any [.noloc]`Pods` that were scheduled onto Fargate with the profile are deleted. +-- + +This topic describes how to delete a Fargate profile. When you delete a Fargate profile, any [.noloc]`Pods` that were scheduled onto Fargate with the profile are deleted. If those [.noloc]`Pods` match another Fargate profile, then they're scheduled on Fargate with that profile. If they no longer match any Fargate profiles, then they aren't scheduled onto Fargate and might remain as pending. + +Only one Fargate profile in a cluster can be in the `DELETING` status at a time. Wait for a Fargate profile to finish deleting before you can delete any other profiles in that cluster. + +You can delete a profile with any of the following tools: + +* <> +* <> +* <> + +== `eksctl` [[eksctl_delete_a_fargate_profile]] + +*Delete a Fargate profile with `eksctl`* + +Use the following command to delete a profile from a cluster. Replace every [.replaceable]`example value` with your own values. + +[source,bash,subs="verbatim,attributes"] +---- +eksctl delete fargateprofile --name my-profile --cluster my-cluster +---- + +== {aws-management-console} [[console_delete_a_fargate_profile]] + +*Delete a Fargate profile with {aws-management-console}* + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. In the left navigation pane, choose *Clusters*. In the list of clusters, choose the cluster that you want to delete the Fargate profile from. +. Choose the *Compute* tab. +. Choose the Fargate profile to delete, and then choose *Delete*. +. On the *Delete Fargate profile* page, enter the name of the profile, and then choose *Delete*. + + +== {aws} CLI [[awscli_delete_a_fargate_profile]] + +*Delete a Fargate profile with {aws} CLI* + +Use the following command to delete a profile from a cluster. Replace every [.replaceable]`example value` with your own values. + +[source,bash,subs="verbatim,attributes"] +---- +aws eks delete-fargate-profile --fargate-profile-name my-profile --cluster-name my-cluster +---- diff --git a/latest/ug/nodes/delete-managed-node-group.adoc b/latest/ug/nodes/delete-managed-node-group.adoc new file mode 100644 index 00000000..b957db2c --- /dev/null +++ b/latest/ug/nodes/delete-managed-node-group.adoc @@ -0,0 +1,74 @@ +//!!NODE_ROOT
include::../attributes.txt[]
[.topic]
[[delete-managed-node-group,delete-managed-node-group.title]]
= Delete a managed node group from your cluster
:info_titleabbrev: Delete

[abstract]
--
This topic describes how you can delete an Amazon EKS managed node group.
--

This topic describes how you can delete an Amazon EKS managed node group. When you delete a managed node group, Amazon EKS first sets the minimum, maximum, and desired size of your Auto Scaling group to zero. This then causes your node group to scale down.

Before each instance is terminated, Amazon EKS sends a signal to drain the [.noloc]`Pods` from that node. If the [.noloc]`Pods` haven't drained after a few minutes, Amazon EKS lets Auto Scaling continue the termination of the instance. After every instance is terminated, the Auto Scaling group is deleted.

[IMPORTANT]
====

If you delete a managed node group that uses a node IAM role that isn't used by any other managed node group in the cluster, the role is removed from the `aws-auth` `ConfigMap`. If any of the self-managed node groups in the cluster are using the same node IAM role, the self-managed nodes move to the `NotReady` status. Additionally, the cluster operation is disrupted. To add a mapping for the role you're using only for the self-managed node groups, see <>, if your cluster's platform version is at least the minimum version listed in the prerequisites section of <>. If your platform version is earlier than the required minimum version for access entries, you can add the entry back to the `aws-auth` `ConfigMap`. For more information, enter `eksctl create iamidentitymapping --help` in your terminal.

====

You can delete a managed node group with:

* <>
* <>
* <>


== `eksctl` [[eksctl-delete-managed-nodegroup]]

*Delete a managed node group with `eksctl`*

Enter the following command. Replace every [.replaceable]`example value` with your own values.

[source,bash,subs="verbatim,attributes"]
----
eksctl delete nodegroup \
  --cluster my-cluster \
  --name my-mng \
  --region region-code
----

For more options, see https://eksctl.io/usage/nodegroups/#deleting-and-draining-nodegroups[Deleting and draining nodegroups] in the `eksctl` documentation.

== {aws-management-console} [[console-delete-managed-nodegroup]]

*Delete a managed node group with {aws-management-console}*

. Open the link:eks/home#/clusters[Amazon EKS console,type="console"].
. On the *Clusters* page, choose the cluster that contains the node group to delete.
. On the selected cluster page, choose the *Compute* tab.
. In the *Node groups* section, choose the node group to delete. Then choose *Delete*.
. In the *Delete node group* confirmation dialog box, enter the name of the node group. Then choose *Delete*.

== {aws} CLI [[awscli-delete-managed-nodegroup]]

*Delete a managed node group with {aws} CLI*

. Enter the following command. Replace every [.replaceable]`example value` with your own values.
+
[source,bash,subs="verbatim,attributes"]
----
aws eks delete-nodegroup \
  --cluster-name my-cluster \
  --nodegroup-name my-mng \
  --region region-code
----
. Use the arrow keys on your keyboard to scroll through the response output. Press the `q` key when you're finished.
+
For more options, see the `link:cli/latest/reference/eks/delete-nodegroup.html[delete-nodegroup,type="documentation"]` command in the _{aws} CLI Command Reference_.
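Deleting a node group is asynchronous and can take several minutes while [.noloc]`Pods` drain and the Auto Scaling group is removed. One way to confirm from the command line that the deletion finished is to pair the {aws} CLI `nodegroup-deleted` waiter with `list-nodegroups`, as in the following sketch. It reuses the same example values as the previous command; replace them with your own.

[source,bash,subs="verbatim,attributes"]
----
# Block until the node group no longer exists (the waiter polls describe-nodegroup on an interval).
aws eks wait nodegroup-deleted \
  --cluster-name my-cluster \
  --nodegroup-name my-mng \
  --region region-code

# List the node groups that remain in the cluster to confirm the result.
aws eks list-nodegroups \
  --cluster-name my-cluster \
  --region region-code
----

If the deletion fails instead, `aws eks describe-nodegroup` returns details in the `health` section of the response.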
diff --git a/latest/ug/nodes/dockershim-deprecation.adoc b/latest/ug/nodes/dockershim-deprecation.adoc new file mode 100644 index 00000000..fe88a068 --- /dev/null +++ b/latest/ug/nodes/dockershim-deprecation.adoc @@ -0,0 +1,100 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[dockershim-deprecation,dockershim-deprecation.title]] += Migrate from `dockershim` to `containerd` +:info_titleabbrev: Dockershim deprecation + +[abstract] +-- +Starting with [.noloc]`Kubernetes` version `1.24`, Amazon EKS AMIs that are officially published only include the `containerd` runtime. +-- + +[.noloc]`Kubernetes` no longer supports `dockershim`. The [.noloc]`Kubernetes` team removed the runtime in [.noloc]`Kubernetes` version `1.24`. For more information, see https://kubernetes.io/blog/2022/01/07/kubernetes-is-moving-on-from-dockershim/[Kubernetes is Moving on From Dockershim: Commitments and Next Steps] on the [.noloc]`Kubernetes` Blog. + +Amazon EKS also ended support for `dockershim` starting with the [.noloc]`Kubernetes` version `1.24` release. Amazon EKS AMIs that are officially published have `containerd` as the only runtime starting with version `1.24`. This topic covers some details, but more information is available in link:containers/all-you-need-to-know-about-moving-to-containerd-on-amazon-eks[All you need to know about moving to containerd on Amazon EKS,type="blog"]. + +There's a `kubectl` plugin that you can use to see which of your [.noloc]`Kubernetes` workloads mount the [.noloc]`Docker` socket volume. For more information, see https://github.com/aws-containers/kubectl-detector-for-docker-socket[Detector for Docker Socket (DDS)] on [.noloc]`GitHub`. Amazon EKS AMIs that run [.noloc]`Kubernetes` versions that are earlier than `1.24` use [.noloc]`Docker` as the default runtime. However, these Amazon EKS AMIs have a bootstrap flag option that you can use to test out your workloads on any supported cluster using `containerd`. For more information, see <>. + +We will continue to publish AMIs for existing [.noloc]`Kubernetes` versions until the end of their support date. For more information, see <>. If you require more time to test your workloads on `containerd`, use a supported version before `1.24`. But, when you want to upgrade official Amazon EKS AMIs to version `1.24` or later, make sure to validate that your workloads run on `containerd`. + +The `containerd` runtime provides more reliable performance and security. `containerd` is the runtime that's being standardized on across Amazon EKS. Fargate and [.noloc]`Bottlerocket` already use `containerd` only. `containerd` helps to minimize the number of Amazon EKS AMI releases that are required to address `dockershim` https://cve.mitre.org/[Common Vulnerabilities and Exposures] (CVEs). Because `dockershim` already uses `containerd` internally, you might not need to make any changes. However, there are some situations where changes might or must be required: + + + +* You must make changes to applications that mount the [.noloc]`Docker` socket. For example, container images that are built with a container are impacted. Many monitoring tools also mount the [.noloc]`Docker` socket. You might need to wait for updates or re-deploy workloads for runtime monitoring. +* You might need to make changes for applications that are reliant on specific [.noloc]`Docker` settings. For example, the `HTTPS_PROXY` protocol is no longer supported. You must update applications that use this protocol. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/[dockerd] in the [.noloc]`Docker` Documentation. +* If you use the Amazon ECR credential helper to pull images, you must switch to the `kubelet` image credential provider. 
For more information, see https://kubernetes.io/docs/tasks/kubelet-credential-provider/kubelet-credential-provider/[Configure a kubelet image credential provider] in the [.noloc]`Kubernetes` documentation.
* Because Amazon EKS `1.24` no longer supports [.noloc]`Docker`, some flags that the https://github.com/awslabs/amazon-eks-ami/blob/main/templates/al2/runtime/bootstrap.sh[Amazon EKS bootstrap script] previously supported are no longer supported. Before moving to Amazon EKS `1.24` or later, you must remove any reference to flags that are now unsupported:
+
** `--container-runtime dockerd` (``containerd`` is the only supported value)
** `--enable-docker-bridge`
** `--docker-config-json`
* If you already have [.noloc]`Fluentd` configured for [.noloc]`Container Insights`, then you must migrate [.noloc]`Fluentd` to [.noloc]`Fluent Bit` before changing to `containerd`. The [.noloc]`Fluentd` parsers are configured to only parse log messages in JSON format. Unlike `dockerd`, the `containerd` container runtime has log messages that aren't in JSON format. If you don't migrate to [.noloc]`Fluent Bit`, some of the configured [.noloc]`Fluentd` parsers will generate a massive amount of errors inside the [.noloc]`Fluentd` container. For more information on migrating, see link:AmazonCloudWatch/latest/monitoring/Container-Insights-setup-logs-FluentBit.html[Set up Fluent Bit as a DaemonSet to send logs to CloudWatch Logs,type="documentation"].
* If you use a custom AMI and you are upgrading to Amazon EKS `1.24`, then you must make sure that IP forwarding is enabled for your worker nodes. This setting wasn't needed with [.noloc]`Docker` but is required for `containerd`. It is needed to troubleshoot [.noloc]`Pod`-to-[.noloc]`Pod`, [.noloc]`Pod`-to-external, or [.noloc]`Pod`-to-[.noloc]`apiserver` network connectivity.
+
To verify this setting on a worker node, run either of the following commands:
+
** `sysctl net.ipv4.ip_forward`
** `cat /proc/sys/net/ipv4/ip_forward`
+
If the output is `0`, then run either of the following commands to activate the `net.ipv4.ip_forward` kernel variable:
+
** `sysctl -w net.ipv4.ip_forward=1`
** `echo 1 > /proc/sys/net/ipv4/ip_forward`

To see how this setting is activated for the `containerd` runtime on Amazon EKS AMIs for Amazon Linux 2, see `https://github.com/awslabs/amazon-eks-ami/blob/main/templates/al2/provisioners/install-worker.sh[install-worker.sh]` on [.noloc]`GitHub`.

[[containerd-bootstrap,containerd-bootstrap.title]]
== Test Amazon Linux 2 migration from [.noloc]`Docker` to `containerd`

For [.noloc]`Kubernetes` version `1.23`, you can use an optional bootstrap flag to enable the `containerd` runtime for Amazon EKS optimized AL2 AMIs. This feature gives you a clear path to migrate to `containerd` when updating to version `1.24` or later. Amazon EKS ended support for [.noloc]`Docker` starting with the [.noloc]`Kubernetes` version `1.24` launch. The `containerd` runtime is widely adopted in the [.noloc]`Kubernetes` community and is a graduated project with the CNCF. You can test it by adding a node group to a new or existing cluster.

You can enable the bootstrap flag by creating one of the following types of node groups.

*Self-managed*::
Create the node group using the instructions in <>. Specify an Amazon EKS optimized AMI and the following text for the `BootstrapArguments` parameter.
+
[source,bash,subs="verbatim,attributes"]
----
--container-runtime containerd
----


*Managed*::
If you use `eksctl`, create a file named `my-nodegroup.yaml` with the following contents. Replace every [.replaceable]`example value` with your own values. The node group name can't be longer than 63 characters. It must start with a letter or digit, but can also include hyphens and underscores for the remaining characters. To retrieve an optimized AMI ID for `ami-[.replaceable]``1234567890abcdef0```, see <>.
+
[source,yaml,subs="verbatim,attributes"]
----
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: my-cluster
  region: region-code
  version: 1.23
managedNodeGroups:
  - name: my-nodegroup
    ami: ami-1234567890abcdef0
    overrideBootstrapCommand: |
      #!/bin/bash
      /etc/eks/bootstrap.sh my-cluster --container-runtime containerd
----
+
NOTE: If you launch many nodes simultaneously, you may also want to specify values for the `--apiserver-endpoint`, `--b64-cluster-ca`, and `--dns-cluster-ip` bootstrap arguments to avoid errors. For more information, see <>.
+
Run the following command to create the node group.
+
[source,bash,subs="verbatim,attributes"]
----
eksctl create nodegroup -f my-nodegroup.yaml
----
+
If you prefer to use a different tool to create your managed node group, you must deploy the node group using a launch template. In your launch template, specify an <>, then <> and provide the following user data. This user data passes arguments into the `bootstrap.sh` file. For more information about the bootstrap file, see https://github.com/awslabs/amazon-eks-ami/blob/main/templates/al2/runtime/bootstrap.sh[bootstrap.sh] on [.noloc]`GitHub`.
+
[source,bash,subs="verbatim,attributes"]
----
/etc/eks/bootstrap.sh my-cluster --container-runtime containerd
----
diff --git a/latest/ug/nodes/eks-ami-build-scripts.adoc b/latest/ug/nodes/eks-ami-build-scripts.adoc
new file mode 100644
index 00000000..ee6387c0
--- /dev/null
+++ b/latest/ug/nodes/eks-ami-build-scripts.adoc
@@ -0,0 +1,25 @@
+//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[eks-ami-build-scripts,eks-ami-build-scripts.title]] += Build a custom Amazon Linux AMI with a script +:info_titleabbrev: Custom builds + +[abstract] +-- +Amazon Elastic Kubernetes Service (Amazon EKS) has open-source scripts that are used to build the Amazon EKS optimized AMI. +-- + +Amazon Elastic Kubernetes Service (Amazon EKS) has open-source scripts that are used to build the Amazon EKS optimized AMI. These build scripts are available https://github.com/awslabs/amazon-eks-ami[on GitHub]. + +The Amazon EKS optimized Amazon Linux AMIs are built on top of Amazon Linux 2 (AL2) and Amazon Linux 2023 (AL2023), specifically for use as a node in Amazon EKS clusters. You can use this repository to view the specifics of how the Amazon EKS team configures `kubelet`, the runtime, the {aws} IAM Authenticator for [.noloc]`Kubernetes`, and build your own Amazon Linux based AMI from scratch. + +The build scripts repository includes a https://www.packer.io/[HashiCorp packer] template and build scripts to generate an AMI. These scripts are the source of truth for Amazon EKS optimized AMI builds, so you can follow the [.noloc]`GitHub` repository to monitor changes to our AMIs. For example, perhaps you want your own AMI to use the same version of [.noloc]`Docker` that the Amazon EKS team uses for the official AMI. + +The [.noloc]`GitHub` repository also contains the specialized https://github.com/awslabs/amazon-eks-ami/blob/main/templates/al2/runtime/bootstrap.sh[bootstrap script] and https://awslabs.github.io/amazon-eks-ami/nodeadm/[nodeadm script] that runs at boot time to configure your instance's certificate data, control plane endpoint, cluster name, and more. + +Additionally, the [.noloc]`GitHub` repository contains our Amazon EKS node {aws} CloudFormation templates. These templates make it easier to spin up an instance running an Amazon EKS optimized AMI and register it with a cluster. + +For more information, see the repositories on [.noloc]`GitHub` at https://github.com/awslabs/amazon-eks-ami. + +Amazon EKS optimized AL2 contains an optional bootstrap flag to enable the `containerd` runtime. diff --git a/latest/ug/nodes/eks-ami-versions-bottlerocket.adoc b/latest/ug/nodes/eks-ami-versions-bottlerocket.adoc new file mode 100644 index 00000000..a05207d6 --- /dev/null +++ b/latest/ug/nodes/eks-ami-versions-bottlerocket.adoc @@ -0,0 +1,15 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[eks-ami-versions-bottlerocket,eks-ami-versions-bottlerocket.title]] += Retrieve [.noloc]`Bottlerocket` AMI version information +:info_titleabbrev: Get version information + +[abstract] +-- +This topic gives resources for Amazon EKS optimized [.noloc]`Bottlerocket` AMIs version information. +-- + +Each [.noloc]`Bottlerocket` AMI release includes various versions of https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/[kubelet], the [.noloc]`Bottlerocket` kernel, and https://containerd.io/[containerd]. Accelerated AMI variants also include various versions of the [.noloc]`NVIDIA` driver. You can find this version information in the https://bottlerocket.dev/en/os/[OS] topic of the _Bottlerocket Documentation_. From this page, navigate to the applicable _Version Information_ sub-topic. + +The _Bottlerocket Documentation_ can sometimes lag behind the versions that are available on GitHub. You can find a list of changes for the latest versions in the https://github.com/bottlerocket-os/bottlerocket/releases[releases] on [.noloc]`GitHub`. diff --git a/latest/ug/nodes/eks-ami-versions-windows.adoc b/latest/ug/nodes/eks-ami-versions-windows.adoc new file mode 100644 index 00000000..a858c1a0 --- /dev/null +++ b/latest/ug/nodes/eks-ami-versions-windows.adoc @@ -0,0 +1,3993 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[eks-ami-versions-windows,eks-ami-versions-windows.title]] += Retrieve [.noloc]`Windows` AMI version information +:info_titleabbrev: Get version information + +[abstract] +-- +This topic lists versions of the Amazon EKS optimized [.noloc]`Windows` AMIs and their corresponding versions of `kubelet`, `containerd`, and `csi-proxy`. +-- + +[IMPORTANT] +==== + +Extended Support for Amazon EKS optimized [.noloc]`Windows` AMIs that are published by {aws} isn't available for [.noloc]`Kubernetes` version `1.23` but is available for [.noloc]`Kubernetes` version `1.24` and higher. + +==== + +This topic lists versions of the Amazon EKS optimized [.noloc]`Windows` AMIs and their corresponding versions of https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/[kubelet], https://containerd.io/[containerd], and https://github.com/kubernetes-csi/csi-proxy[csi-proxy]. + +The Amazon EKS optimized AMI metadata, including the AMI ID, for each variant can be retrieved programmatically. For more information, see <>. + +AMIs are versioned by [.noloc]`Kubernetes` version and the release date of the AMI in the following format: + +[source,none,subs="verbatim,attributes"] +---- +k8s_major_version.k8s_minor_version-release_date +---- + +[NOTE] +==== + +Amazon EKS managed node groups support the November 2022 and later releases of the [.noloc]`Windows` AMIs. + +==== + +[[eks-ami-versions-windows-2022-core,eks-ami-versions-windows-2022-core.title]] +== Amazon EKS optimized [.noloc]`Windows` Server 2022 Core AMI + +The following tables list the current and previous versions of the Amazon EKS optimized [.noloc]`Windows` Server 2022 Core AMI. + +==== +[role="tablist"] +*[.noloc]`Kubernetes` version [.noloc]`1.31`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.31-2024.12.13` +|`1.31.3` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.11.12` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.10.08` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.10.01` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.09.10` +|`1.31.0` +|`1.7.20` +|`1.1.3` +| +|=== + +*[.noloc]`Kubernetes` version [.noloc]`1.30`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.30-2024.12.11` +|`1.30.7` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.11.12` +|`1.30.4` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.10.08` +|`1.30.4` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.09.10` +|`1.30.2` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.08.13` +|`1.30.2` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.07.10` +|`1.30.2` +|`1.7.14` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.30-2024.06.17` +|`1.30.0` +|`1.7.14` +|`1.1.2` +|Upgraded `containerd` to `1.7.14`. + +|`1.30-2024.05.15` +|`1.30.0` +|`1.6.28` +|`1.1.2` +| +|=== + +*[.noloc]`Kubernetes` version [.noloc]`1.29`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.29-2024.12.11` +|`1.29.10` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.11.12` +|`1.29.8` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.10.08` +|`1.29.8` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.09.10` +|`1.29.6` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.08.13` +|`1.29.6` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.07.10` +|`1.29.6` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. 
+ +|`1.29-2024.06.17` +|`1.29.3` +|`1.7.11` +|`1.1.2` +| + +|`1.29-2024.05.15` +|`1.29.3` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. Upgraded `kubelet` to `1.29.3`. + +|`1.29-2024.04.09` +|`1.29.0` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.29-2024.03.12` +|`1.29.0` +|`1.6.25` +|`1.1.2` +| + +|`1.29-2024.02.13` +|`1.29.0` +|`1.6.25` +|`1.1.2` +| + +|`1.29-2024.02.06` +|`1.29.0` +|`1.6.25` +|`1.1.2` +|Fixed a bug where the pause image was incorrectly deleted by `kubelet` garbage collection process. + +|`1.29-2024.01.11` +|`1.29.0` +|`1.6.18` +|`1.1.2` +|Excluded Standalone [.noloc]`Windows` Update https://support.microsoft.com/en-au/topic/kb5034439-windows-recovery-environment-update-for-azure-stack-hci-version-22h2-and-windows-server-2022-january-9-2024-6f9d26e6-784c-4503-a3c6-0beedda443ca[KB5034439] on [.noloc]`Windows` Server 2022 Core AMIs. The KB applies only to [.noloc]`Windows` installations with a separate [.noloc]`WinRE` partition, which aren't included with any of our Amazon EKS Optimized [.noloc]`Windows` AMIs. +|=== + +*[.noloc]`Kubernetes` version [.noloc]`1.28`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.28-2024.12.11` +|`1.28.15` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.11.12` +|`1.28.13` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.10.08` +|`1.28.13` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.09.10` +|`1.28.11` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.08.13` +|`1.28.11` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.07.10` +|`1.28.11` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.28-2024.06.17` +|`1.28.8` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.28-2024.05.14` +|`1.28.8` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.28.8`. + +|`1.28-2024.04.09` +|`1.28.5` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.28-2024.03.12` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2024.02.13` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2024.01.11` +|`1.28.5` +|`1.6.18` +|`1.1.2` +|Excluded Standalone [.noloc]`Windows` Update https://support.microsoft.com/en-au/topic/kb5034439-windows-recovery-environment-update-for-azure-stack-hci-version-22h2-and-windows-server-2022-january-9-2024-6f9d26e6-784c-4503-a3c6-0beedda443ca[KB5034439] on [.noloc]`Windows` Server 2022 Core AMIs. The KB applies only to [.noloc]`Windows` installations with a separate [.noloc]`WinRE` partition, which aren't included with any of our Amazon EKS Optimized [.noloc]`Windows` AMIs. + +|`1.28-2023.12.12` +|`1.28.3` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2023.11.14` +|`1.28.3` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.28-2023.10.19` +|`1.28.2` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.28-2023-09.27` +|`1.28.2` +|`1.6.6` +|`1.1.2` +|Fixed a https://github.com/advisories/GHSA-6xv5-86q9-7xr8[security advisory] in `kubelet`. 
+ +|`1.28-2023.09.12` +|`1.28.1` +|`1.6.6` +|`1.1.2` +| +|=== + +*[.noloc]`Kubernetes` version [.noloc]`1.27`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.27-2024.12.11` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.11.12` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.10.08` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.09.10` +|`1.27.15` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.08.13` +|`1.27.15` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.07.10` +|`1.27.15` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.27-2024.06.17` +|`1.27.12` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.27-2024.05.14` +|`1.27.12` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.27.12`. + +|`1.27-2024.04.09` +|`1.27.9` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.27-2024.03.12` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2024.02.13` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2024.01.11` +|`1.27.9` +|`1.6.18` +|`1.1.2` +|Excluded Standalone [.noloc]`Windows` Update https://support.microsoft.com/en-au/topic/kb5034439-windows-recovery-environment-update-for-azure-stack-hci-version-22h2-and-windows-server-2022-january-9-2024-6f9d26e6-784c-4503-a3c6-0beedda443ca[KB5034439] on [.noloc]`Windows` Server 2022 Core AMIs. The KB applies only to [.noloc]`Windows` installations with a separate [.noloc]`WinRE` partition, which aren't included with any of our Amazon EKS Optimized [.noloc]`Windows` AMIs. + +|`1.27-2023.12.12` +|`1.27.7` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2023.11.14` +|`1.27.7` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.27-2023.10.19` +|`1.27.6` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.27-2023-09.27` +|`1.27.6` +|`1.6.6` +|`1.1.2` +|Fixed a https://github.com/advisories/GHSA-6xv5-86q9-7xr8[security advisory] in `kubelet`. + +|`1.27-2023.09.12` +|`1.27.4` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.27-2023.08.17` +|`1.27.4` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.27-2023.08.08` +|`1.27.3` +|`1.6.6` +|`1.1.1` +| + +|`1.27-2023.07.11` +|`1.27.3` +|`1.6.6` +|`1.1.1` +| + +|`1.27-2023.06.20` +|`1.27.1` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.27-2023.06.14` +|`1.27.1` +|`1.6.6` +|`1.1.1` +|Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.27-2023.06.06` +|`1.27.1` +|`1.6.6` +|`1.1.1` +|Fixed `containers-roadmap` https://github.com/aws/containers-roadmap/issues/2042[issue #2042], which caused nodes to fail pulling private Amazon ECR images. 
+ +|`1.27-2023.05.17` +|`1.27.1` +|`1.6.6` +|`1.1.1` +| +|=== + +*[.noloc]`Kubernetes` version [.noloc]`1.26`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.26-2024.12.11` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.11.12` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.10.08` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.09.10` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.08.13` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.07.10` +|`1.26.15` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.26-2024.06.17` +|`1.26.15` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.26-2024.05.14` +|`1.26.15` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.26.15`. + +|`1.26-2024.04.09` +|`1.26.12` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.26-2024.03.12` +|`1.26.12` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2024.02.13` +|`1.26.12` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2024.01.11` +|`1.26.12` +|`1.6.18` +|`1.1.2` +|Excluded Standalone [.noloc]`Windows` Update https://support.microsoft.com/en-au/topic/kb5034439-windows-recovery-environment-update-for-azure-stack-hci-version-22h2-and-windows-server-2022-january-9-2024-6f9d26e6-784c-4503-a3c6-0beedda443ca[KB5034439] on [.noloc]`Windows` Server 2022 Core AMIs. The KB applies only to [.noloc]`Windows` installations with a separate [.noloc]`WinRE` partition, which aren't included with any of our Amazon EKS Optimized [.noloc]`Windows` AMIs. + +|`1.26-2023.12.12` +|`1.26.10` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2023.11.14` +|`1.26.10` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.26-2023.10.19` +|`1.26.9` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.26.9`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.26-2023.09.12` +|`1.26.7` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.26-2023.08.17` +|`1.26.7` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.26-2023.08.08` +|`1.26.6` +|`1.6.6` +|`1.1.1` +| + +|`1.26-2023.07.11` +|`1.26.6` +|`1.6.6` +|`1.1.1` +| + +|`1.26-2023.06.20` +|`1.26.4` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.26-2023.06.14` +|`1.26.4` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.26.4`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.26-2023.05.09` +|`1.26.2` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). + +|`1.26-2023.04.26` +|`1.26.2` +|`1.6.6` +|`1.1.1` +| + +|`1.26-2023.04.11` +|`1.26.2` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. 
+ +|`1.26-2023.03.24` +|`1.26.2` +|`1.6.6` +|`1.1.1` +| +|=== + +*[.noloc]`Kubernetes` version [.noloc]`1.25`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.25-2024.12.13` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.11.12` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.10.08` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.09.10` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.08.13` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.07.10` +|`1.25.16` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.25-2024.06.17` +|`1.25.16` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.25-2024.05.14` +|`1.25.16` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. + +|`1.25-2024.04.09` +|`1.25.16` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.25-2024.03.12` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2024.02.13` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2024.01.11` +|`1.25.16` +|`1.6.18` +|`1.1.2` +|Excluded Standalone [.noloc]`Windows` Update https://support.microsoft.com/en-au/topic/kb5034439-windows-recovery-environment-update-for-azure-stack-hci-version-22h2-and-windows-server-2022-january-9-2024-6f9d26e6-784c-4503-a3c6-0beedda443ca[KB5034439] on [.noloc]`Windows` Server 2022 Core AMIs. The KB applies only to [.noloc]`Windows` installations with a separate [.noloc]`WinRE` partition, which aren't included with any of our Amazon EKS Optimized [.noloc]`Windows` AMIs. + +|`1.25-2023.12.12` +|`1.25.15` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2023.11.14` +|`1.25.15` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.25-2023.10.19` +|`1.25.14` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.25.14`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.25-2023.09.12` +|`1.25.12` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.25-2023.08.17` +|`1.25.12` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.25-2023.08.08` +|`1.25.9` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.07.11` +|`1.25.9` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.06.20` +|`1.25.9` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.25-2023.06.14` +|`1.25.9` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.25.9`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.25-2023.05.09` +|`1.25.7` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). + +|`1.25-2023.04.11` +|`1.25.7` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.25-2023.03.27` +|`1.25.6` +|`1.6.6` +|`1.1.1` +|Installed a link:containers/domainless-windows-authentication-for-amazon-eks-windows-pods[domainless gMSA plugin,type="blog"] to facilitate [.noloc]`gMSA` authentication for [.noloc]`Windows` containers on Amazon EKS. 
+ +|`1.25-2023.03.20` +|`1.25.6` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.02.14` +|`1.25.6` +|`1.6.6` +|`1.1.1` +| +|=== + +*[.noloc]`Kubernetes` version [.noloc]`1.24`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.24-2024.12.11` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.11.12` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.10.08` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.09.10` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.08.13` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.07.10` +|`1.24.17` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.24-2024.06.17` +|`1.24.17` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.24-2024.05.14` +|`1.24.17` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. + +|`1.24-2024.04.09` +|`1.24.17` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.24-2024.03.12` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2024.02.13` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2024.01.11` +|`1.24.17` +|`1.6.18` +|`1.1.2` +|Excluded Standalone [.noloc]`Windows` Update https://support.microsoft.com/en-au/topic/kb5034439-windows-recovery-environment-update-for-azure-stack-hci-version-22h2-and-windows-server-2022-january-9-2024-6f9d26e6-784c-4503-a3c6-0beedda443ca[KB5034439] on [.noloc]`Windows` Server 2022 Core AMIs. The KB applies only to [.noloc]`Windows` installations with a separate [.noloc]`WinRE` partition, which aren't included with any of our Amazon EKS Optimized [.noloc]`Windows` AMIs. + +|`1.24-2023.12.12` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2023.11.14` +|`1.24.17` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.24-2023.10.19` +|`1.24.17` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.24.17`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.24-2023.09.12` +|`1.24.16` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.24-2023.08.17` +|`1.24.16` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.24-2023.08.08` +|`1.24.13` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.07.11` +|`1.24.13` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.06.20` +|`1.24.13` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.24-2023.06.14` +|`1.24.13` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.24.13`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.24-2023.05.09` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). + +|`1.24-2023.04.11` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.24-2023.03.27` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Installed a link:containers/domainless-windows-authentication-for-amazon-eks-windows-pods[domainless gMSA plugin,type="blog"] to facilitate gMSA authentication for [.noloc]`Windows` containers on Amazon EKS. 
+ +|`1.24-2023.03.20` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|[.noloc]`Kubernetes` version downgraded to `1.24.7` because `1.24.10` has a reported issue in `kube-proxy`. + +|`1.24-2023.02.14` +|`1.24.10` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.01.23` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.01.11` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2022.12.13` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2022.10.11` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| +|=== +==== + +[[eks-ami-versions-windows-2022-full,eks-ami-versions-windows-2022-full.title]] +== Amazon EKS optimized [.noloc]`Windows` Server 2022 Full AMI + +The following tables list the current and previous versions of the Amazon EKS optimized [.noloc]`Windows` Server 2022 Full AMI. + +==== +[role="tablist"] +*[.noloc]`Kubernetes` version [.noloc]`1.31`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.31-2024.12.13` +|`1.31.3` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.11.12` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.10.08` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.10.01` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.09.10` +|`1.31.0` +|`1.7.20` +|`1.1.3` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.30`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.30-2024.12.11` +|`1.30.7` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.11.12` +|`1.30.4` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.10.08` +|`1.30.4` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.09.10` +|`1.30.2` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.08.13` +|`1.30.2` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.07.10` +|`1.30.2` +|`1.7.14` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.30-2024.06.17` +|`1.30.0` +|`1.7.14` +|`1.1.2` +|Upgraded `containerd` to `1.7.14`. + +|`1.30-2024.05.15` +|`1.30.0` +|`1.6.28` +|`1.1.2` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.29`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.29-2024.12.11` +|`1.29.10` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.11.12` +|`1.29.8` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.10.08` +|`1.29.8` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.09.10` +|`1.29.6` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.08.13` +|`1.29.6` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.07.10` +|`1.29.6` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.29-2024.06.17` +|`1.29.3` +|`1.7.11` +|`1.1.2` +| + +|`1.29-2024.05.15` +|`1.29.3` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. Upgraded `kubelet` to `1.29.3`. + +|`1.29-2024.04.09` +|`1.29.0` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.29-2024.03.12` +|`1.29.0` +|`1.6.25` +|`1.1.2` +| + +|`1.29-2024.02.13` +|`1.29.0` +|`1.6.25` +|`1.1.2` +| + +|`1.29-2024.02.06` +|`1.29.0` +|`1.6.25` +|`1.1.2` +|Fixed a bug where the pause image was incorrectly deleted by `kubelet` garbage collection process. 
+ +|`1.29-2024.01.09` +|`1.29.0` +|`1.6.18` +|`1.1.2` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.28`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.28-2024.12.11` +|`1.28.15` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.11.12` +|`1.28.13` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.10.08` +|`1.28.13` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.09.10` +|`1.28.11` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.08.13` +|`1.28.11` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.07.10` +|`1.28.11` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.28-2024.06.17` +|`1.28.8` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.28-2024.05.14` +|`1.28.8` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.28.8`. + +|`1.28-2024.04.09` +|`1.28.5` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.28-2024.03.12` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2024.02.13` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2024.01.09` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2023.12.12` +|`1.28.3` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2023.11.14` +|`1.28.3` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.28-2023.10.19` +|`1.28.2` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.28-2023-09.27` +|`1.28.2` +|`1.6.6` +|`1.1.2` +|Fixed a https://github.com/advisories/GHSA-6xv5-86q9-7xr8[security advisory] in `kubelet`. + +|`1.28-2023.09.12` +|`1.28.1` +|`1.6.6` +|`1.1.2` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.27`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.27-2024.12.11` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.11.12` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.10.08` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.09.10` +|`1.27.15` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.08.13` +|`1.27.15` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.07.10` +|`1.27.15` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.27-2024.06.17` +|`1.27.12` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.27-2024.05.14` +|`1.27.12` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.27.12`. + +|`1.27-2024.04.09` +|`1.27.9` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.27-2024.03.12` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2024.02.13` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2024.01.09` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2023.12.12` +|`1.27.7` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2023.11.14` +|`1.27.7` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.27-2023.10.19` +|`1.27.6` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.27-2023-09.27` +|`1.27.6` +|`1.6.6` +|`1.1.2` +|Fixed a https://github.com/advisories/GHSA-6xv5-86q9-7xr8[security advisory] in `kubelet`. + +|`1.27-2023.09.12` +|`1.27.4` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. 
+ +|`1.27-2023.08.17` +|`1.27.4` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.27-2023.08.08` +|`1.27.3` +|`1.6.6` +|`1.1.1` +| + +|`1.27-2023.07.11` +|`1.27.3` +|`1.6.6` +|`1.1.1` +| + +|`1.27-2023.06.20` +|`1.27.1` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.27-2023.06.14` +|`1.27.1` +|`1.6.6` +|`1.1.1` +|Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.27-2023.06.06` +|`1.27.1` +|`1.6.6` +|`1.1.1` +|Fixed `containers-roadmap` https://github.com/aws/containers-roadmap/issues/2042[issue #2042], which caused nodes to fail pulling private Amazon ECR images. + +|`1.27-2023.05.18` +|`1.27.1` +|`1.6.6` +|`1.1.1` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.26`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.26-2024.12.11` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.11.12` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.10.08` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.09.10` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.08.13` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.07.10` +|`1.26.15` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.26-2024.06.17` +|`1.26.15` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.26-2024.05.14` +|`1.26.15` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.26.15`. + +|`1.26-2024.04.09` +|`1.26.12` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.26-2024.03.12` +|`1.26.12` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2024.02.13` +|`1.26.12` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2024.01.09` +|`1.26.12` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2023.12.12` +|`1.26.10` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2023.11.14` +|`1.26.10` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.26-2023.10.19` +|`1.26.9` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.26.9`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.26-2023.09.12` +|`1.26.7` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.26-2023.08.17` +|`1.26.7` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.26-2023.08.08` +|`1.26.6` +|`1.6.6` +|`1.1.1` +| + +|`1.26-2023.07.11` +|`1.26.6` +|`1.6.6` +|`1.1.1` +| + +|`1.26-2023.06.20` +|`1.26.4` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.26-2023.06.14` +|`1.26.4` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.26.4`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.26-2023.05.09` +|`1.26.2` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). 
+ +|`1.26-2023.04.26` +|`1.26.2` +|`1.6.6` +|`1.1.1` +| + +|`1.26-2023.04.11` +|`1.26.2` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.26-2023.03.24` +|`1.26.2` +|`1.6.6` +|`1.1.1` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.25`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.25-2024.12.13` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.11.12` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.10.08` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.09.10` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.08.13` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.07.10` +|`1.25.16` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.25-2024.06.17` +|`1.25.16` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.25-2024.05.14` +|`1.25.16` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. + +|`1.25-2024.04.09` +|`1.25.16` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.25-2024.03.12` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2024.02.13` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2024.01.09` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2023.12.12` +|`1.25.15` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2023.11.14` +|`1.25.15` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.25-2023.10.19` +|`1.25.14` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.25.14`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.25-2023.09.12` +|`1.25.12` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.25-2023.08.17` +|`1.25.12` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.25-2023.08.08` +|`1.25.9` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.07.11` +|`1.25.9` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.06.20` +|`1.25.9` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.25-2023.06.14` +|`1.25.9` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.25.9`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.25-2023.05.09` +|`1.25.7` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). + +|`1.25-2023.04.11` +|`1.25.7` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.25-2023.03.27` +|`1.25.6` +|`1.6.6` +|`1.1.1` +|Installed a link:containers/domainless-windows-authentication-for-amazon-eks-windows-pods[domainless gMSA plugin,type="blog"] to facilitate [.noloc]`gMSA` authentication for [.noloc]`Windows` containers on Amazon EKS. 
+ +|`1.25-2023.03.20` +|`1.25.6` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.02.14` +|`1.25.6` +|`1.6.6` +|`1.1.1` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.24`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.24-2024.12.11` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.11.12` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.10.08` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.09.10` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.08.13` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.07.10` +|`1.24.17` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.24-2024.06.17` +|`1.24.17` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.24-2024.05.14` +|`1.24.17` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. + +|`1.24-2024.04.09` +|`1.24.17` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.24-2024.03.12` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2024.02.13` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2024.01.09` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2023.12.12` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2023.11.14` +|`1.24.17` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.24-2023.10.19` +|`1.24.17` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.24.17`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.24-2023.09.12` +|`1.24.16` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.24-2023.08.17` +|`1.24.16` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.24-2023.08.08` +|`1.24.13` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.07.11` +|`1.24.13` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.06.20` +|`1.24.13` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.24-2023.06.14` +|`1.24.13` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.24.13`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.24-2023.05.09` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). + +|`1.24-2023.04.11` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.24-2023.03.27` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Installed a link:containers/domainless-windows-authentication-for-amazon-eks-windows-pods[domainless gMSA plugin,type="blog"] to facilitate [.noloc]`gMSA` authentication for [.noloc]`Windows` containers on Amazon EKS. + +|`1.24-2023.03.20` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|[.noloc]`Kubernetes` version downgraded to `1.24.7` because `1.24.10` has a reported issue in `kube-proxy`. 
+ +|`1.24-2023.02.14` +|`1.24.10` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.01.23` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.01.11` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2022.12.14` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2022.10.11` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| +|=== +==== + +[[eks-ami-versions-windows-2019-core,eks-ami-versions-windows-2019-core.title]] +== Amazon EKS optimized [.noloc]`Windows` Server 2019 Core AMI + +The following tables list the current and previous versions of the Amazon EKS optimized [.noloc]`Windows` Server 2019 Core AMI. + +==== +[role="tablist"] +*[.noloc]`Kubernetes` version [.noloc]`1.31`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.31-2024.12.13` +|`1.31.3` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.11.12` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.10.08` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.10.01` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.09.10` +|`1.31.0` +|`1.7.20` +|`1.1.3` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.30`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.30-2024.12.11` +|`1.30.7` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.11.12` +|`1.30.4` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.10.08` +|`1.30.4` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.09.10` +|`1.30.2` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.08.13` +|`1.30.2` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.07.10` +|`1.30.2` +|`1.7.14` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.30-2024.06.17` +|`1.30.0` +|`1.7.14` +|`1.1.2` +|Upgraded `containerd` to `1.7.14`. + +|`1.30-2024.05.15` +|`1.30.0` +|`1.6.28` +|`1.1.2` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.29`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.29-2024.12.11` +|`1.29.10` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.11.12` +|`1.29.8` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.10.08` +|`1.29.8` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.09.10` +|`1.29.6` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.08.13` +|`1.29.6` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.07.10` +|`1.29.6` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.29-2024.06.17` +|`1.29.3` +|`1.7.11` +|`1.1.2` +| + +|`1.29-2024.05.15` +|`1.29.3` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. Upgraded `kubelet` to `1.29.3`. + +|`1.29-2024.04.09` +|`1.29.0` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.29-2024.03.13` +|`1.29.0` +|`1.6.25` +|`1.1.2` +| + +|`1.29-2024.02.13` +|`1.29.0` +|`1.6.25` +|`1.1.2` +| + +|`1.29-2024.02.06` +|`1.29.0` +|`1.6.25` +|`1.1.2` +|Fixed a bug where the pause image was incorrectly deleted by `kubelet` garbage collection process. 
+ +|`1.29-2024.01.09` +|`1.29.0` +|`1.6.18` +|`1.1.2` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.28`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.28-2024.12.11` +|`1.28.15` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.11.12` +|`1.28.13` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.10.08` +|`1.28.13` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.09.10` +|`1.28.11` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.08.13` +|`1.28.11` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.07.10` +|`1.28.11` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.28-2024.06.17` +|`1.28.8` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.28-2024.05.14` +|`1.28.8` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.28.8`. + +|`1.28-2024.04.09` +|`1.28.5` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.28-2024.03.13` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2024.02.13` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2024.01.09` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2023.12.12` +|`1.28.3` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2023.11.14` +|`1.28.3` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.28-2023.10.19` +|`1.28.2` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.28-2023-09.27` +|`1.28.2` +|`1.6.6` +|`1.1.2` +|Fixed a https://github.com/advisories/GHSA-6xv5-86q9-7xr8[security advisory] in `kubelet`. + +|`1.28-2023.09.12` +|`1.28.1` +|`1.6.6` +|`1.1.2` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.27`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.27-2024.12.11` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.11.12` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.10.08` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.09.10` +|`1.27.15` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.08.13` +|`1.27.15` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.07.10` +|`1.27.15` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.27-2024.06.17` +|`1.27.12` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.27-2024.05.14` +|`1.27.12` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.27.12`. + +|`1.27-2024.04.09` +|`1.27.9` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.27-2024.03.13` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2024.02.13` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2024.01.09` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2023.12.12` +|`1.27.7` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2023.11.14` +|`1.27.7` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.27-2023.10.19` +|`1.27.6` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.27-2023-09.27` +|`1.27.6` +|`1.6.6` +|`1.1.2` +|Fixed a https://github.com/advisories/GHSA-6xv5-86q9-7xr8[security advisory] in `kubelet`. + +|`1.27-2023.09.12` +|`1.27.4` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. 
+
+|`1.27-2023.08.17`
+|`1.27.4`
+|`1.6.6`
+|`1.1.2`
+|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`.
+
+|`1.27-2023.08.08`
+|`1.27.3`
+|`1.6.6`
+|`1.1.1`
+|
+
+|`1.27-2023.07.11`
+|`1.27.3`
+|`1.6.6`
+|`1.1.1`
+|
+
+|`1.27-2023.06.20`
+|`1.27.1`
+|`1.6.6`
+|`1.1.1`
+|Resolved issue that was causing the DNS suffix search list to be incorrectly populated.
+
+|`1.27-2023.06.14`
+|`1.27.1`
+|`1.6.6`
+|`1.1.1`
+|Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93].
+
+|`1.27-2023.06.06`
+|`1.27.1`
+|`1.6.6`
+|`1.1.1`
+|Fixed `containers-roadmap` https://github.com/aws/containers-roadmap/issues/2042[issue #2042], which caused nodes to fail pulling private Amazon ECR images.
+
+|`1.27-2023.05.18`
+|`1.27.1`
+|`1.6.6`
+|`1.1.1`
+|
+|===
+
+
+*[.noloc]`Kubernetes` version [.noloc]`1.26`*::
++
+[cols="1,1,1,1,1", options="header"]
+|===
+|AMI version
+|kubelet version
+|containerd version
+|csi-proxy version
+|Release notes
+
+|`1.26-2024.12.11`
+|`1.26.15`
+|`1.7.14`
+|`1.1.3`
+|
+
+|`1.26-2024.11.12`
+|`1.26.15`
+|`1.7.14`
+|`1.1.3`
+|
+
+|`1.26-2024.10.09`
+|`1.26.15`
+|`1.7.14`
+|`1.1.3`
+|
+
+|`1.26-2024.09.10`
+|`1.26.15`
+|`1.7.14`
+|`1.1.3`
+|
+
+|`1.26-2024.08.13`
+|`1.26.15`
+|`1.7.14`
+|`1.1.3`
+|
+
+|`1.26-2024.07.10`
+|`1.26.15`
+|`1.7.11`
+|`1.1.2`
+|Includes patches for `CVE-2024-5321`.
+
+|`1.26-2024.06.17`
+|`1.26.15`
+|`1.7.11`
+|`1.1.2`
+|Upgraded `containerd` to `1.7.11`.
+
+|`1.26-2024.05.14`
+|`1.26.15`
+|`1.6.28`
+|`1.1.2`
+|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.26.15`.
+
+|`1.26-2024.04.09`
+|`1.26.12`
+|`1.6.25`
+|`1.1.2`
+|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`.
+
+|`1.26-2024.03.13`
+|`1.26.12`
+|`1.6.18`
+|`1.1.2`
+|
+
+|`1.26-2024.02.13`
+|`1.26.12`
+|`1.6.18`
+|`1.1.2`
+|
+
+|`1.26-2024.01.09`
+|`1.26.12`
+|`1.6.18`
+|`1.1.2`
+|
+
+|`1.26-2023.12.12`
+|`1.26.10`
+|`1.6.18`
+|`1.1.2`
+|
+
+|`1.26-2023.11.14`
+|`1.26.10`
+|`1.6.18`
+|`1.1.2`
+|Includes patches for `CVE-2023-5528`.
+
+|`1.26-2023.10.19`
+|`1.26.9`
+|`1.6.18`
+|`1.1.2`
+|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.26.9`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`).
+
+|`1.26-2023.09.12`
+|`1.26.7`
+|`1.6.6`
+|`1.1.2`
+|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100].
+
+|`1.26-2023.08.17`
+|`1.26.7`
+|`1.6.6`
+|`1.1.2`
+|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`.
+
+|`1.26-2023.08.08`
+|`1.26.6`
+|`1.6.6`
+|`1.1.1`
+|
+
+|`1.26-2023.07.11`
+|`1.26.6`
+|`1.6.6`
+|`1.1.1`
+|
+
+|`1.26-2023.06.20`
+|`1.26.4`
+|`1.6.6`
+|`1.1.1`
+|Resolved issue that was causing the DNS suffix search list to be incorrectly populated.
+
+|`1.26-2023.06.14`
+|`1.26.4`
+|`1.6.6`
+|`1.1.1`
+|Upgraded [.noloc]`Kubernetes` to `1.26.4`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93].
+
+|`1.26-2023.05.09`
+|`1.26.2`
+|`1.6.6`
+|`1.1.1`
+|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`).
+ +|`1.26-2023.04.26` +|`1.26.2` +|`1.6.6` +|`1.1.1` +| + +|`1.26-2023.04.11` +|`1.26.2` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.26-2023.03.24` +|`1.26.2` +|`1.6.6` +|`1.1.1` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.25`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.25-2024.12.13` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.11.12` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.10.08` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.09.10` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.08.13` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.07.10` +|`1.25.16` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.25-2024.06.17` +|`1.25.16` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.25-2024.05.14` +|`1.25.16` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. + +|`1.25-2024.04.09` +|`1.25.16` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.25-2024.03.13` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2024.02.13` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2024.01.09` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2023.12.12` +|`1.25.15` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2023.11.14` +|`1.25.15` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.25-2023.10.19` +|`1.25.14` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.25.14`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.25-2023.09.12` +|`1.25.12` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.25-2023.08.17` +|`1.25.12` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.25-2023.08.08` +|`1.25.9` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.07.11` +|`1.25.9` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.06.20` +|`1.25.9` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.25-2023.06.14` +|`1.25.9` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.25.9`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.25-2023.05.09` +|`1.25.7` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). + +|`1.25-2023.04.11` +|`1.25.7` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.25-2023.03.27` +|`1.25.6` +|`1.6.6` +|`1.1.1` +|Installed a link:containers/domainless-windows-authentication-for-amazon-eks-windows-pods[domainless gMSA plugin,type="blog"] to facilitate [.noloc]`gMSA` authentication for [.noloc]`Windows` containers on Amazon EKS. 
+ +|`1.25-2023.03.20` +|`1.25.6` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.02.14` +|`1.25.6` +|`1.6.6` +|`1.1.1` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.24`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.24-2024.12.11` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.11.12` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.10.08` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.09.10` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.08.13` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.07.10` +|`1.24.17` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.24-2024.06.17` +|`1.24.17` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.24-2024.05.14` +|`1.24.17` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. + +|`1.24-2024.04.09` +|`1.24.17` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.24-2024.03.13` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2024.02.13` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2024.01.09` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2023.12.12` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2023.11.14` +|`1.24.17` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.24-2023.10.19` +|`1.24.17` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.24.17`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.24-2023.09.12` +|`1.24.16` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.24-2023.08.17` +|`1.24.16` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.24-2023.08.08` +|`1.24.13` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.07.11` +|`1.24.13` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.06.20` +|`1.24.13` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.24-2023.06.14` +|`1.24.13` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.24.13`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.24-2023.05.09` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). + +|`1.24-2023.04.11` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.24-2023.03.27` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Installed a link:containers/domainless-windows-authentication-for-amazon-eks-windows-pods[domainless gMSA plugin,type="blog"] to facilitate [.noloc]`gMSA` authentication for [.noloc]`Windows` containers on Amazon EKS. + +|`1.24-2023.03.20` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|[.noloc]`Kubernetes` version downgraded to `1.24.7` because `1.24.10` has a reported issue in `kube-proxy`. 
+ +|`1.24-2023.02.14` +|`1.24.10` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.01.23` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.01.11` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2022.12.13` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2022.11.08` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| +|=== +==== + +[[eks-ami-versions-windows-2019-full,eks-ami-versions-windows-2019-full.title]] +== Amazon EKS optimized [.noloc]`Windows` Server 2019 Full AMI + +The following tables list the current and previous versions of the Amazon EKS optimized [.noloc]`Windows` Server 2019 Full AMI. + +==== +[role="tablist"] +*[.noloc]`Kubernetes` version [.noloc]`1.31`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.31-2024.12.13` +|`1.31.3` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.11.12` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.10.08` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.10.01` +|`1.31.1` +|`1.7.20` +|`1.1.3` +| + +|`1.31-2024.09.10` +|`1.31.0` +|`1.7.20` +|`1.1.3` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.30`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.30-2024.12.11` +|`1.30.7` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.11.12` +|`1.30.4` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.10.08` +|`1.30.4` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.09.10` +|`1.30.2` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.08.13` +|`1.30.2` +|`1.7.14` +|`1.1.3` +| + +|`1.30-2024.07.10` +|`1.30.2` +|`1.7.14` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.30-2024.06.17` +|`1.30.0` +|`1.7.14` +|`1.1.2` +|Upgraded `containerd` to `1.7.14`. + +|`1.30-2024.05.15` +|`1.30.0` +|`1.6.28` +|`1.1.2` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.29`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.29-2024.12.11` +|`1.29.10` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.11.12` +|`1.29.8` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.10.08` +|`1.29.8` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.09.10` +|`1.29.6` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.08.13` +|`1.29.6` +|`1.7.14` +|`1.1.3` +| + +|`1.29-2024.07.10` +|`1.29.6` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.29-2024.06.17` +|`1.29.3` +|`1.7.11` +|`1.1.2` +| + +|`1.29-2024.05.15` +|`1.29.3` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. Upgraded `kubelet` to `1.29.3`. + +|`1.29-2024.04.09` +|`1.29.0` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.29-2024.03.13` +|`1.29.0` +|`1.6.25` +|`1.1.2` +| + +|`1.29-2024.02.13` +|`1.29.0` +|`1.6.25` +|`1.1.2` +| + +|`1.29-2024.02.06` +|`1.29.0` +|`1.6.25` +|`1.1.2` +|Fixed a bug where the pause image was incorrectly deleted by `kubelet` garbage collection process. 
+ +|`1.29-2024.01.09` +|`1.29.0` +|`1.6.18` +|`1.1.2` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.28`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.28-2024.12.11` +|`1.28.15` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.11.12` +|`1.28.13` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.10.08` +|`1.28.13` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.09.10` +|`1.28.11` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.08.13` +|`1.28.11` +|`1.7.14` +|`1.1.3` +| + +|`1.28-2024.07.10` +|`1.28.11` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.28-2024.06.17` +|`1.28.8` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.28-2024.05.14` +|`1.28.8` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.28.8`. + +|`1.28-2024.04.09` +|`1.28.5` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.28-2024.03.13` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2024.02.13` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2024.01.09` +|`1.28.5` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2023.12.12` +|`1.28.3` +|`1.6.18` +|`1.1.2` +| + +|`1.28-2023.11.14` +|`1.28.3` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.28-2023.10.19` +|`1.28.2` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.28-2023-09.27` +|`1.28.2` +|`1.6.6` +|`1.1.2` +|Fixed a https://github.com/advisories/GHSA-6xv5-86q9-7xr8[security advisory] in `kubelet`. + +|`1.28-2023.09.12` +|`1.28.1` +|`1.6.6` +|`1.1.2` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.27`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.27-2024.12.11` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.11.12` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.10.08` +|`1.27.16` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.09.10` +|`1.27.15` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.08.13` +|`1.27.15` +|`1.7.14` +|`1.1.3` +| + +|`1.27-2024.07.10` +|`1.27.15` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.27-2024.06.17` +|`1.27.12` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.27-2024.05.14` +|`1.27.12` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.27.12`. + +|`1.27-2024.04.09` +|`1.27.9` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.27-2024.03.13` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2024.02.13` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2024.01.09` +|`1.27.9` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2023.12.12` +|`1.27.7` +|`1.6.18` +|`1.1.2` +| + +|`1.27-2023.11.14` +|`1.27.7` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.27-2023.10.19` +|`1.27.6` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.27-2023-09.27` +|`1.27.6` +|`1.6.6` +|`1.1.2` +|Fixed a https://github.com/advisories/GHSA-6xv5-86q9-7xr8[security advisory] in `kubelet`. + +|`1.27-2023.09.12` +|`1.27.4` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. 
+ +|`1.27-2023.08.17` +|`1.27.4` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.27-2023.08.08` +|`1.27.3` +|`1.6.6` +|`1.1.1` +| + +|`1.27-2023.07.11` +|`1.27.3` +|`1.6.6` +|`1.1.1` +| + +|`1.27-2023.06.20` +|`1.27.1` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.27-2023.06.14` +|`1.27.1` +|`1.6.6` +|`1.1.1` +|Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.27-2023.06.06` +|`1.27.1` +|`1.6.6` +|`1.1.1` +|Fixed `containers-roadmap` https://github.com/aws/containers-roadmap/issues/2042[issue #2042], which caused nodes to fail pulling private Amazon ECR images. + +|`1.27-2023.05.17` +|`1.27.1` +|`1.6.6` +|`1.1.1` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.26`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.26-2024.12.11` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.11.12` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.10.08` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.09.10` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.08.13` +|`1.26.15` +|`1.7.14` +|`1.1.3` +| + +|`1.26-2024.07.10` +|`1.26.15` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.26-2024.06.17` +|`1.26.15` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.26-2024.05.14` +|`1.26.15` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. Upgraded `kubelet` to `1.26.15`. + +|`1.26-2024.04.09` +|`1.26.12` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.26-2024.03.13` +|`1.26.12` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2024.02.13` +|`1.26.12` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2024.01.09` +|`1.26.12` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2023.12.12` +|`1.26.10` +|`1.6.18` +|`1.1.2` +| + +|`1.26-2023.11.14` +|`1.26.10` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.26-2023.10.19` +|`1.26.9` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.26.9`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.26-2023.09.12` +|`1.26.7` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.26-2023.08.17` +|`1.26.7` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.26-2023.08.08` +|`1.26.6` +|`1.6.6` +|`1.1.1` +| + +|`1.26-2023.07.11` +|`1.26.6` +|`1.6.6` +|`1.1.1` +| + +|`1.26-2023.06.20` +|`1.26.4` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.26-2023.06.14` +|`1.26.4` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.26.4`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.26-2023.05.09` +|`1.26.2` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). 
+ +|`1.26-2023.04.26` +|`1.26.2` +|`1.6.6` +|`1.1.1` +| + +|`1.26-2023.04.11` +|`1.26.2` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.26-2023.03.24` +|`1.26.2` +|`1.6.6` +|`1.1.1` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.25`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.25-2024.12.13` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.11.12` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.10.08` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.09.10` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.08.13` +|`1.25.16` +|`1.7.14` +|`1.1.3` +| + +|`1.25-2024.07.10` +|`1.25.16` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.25-2024.06.17` +|`1.25.16` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.25-2024.05.14` +|`1.25.16` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. + +|`1.25-2024.04.09` +|`1.25.16` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.25-2024.03.13` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2024.02.13` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2024.01.09` +|`1.25.16` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2023.12.12` +|`1.25.15` +|`1.6.18` +|`1.1.2` +| + +|`1.25-2023.11.14` +|`1.25.15` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.25-2023.10.19` +|`1.25.14` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.25.14`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.25-2023.09.12` +|`1.25.12` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.25-2023.08.17` +|`1.25.12` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.25-2023.08.08` +|`1.25.9` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.07.11` +|`1.25.9` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.06.20` +|`1.25.9` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.25-2023.06.14` +|`1.25.9` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.25.9`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.25-2023.05.09` +|`1.25.7` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). + +|`1.25-2023.04.11` +|`1.25.7` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.25-2023.03.27` +|`1.25.6` +|`1.6.6` +|`1.1.1` +|Installed a link:containers/domainless-windows-authentication-for-amazon-eks-windows-pods[domainless gMSA plugin,type="blog"] to facilitate [.noloc]`gMSA` authentication for [.noloc]`Windows` containers on Amazon EKS. 
+ +|`1.25-2023.03.20` +|`1.25.6` +|`1.6.6` +|`1.1.1` +| + +|`1.25-2023.02.14` +|`1.25.6` +|`1.6.6` +|`1.1.1` +| +|=== + + +*[.noloc]`Kubernetes` version [.noloc]`1.24`*:: ++ +[cols="1,1,1,1,1", options="header"] +|=== +|AMI version +|kubelet version +|containerd version +|csi-proxy version +|Release notes + +|`1.24-2024.12.11` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.11.12` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.10.08` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.09.10` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.08.13` +|`1.24.17` +|`1.7.14` +|`1.1.3` +| + +|`1.24-2024.07.10` +|`1.24.17` +|`1.7.11` +|`1.1.2` +|Includes patches for `CVE-2024-5321`. + +|`1.24-2024.06.17` +|`1.24.17` +|`1.7.11` +|`1.1.2` +|Upgraded `containerd` to `1.7.11`. + +|`1.24-2024.05.14` +|`1.24.17` +|`1.6.28` +|`1.1.2` +|Upgraded `containerd` to `1.6.28`. + +|`1.24-2024.04.09` +|`1.24.17` +|`1.6.25` +|`1.1.2` +|Upgraded `containerd` to `1.6.25`. Rebuilt CNI and `csi-proxy` using `golang 1.22.1`. + +|`1.24-2024.03.13` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2024.02.13` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2024.01.09` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2023.12.12` +|`1.24.17` +|`1.6.18` +|`1.1.2` +| + +|`1.24-2023.11.14` +|`1.24.17` +|`1.6.18` +|`1.1.2` +|Includes patches for `CVE-2023-5528`. + +|`1.24-2023.10.19` +|`1.24.17` +|`1.6.18` +|`1.1.2` +|Upgraded `containerd` to `1.6.18`. Upgraded `kubelet` to `1.24.17`. Added new <> (`SERVICE_IPV4_CIDR` and `EXCLUDED_SNAT_CIDRS`). + +|`1.24-2023.09.12` +|`1.24.16` +|`1.6.6` +|`1.1.2` +|Upgraded the Amazon VPC CNI plugin to use the [.noloc]`Kubernetes` connector binary, which gets the [.noloc]`Pod` IP address from the [.noloc]`Kubernetes` API server. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/100[pull request #100]. + +|`1.24-2023.08.17` +|`1.24.16` +|`1.6.6` +|`1.1.2` +|Includes patches for `CVE-2023-3676`, `CVE-2023-3893`, and `CVE-2023-3955`. + +|`1.24-2023.08.08` +|`1.24.13` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.07.11` +|`1.24.13` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.06.21` +|`1.24.13` +|`1.6.6` +|`1.1.1` +|Resolved issue that was causing the DNS suffix search list to be incorrectly populated. + +|`1.24-2023.06.14` +|`1.24.13` +|`1.6.6` +|`1.1.1` +|Upgraded [.noloc]`Kubernetes` to `1.24.13`. Added support for host port mapping in CNI. Merged https://github.com/aws/amazon-vpc-cni-plugins/pull/93[pull request #93]. + +|`1.24-2023.05.09` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Fixed a bug causing network connectivity https://github.com/aws/containers-roadmap/issues/1126[issue #1126] on pods after node restart. Introduced a new <> (`ExcludedSnatCIDRs`). + +|`1.24-2023.04.11` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Added recovery mechanism for `kubelet` and `kube-proxy` on service crash. + +|`1.24-2023.03.27` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|Installed a link:containers/domainless-windows-authentication-for-amazon-eks-windows-pods[domainless gMSA plugin,type="blog"] to facilitate [.noloc]`gMSA` authentication for [.noloc]`Windows` containers on Amazon EKS. + +|`1.24-2023.03.20` +|`1.24.7` +|`1.6.6` +|`1.1.1` +|[.noloc]`Kubernetes` version downgraded to `1.24.7` because `1.24.10` has a reported issue in `kube-proxy`. 
+ +|`1.24-2023.02.14` +|`1.24.10` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.01.23` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2023.01.11` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2022.12.14` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| + +|`1.24-2022.10.12` +|`1.24.7` +|`1.6.6` +|`1.1.1` +| +|=== +==== diff --git a/latest/ug/nodes/eks-compute.adoc b/latest/ug/nodes/eks-compute.adoc new file mode 100644 index 00000000..b730be14 --- /dev/null +++ b/latest/ug/nodes/eks-compute.adoc @@ -0,0 +1,233 @@ +//!!NODE_ROOT +[[eks-compute,eks-compute.title]] += Manage compute resources by using nodes +:doctype: book +:sectnums: +:toc: left +:icons: font +:experimental: +:idprefix: +:idseparator: - +:sourcedir: . +:info_doctype: chapter +:info_title: Manage compute resources by using nodes +:info_titleabbrev: Manage compute +:keywords: nodes, node groups +:info_abstract: Your Amazon EKS cluster can schedule Pods on any combination of self-managed nodes, Amazon EKS managed node groups, and Fargate in the {aws} Cloud and hybrid nodes on-premises. + +include::../attributes.txt[] + +[abstract] +-- +Your Amazon EKS cluster can schedule [.noloc]`Pods` on any combination of self-managed nodes, Amazon EKS managed node groups, Fargate, and Amazon EKS Hybrid Nodes in the {aws} Cloud and hybrid nodes on-premises. +-- + +A [.noloc]`Kubernetes` node is a machine that runs containerized applications. Each node has the following components: + +* *https://kubernetes.io/docs/setup/production-environment/container-runtimes/[Container runtime]* – Software that's responsible for running the containers. +* *https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/[kubelet]* – Makes sure that containers are healthy and running within their associated [.noloc]`Pod`. +* *https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/[kube-proxy]* – Maintains network rules that allow communication to your [.noloc]`Pods`. + +For more information, see https://kubernetes.io/docs/concepts/architecture/nodes/[Nodes] in the [.noloc]`Kubernetes` documentation. + +Your Amazon EKS cluster can schedule [.noloc]`Pods` on any combination of <>, <>, <>, <>, and <>. To learn more about nodes deployed in your cluster, see <>. + +[IMPORTANT] +==== + +{aws} Fargate with Amazon EKS isn't available in {aws} GovCloud (US-East) and {aws} GovCloud (US-West). +Amazon EKS Hybrid Nodes isn't available in {aws} GovCloud Regions and China Regions. + +==== + +[NOTE] +==== + +Excluding hybrid nodes, nodes must be in the same VPC as the subnets you selected when you created the cluster. However, the nodes don't have to be in the same subnets. + +==== + +== Compare compute options + +The following table provides several criteria to evaluate when deciding which options best meet your requirements. Self-managed nodes are another option which support all of the criteria listed, but they require a lot more manual maintenance. For more information, see <>. + +[NOTE] +==== + +[.noloc]`Bottlerocket` has some specific differences from the general information in this table. For more information, see the [.noloc]`Bottlerocket` https://github.com/bottlerocket-os/bottlerocket/blob/develop/README.md[documentation] on [.noloc]`GitHub`. 
+
+====
+
+
+[role="no-scroll"]
+[cols="1,1,1,1", options="header"]
+|===
+|Criteria
+|EKS managed node groups
+|EKS Auto Mode
+|Amazon EKS Hybrid Nodes
+
+|Can be deployed to link:outposts/latest/userguide/what-is-outposts.html[{aws} Outposts,type="documentation"]
+|No
+|No
+|No
+
+|Can be deployed to an <>
+|Yes
+|No
+|No
+
+|Can run containers that require [.noloc]`Windows`
+|Yes
+|No
+|No
+
+|Can run containers that require [.noloc]`Linux`
+|Yes
+|Yes
+|Yes
+
+|Can run workloads that require the Inferentia chip
+|<> – Amazon Linux nodes only
+|Yes
+|No
+
+|Can run workloads that require a GPU
+|<> – Amazon Linux nodes only
+|Yes
+|Yes
+
+|Can run workloads that require Arm processors
+|<>
+|Yes
+|Yes
+
+|Can run {aws} link:bottlerocket/[Bottlerocket,type="marketing"]
+|Yes
+|Yes
+|No
+
+//GDC Removed fargate specific rows
+
+|Pods share CPU, memory, storage, and network resources with other Pods.
+|Yes
+|Yes
+|Yes
+
+//GDC: add link here
+
+|Must deploy and manage Amazon EC2 instances
+|Yes
+|No - Learn about xref:automode-learn-instances[EC2 managed instances]
+|Yes – the on-premises physical or virtual machines are managed by you with your choice of tooling.
+
+|Must secure, maintain, and patch the operating system of Amazon EC2 instances
+|Yes
+|No
+|Yes – the operating system running on your physical or virtual machines is managed by you with your choice of tooling.
+
+|Can provide bootstrap arguments at deployment of a node, such as extra https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/[kubelet] arguments.
+|Yes – Using `eksctl` or a <> with a custom AMI.
+|No - xref:create-node-class[Use a `NodeClass` to configure nodes]
+|Yes - you can customize bootstrap arguments with `nodeadm`. See <>.
+
+|Can assign IP addresses to [.noloc]`Pods` from a different CIDR block than the IP address assigned to the node.
+|Yes – Using a launch template with a custom AMI. For more information, see <>.
+|No
+|Yes - see <>.
+
+|Can SSH into node
+|Yes
+|No - xref:auto-troubleshoot[Learn how to troubleshoot nodes]
+|Yes
+
+|Can deploy your own custom AMI to nodes
+|Yes – Using a <>
+|No
+|Yes
+
+|Can deploy your own custom CNI to nodes
+|Yes – Using a <> with a custom AMI
+|No
+|Yes
+
+|Must update node AMI on your own
+|<> – If you deployed an Amazon EKS optimized AMI, you're notified in the Amazon EKS console when updates are available. You can perform the update with one click in the console. If you deployed a custom AMI, you're not notified in the Amazon EKS console when updates are available. You must perform the update on your own.
+|No
+|Yes - the operating system running on your physical or virtual machines is managed by you with your choice of tooling. See <>.
+
+|Must update node [.noloc]`Kubernetes` version on your own
+|<> – If you deployed an Amazon EKS optimized AMI, you're notified in the Amazon EKS console when updates are available. You can perform the update with one click in the console. If you deployed a custom AMI, you're not notified in the Amazon EKS console when updates are available. You must perform the update on your own.
+|No
+|Yes - you manage hybrid node upgrades with your own choice of tooling or with `nodeadm`. See <>.
+
+|Can use Amazon EBS storage with [.noloc]`Pods`
+|<>
+|Yes, as an integrated capability. Learn how to xref:create-storage-class[create a storage class].
+|No + +|Can use Amazon EFS storage with [.noloc]`Pods` +|<> +|Yes +|No + +|Can use Amazon FSx for Lustre storage with [.noloc]`Pods` +|<> +|Yes +|No + +|Can use Network Load Balancer for services +|<> +|Yes +|Yes - must use target type `ip`. + +|Pods can run in a public subnet +|Yes +|Yes +|No - pods run in on-premises environment. + +|Can assign different VPC security groups to individual [.noloc]`Pods` +|<> – [.noloc]`Linux` nodes only +|No +|No + +|Can run [.noloc]`Kubernetes` [.noloc]`DaemonSets` +|Yes +|Yes +|Yes + +|Support `HostPort` and `HostNetwork` in the [.noloc]`Pod` manifest +|Yes +|Yes +|Yes + +|{aws} Region availability +|link:general/latest/gr/eks.html[All Amazon EKS supported regions,type="documentation"] +|link:general/latest/gr/eks.html[All Amazon EKS supported regions,type="documentation"] +|link:general/latest/gr/eks.html[All Amazon EKS supported regions,type="documentation"] except the {aws} GovCloud (US) Regions and the China Regions. + +|Can run containers on Amazon EC2 dedicated hosts +|Yes +|No +|No + +|Pricing +|Cost of Amazon EC2 instance that runs multiple [.noloc]`Pods`. For more information, see link:ec2/pricing/[Amazon EC2 pricing,type="marketing"]. +| When EKS Auto Mode is enabled in your cluster, you pay a separate fee, in addition to the standard EC2 instance charges, for the instances launched using Auto Mode's compute capability. The amount varies with the instance type launched and the {aws} region where your cluster is located. For more information, see link:eks/pricing/["Amazon EKS pricing",type="marketing"]. +|Cost of hybrid nodes vCPU per hour. For more information, see link:eks/pricing/[Amazon EKS pricing,type="marketing"]. + +|=== + +include::managed-node-groups.adoc[leveloffset=+1] + +include::worker.adoc[leveloffset=+1] + +include::fargate.adoc[leveloffset=+1] + +include::choosing-instance-type.adoc[leveloffset=+1] + +include::eks-optimized-amis.adoc[leveloffset=+1] + +include::node-health.adoc[leveloffset=+1] + +include::hybrid-nodes.adoc[leveloffset=+1] diff --git a/latest/ug/nodes/eks-custom-ami-windows.adoc b/latest/ug/nodes/eks-custom-ami-windows.adoc new file mode 100644 index 00000000..273d87e7 --- /dev/null +++ b/latest/ug/nodes/eks-custom-ami-windows.adoc @@ -0,0 +1,115 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[eks-custom-ami-windows,eks-custom-ami-windows.title]] += Build a custom [.noloc]`Windows` AMI with Image Builder +:info_titleabbrev: Custom builds + +[abstract] +-- +You can use EC2 Image Builder to create custom Amazon EKS optimized [.noloc]`Windows` AMIs. +-- + +You can use EC2 Image Builder to create custom Amazon EKS optimized [.noloc]`Windows` AMIs with one of the following options: + + + +* <> +* <> + +With both methods, you must create your own Image Builder recipe. For more information, see link:imagebuilder/latest/userguide/create-image-recipes.html[Create a new version of an image recipe,type="documentation"] in the Image Builder User Guide. + +[IMPORTANT] +==== + +The following *Amazon-managed* components for `eks` include patches for `CVE-2024-5321`. + +* `1.24.5` and higher +* `1.25.4` and higher +* `1.26.4` and higher +* `1.27.2` and higher +* `1.28.2` and higher +* `1.29.2` and higher +* `1.30.1` and higher + +==== + +[[custom-windows-ami-as-base,custom-windows-ami-as-base.title]] +== Using an Amazon EKS optimized [.noloc]`Windows` AMI as a base + +This option is the recommended way to build your custom [.noloc]`Windows` AMIs. The Amazon EKS optimized [.noloc]`Windows` AMIs we provide are more frequently updated than the Amazon-managed build component. + +. Start a new Image Builder recipe. ++ +.. Open the EC2 Image Builder console at https://console.aws.amazon.com/imagebuilder. +.. In the left navigation pane, choose *Image recipes*. +.. Choose *Create image recipe*. +. In the *Recipe details* section, enter a *Name* and *Version*. +. Specify the ID of the Amazon EKS optimized [.noloc]`Windows` AMI in the *Base image* section. ++ +.. Choose *Enter custom AMI ID*. +.. Retrieve the AMI ID for the [.noloc]`Windows` OS version that you require. For more information, see <>. +.. Enter the custom *AMI ID*. If the AMI ID isn't found, make sure that the {aws} Region for the AMI ID matches the {aws} Region shown in the upper right of your console. +. (Optional) To get the latest security updates, add the `update-windows` component in the *Build components -* section. ++ +.. From the dropdown list to the right of the *Find components by name* search box, choose *Amazon-managed*. +.. In the *Find components by name* search box, enter `update-windows`. +.. Select the check box of the *`update-windows`* search result. This component includes the latest [.noloc]`Windows` patches for the operating system. +. Complete the remaining image recipe inputs with your required configurations. For more information, see link:imagebuilder/latest/userguide/create-image-recipes.html#create-image-recipe-version-console[Create a new image recipe version (console),type="documentation"] in the Image Builder User Guide. +. Choose *Create recipe*. +. Use the new image recipe in a new or existing image pipeline. Once your image pipeline runs successfully, your custom AMI will be listed as an output image and is ready for use. For more information, see link:imagebuilder/latest/userguide/start-build-image-pipeline.html[Create an image pipeline using the EC2 Image Builder console wizard,type="documentation"]. + + +[[custom-windows-ami-build-component,custom-windows-ami-build-component.title]] +== Using the Amazon-managed build component + +When using an Amazon EKS optimized [.noloc]`Windows` AMI as a base isn't viable, you can use the Amazon-managed build component instead. This option may lag behind the most recent supported [.noloc]`Kubernetes` versions. + +. 
Start a new Image Builder recipe. ++ +.. Open the EC2 Image Builder console at https://console.aws.amazon.com/imagebuilder. +.. In the left navigation pane, choose *Image recipes*. +.. Choose *Create image recipe*. +. In the *Recipe details* section, enter a *Name* and *Version*. +. Determine which option you will be using to create your custom AMI in the *Base image* section: ++ +** *Select managed images* – Choose *Windows* for your *Image Operating System (OS)*. Then choose one of the following options for *Image origin*. ++ +*** *Quick start (Amazon-managed)* – In the *Image name* dropdown, choose an Amazon EKS supported [.noloc]`Windows` Server version. For more information, see <>. +*** *Images owned by me* – For *Image name*, choose the ARN of your own image with your own license. The image that you provide can't already have Amazon EKS components installed. +** *Enter custom AMI ID* – For AMI ID, enter the ID for your AMI with your own license. The image that you provide can't already have Amazon EKS components installed. +. In the *Build components - Windows* section, do the following: ++ +.. From the dropdown list to the right of the *Find components by name* search box, choose *Amazon-managed*. +.. In the *Find components by name* search box, enter `eks`. +.. Select the check box of the *`eks-optimized-ami-windows`* search result, even though the result returned may not be the version that you want. +.. In the *Find components by name* search box, enter `update-windows` . +.. Select the check box of the *update-windows* search result. This component includes the latest [.noloc]`Windows` patches for the operating system. +. In the *Selected components* section, do the following: ++ +.. Choose *Versioning options* for *`eks-optimized-ami-windows`*. +.. Choose *Specify component version*. +.. In the *Component Version* field, enter [.replaceable]`version.x`, replacing [.replaceable]`version` with a supported [.noloc]`Kubernetes` version. Entering an [.replaceable]`x` for part of the version number indicates to use the latest component version that also aligns with the part of the version you explicitly define. Pay attention to the console output as it will advise you on whether your desired version is available as a managed component. Keep in mind that the most recent [.noloc]`Kubernetes` versions may not be available for the build component. For more information about available versions, see <>. ++ +NOTE: The following `eks-optimized-ami-windows` build component versions require `eksctl` version `0.129` or lower: + +*** `1.24.0` + +. Complete the remaining image recipe inputs with your required configurations. For more information, see link:imagebuilder/latest/userguide/create-image-recipes.html#create-image-recipe-version-console[Create a new image recipe version (console),type="documentation"] in the Image Builder User Guide. +. Choose *Create recipe*. +. Use the new image recipe in a new or existing image pipeline. Once your image pipeline runs successfully, your custom AMI will be listed as an output image and is ready for use. For more information, see link:imagebuilder/latest/userguide/start-build-image-pipeline.html[Create an image pipeline using the EC2 Image Builder console wizard,type="documentation"]. + + +[[custom-windows-ami-component-versions,custom-windows-ami-component-versions.title]] +== Retrieving information about `eks-optimized-ami-windows` component versions + +You can retrieve specific information regarding what is installed with each component. 
For example, you can verify which `kubelet` version is installed. The components go through functional testing on the Amazon EKS supported [.noloc]`Windows` operating system versions. For more information, see <>. Any other [.noloc]`Windows` OS versions that aren't listed as supported or have reached end of support might not be compatible with the component.
+
+. Open the EC2 Image Builder console at https://console.aws.amazon.com/imagebuilder.
+. In the left navigation pane, choose *Components*.
+. From the dropdown list to the right of the *Find components by name* search box, change *Owned by me* to *Quick start (Amazon-managed)*.
+. In the *Find components by name* box, enter `eks`.
+. (Optional) If you are using a recent version, sort the *Version* column in descending order by choosing it twice.
+. Choose the *`eks-optimized-ami-windows`* link with a desired version.
+
+The *Description* in the resulting page shows the specific information.
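+
+If you prefer the {aws} CLI, you can list the Amazon-managed components and inspect a specific version without using the console. The following is a minimal sketch; the `name` filter key and the example component ARN are assumptions, so adjust them to match what `list-components` actually returns in your account and Region.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# List Amazon-managed Image Builder components whose name matches the EKS Windows component.
+# The "name" filter key is an assumption; check the list-components help if it differs.
+aws imagebuilder list-components \
+    --owner Amazon \
+    --filters name=name,values=eks-optimized-ami-windows \
+    --region region-code
+
+# Show the details, including the description, for one component build version.
+# The ARN below is an example placeholder; use an ARN returned by the previous command.
+aws imagebuilder get-component \
+    --component-build-version-arn arn:aws:imagebuilder:region-code:aws:component/eks-optimized-ami-windows/1.31.0/1 \
+    --region region-code
+----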
diff --git a/latest/ug/nodes/eks-linux-ami-versions.adoc b/latest/ug/nodes/eks-linux-ami-versions.adoc
new file mode 100644
index 00000000..4c91a40e
--- /dev/null
+++ b/latest/ug/nodes/eks-linux-ami-versions.adoc
@@ -0,0 +1,20 @@
+//!!NODE_ROOT
+include::../attributes.txt[]
+[.topic]
+[[eks-linux-ami-versions,eks-linux-ami-versions.title]]
+= Retrieve Amazon Linux AMI version information
+:info_titleabbrev: Get version information
+
+[abstract]
+--
+This topic gives the location of Amazon EKS optimized Amazon Linux AMI version information.
+--
+
+Amazon EKS optimized Amazon Linux AMIs are versioned by [.noloc]`Kubernetes` version and the release date of the AMI in the following format:
+
+[source,none,subs="verbatim,attributes"]
+----
+k8s_major_version.k8s_minor_version.k8s_patch_version-release_date
+----
+
+Each AMI release includes various versions of https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/[kubelet], the [.noloc]`Linux` kernel, and https://containerd.io/[containerd]. The accelerated AMIs also include various versions of the [.noloc]`NVIDIA` driver. You can find this version information in the https://github.com/awslabs/amazon-eks-ami/blob/main/CHANGELOG.md[Changelog] on [.noloc]`GitHub`.
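+
+You can also check the recommended AMI release for a cluster version from the command line by querying the public SSM parameters that Amazon EKS publishes. The following is a minimal sketch; the [.noloc]`Kubernetes` version, architecture, and AMI variant segments of the parameter path are placeholders to adjust, and the exact set of sub-parameters returned can vary by AMI type.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# List the public SSM parameters under the "recommended" path for Amazon Linux 2023 AMIs.
+# Adjust the Kubernetes version (1.31), architecture (x86_64), and variant (standard) as needed.
+aws ssm get-parameters-by-path \
+    --path /aws/service/eks/optimized-ami/1.31/amazon-linux-2023/x86_64/standard/recommended/ \
+    --region region-code \
+    --query "Parameters[].[Name,Value]" \
+    --output table
+----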
+include::../attributes.txt[] +[.topic] +[[eks-optimized-ami-bottlerocket,eks-optimized-ami-bottlerocket.title]] += Create nodes with optimized [.noloc]`Bottlerocket` AMIs +:info_titleabbrev: Bottlerocket + +[abstract] +-- +[.noloc]`Bottlerocket` is an open source [.noloc]`Linux` distribution that's sponsored and supported by {aws}. [.noloc]`Bottlerocket` includes only the essential software to run containers, which improves resource usage, reduces security threats, and lowers management overhead. +-- + +link:bottlerocket/[Bottlerocket,type="marketing"] is an open source [.noloc]`Linux` distribution that's sponsored and supported by {aws}. [.noloc]`Bottlerocket` is purpose-built for hosting container workloads. With [.noloc]`Bottlerocket`, you can improve the availability of containerized deployments and reduce operational costs by automating updates to your container infrastructure. [.noloc]`Bottlerocket` includes only the essential software to run containers, which improves resource usage, reduces security threats, and lowers management overhead. The [.noloc]`Bottlerocket` AMI includes `containerd`, `kubelet`, and {aws} IAM Authenticator. In addition to managed node groups and self-managed nodes, [.noloc]`Bottlerocket` is also supported by https://karpenter.sh/[Karpenter]. + +[[bottlerocket-advantages,bottlerocket-advantages.title]] +== Advantages + +Using [.noloc]`Bottlerocket` with your Amazon EKS cluster has the following advantages: + + + +* *Higher uptime with lower operational cost and lower management complexity* – [.noloc]`Bottlerocket` has a smaller resource footprint, shorter boot times, and is less vulnerable to security threats than other [.noloc]`Linux` distributions. [.noloc]`Bottlerocket's` smaller footprint helps to reduce costs by using less storage, compute, and networking resources. +* *Improved security from automatic OS updates* – Updates to [.noloc]`Bottlerocket` are applied as a single unit that can be rolled back if necessary. This removes the risk of corrupted or failed updates that can leave the system in an unusable state. With [.noloc]`Bottlerocket`, security updates can be automatically applied as soon as they're available in a minimally disruptive manner and be rolled back if failures occur. +* *Premium support* – {aws}-provided builds of [.noloc]`Bottlerocket` on Amazon EC2 are covered under the same {aws} Support plans that also cover {aws} services such as Amazon EC2, Amazon EKS, and Amazon ECR. + + +[[bottlerocket-considerations,bottlerocket-considerations.title]] +== Considerations + +Consider the following when using [.noloc]`Bottlerocket` for your AMI type: + + + +* [.noloc]`Bottlerocket` supports Amazon EC2 instances with `x86_64` and `arm64` processors. The [.noloc]`Bottlerocket` AMI isn't recommended for use with Amazon EC2 instances with an Inferentia chip. +* [.noloc]`Bottlerocket` images don't include an SSH server or a shell. You can use out-of-band access methods to allow SSH. These methods enable the admin container and let you pass some bootstrapping configuration steps with user data.
For more information, refer to the following sections in https://github.com/bottlerocket-os/bottlerocket/blob/develop/README.md[Bottlerocket OS] on [.noloc]`GitHub`: ++ +** https://github.com/bottlerocket-os/bottlerocket/blob/develop/README.md#exploration[Exploration] +** https://github.com/bottlerocket-os/bottlerocket/blob/develop/README.md#admin-container[Admin container] +** https://github.com/bottlerocket-os/bottlerocket/blob/develop/README.md#kubernetes-settings[Kubernetes settings] +* [.noloc]`Bottlerocket` uses different container types: ++ +** By default, a https://github.com/bottlerocket-os/bottlerocket-control-container[control container] is enabled. This container runs the https://github.com/aws/amazon-ssm-agent[{aws} Systems Manager agent] that you can use to run commands or start shell sessions on Amazon EC2 [.noloc]`Bottlerocket` instances. For more information, see link:systems-manager/latest/userguide/session-manager-getting-started.html[Setting up Session Manager,type="documentation"] in the _{aws} Systems Manager User Guide_. +** If an SSH key is given when creating the node group, an admin container is enabled. We recommend using the admin container only for development and testing scenarios. We don't recommend using it for production environments. For more information, see https://github.com/bottlerocket-os/bottlerocket/blob/develop/README.md#admin-container[Admin container] on [.noloc]`GitHub`. + + +[[bottlerocket-more-information,bottlerocket-more-information.title]] +== More information + +For more information about using Amazon EKS optimized [.noloc]`Bottlerocket` AMIs, see the following sections: + +* For details about [.noloc]`Bottlerocket`, see the https://bottlerocket.dev/en/[Bottlerocket Documentation]. +* For version information resources, see <>. +* To use [.noloc]`Bottlerocket` with managed node groups, see <>. +* To launch self-managed [.noloc]`Bottlerocket` nodes, see <>. +* To retrieve the latest IDs of the Amazon EKS optimized [.noloc]`Bottlerocket` AMIs, see <>. +* For details on compliance support, see <>. + +include::eks-ami-versions-bottlerocket.adoc[leveloffset=+1] + +include::retrieve-ami-id-bottlerocket.adoc[leveloffset=+1] + +include::bottlerocket-compliance-support.adoc[leveloffset=+1] diff --git a/latest/ug/nodes/eks-optimized-ami.adoc b/latest/ug/nodes/eks-optimized-ami.adoc new file mode 100644 index 00000000..3b1549e1 --- /dev/null +++ b/latest/ug/nodes/eks-optimized-ami.adoc @@ -0,0 +1,84 @@ +//!!NODE_ROOT
+[.topic] +[[eks-optimized-ami,eks-optimized-ami.title]] += Create nodes with optimized Amazon Linux AMIs +:info_titleabbrev: Amazon Linux + +include::../attributes.txt[] + +include::al2023.adoc[leveloffset=+1] + +include::eks-linux-ami-versions.adoc[leveloffset=+1] + +include::retrieve-ami-id.adoc[leveloffset=+1] + +include::eks-ami-build-scripts.adoc[leveloffset=+1] + +[abstract] +-- +The Amazon EKS optimized Amazon Linux AMIs are built on top of Amazon Linux 2 (AL2) and Amazon Linux 2023 (AL2023). They are configured to serve as the base images for Amazon EKS nodes. +-- + +The Amazon EKS optimized Amazon Linux AMIs are built on top of Amazon Linux 2 (AL2) and Amazon Linux 2023 (AL2023). They are configured to serve as the base images for Amazon EKS nodes. The AMIs are configured to work with Amazon EKS and they include the following components: + +* `kubelet` +* {aws} IAM Authenticator +* [.noloc]`Docker` (Amazon EKS version `1.23` and earlier) +* `containerd` + +[NOTE] +==== + +* You can track security or privacy events for Amazon Linux at the https://alas.aws.amazon.com/[Amazon Linux security center] by choosing the tab for your desired version. You can also subscribe to the applicable RSS feed. Security and privacy events include an overview of the issue, what packages are affected, and how to update your instances to correct the issue. +* Before deploying an accelerated or [.noloc]`Arm` AMI, review the information in <> and <>. +* For [.noloc]`Kubernetes` version `1.23`, you can use an optional bootstrap flag to test migration from [.noloc]`Docker` to `containerd`. For more information, see <>. +* Amazon EC2 `P2` instances aren't supported on Amazon EKS because they require `NVIDIA` driver version 470 or earlier. +* Any newly created managed node groups in clusters on version `1.30` or newer will automatically default to using AL2023 as the node operating system. Previously, new node groups would default to AL2. You can continue to use AL2 by choosing it as the AMI type when creating a new node group. +* Support for AL2 will end on June 30th, 2025. For more information, see link:amazon-linux-2/faqs/[Amazon Linux 2 FAQs,type="marketing"]. + +==== + +[[gpu-ami,gpu-ami.title]] +== Amazon EKS optimized accelerated Amazon Linux AMIs + +The Amazon EKS optimized accelerated Amazon Linux AMIs are built on top of the standard Amazon EKS optimized Amazon Linux AMIs. They are configured to serve as optional images for Amazon EKS nodes to support GPU, link:machine-learning/inferentia/[Inferentia,type="marketing"], and link:machine-learning/trainium/[Trainium,type="marketing"] based workloads. + +In addition to the standard Amazon EKS optimized AMI configuration, the accelerated AMIs include the following: + +* [.noloc]`NVIDIA` drivers +* `nvidia-container-toolkit` +* {aws} [.noloc]`Neuron` driver + +For a list of the latest components included in the accelerated AMIs, see the `amazon-eks-ami` https://github.com/awslabs/amazon-eks-ami/releases[Releases] on [.noloc]`GitHub`. + +[NOTE] +==== + +* Make sure to specify the applicable instance type in your node {aws} CloudFormation template. By using the Amazon EKS optimized accelerated AMIs, you agree to https://s3.amazonaws.com/EULA/NVidiaEULAforAWS.pdf[NVIDIA's Cloud End User License Agreement (EULA)]. +* The Amazon EKS optimized accelerated AMIs were previously referred to as the _Amazon EKS optimized AMIs with GPU support_. +* Previous versions of the Amazon EKS optimized accelerated AMIs installed the `nvidia-docker` repository. 
The repository is no longer included in Amazon EKS AMI version `v20200529` and later. + +==== + +For details on running workloads on Amazon EKS optimized accelerated Amazon Linux AMIs, see <>. + +[[arm-ami,arm-ami.title]] +== Amazon EKS optimized [.noloc]`Arm` Amazon Linux AMIs + +Arm instances deliver significant cost savings for scale-out and [.noloc]`Arm`-based applications such as web servers, containerized microservices, caching fleets, and distributed data stores. When adding [.noloc]`Arm` nodes to your cluster, review the following considerations. + +* If your cluster was deployed before August 17, 2020, you must do a one-time upgrade of critical cluster add-on manifests. This is so that [.noloc]`Kubernetes` can pull the correct image for each hardware architecture in use in your cluster. For more information about updating cluster add-ons, see <>. If you deployed your cluster on or after August 17, 2020, then your [.noloc]`CoreDNS`, `kube-proxy`, and [.noloc]`Amazon VPC CNI plugin for Kubernetes` add-ons are already multi-architecture capable. +* Applications deployed to [.noloc]`Arm` nodes must be compiled for [.noloc]`Arm`. +* If you have [.noloc]`DaemonSets` deployed in an existing cluster, or you want to deploy them to a new cluster where you also plan to deploy [.noloc]`Arm` nodes, verify that your [.noloc]`DaemonSet` can run on all hardware architectures in your cluster. +* You can run [.noloc]`Arm` node groups and x86 node groups in the same cluster. If you do, consider deploying multi-architecture container images to a container repository such as Amazon Elastic Container Registry and then adding node selectors to your manifests so that [.noloc]`Kubernetes` knows what hardware architecture a [.noloc]`Pod` can be deployed to. For more information, see link:AmazonECR/latest/userguide/docker-push-multi-architecture-image.html[Pushing a multi-architecture image,type="documentation"] in the _Amazon ECR User Guide_ and the link:containers/introducing-multi-architecture-container-images-for-amazon-ecr[Introducing multi-architecture container images for Amazon ECR,type="blog"] blog post. + +[[linux-more-information,linux-more-information.title]] +== More information + +For more information about using Amazon EKS optimized Amazon Linux AMIs, see the following sections: + +* To use Amazon Linux with managed node groups, see <>. +* To launch self-managed Amazon Linux nodes, see <>. +* For version information, see <>. +* To retrieve the latest IDs of the Amazon EKS optimized Amazon Linux AMIs, see <>. +* For open-source scripts that are used to build the Amazon EKS optimized AMIs, see <>. diff --git a/latest/ug/nodes/eks-optimized-amis.adoc b/latest/ug/nodes/eks-optimized-amis.adoc new file mode 100644 index 00000000..f7dccea0 --- /dev/null +++ b/latest/ug/nodes/eks-optimized-amis.adoc @@ -0,0 +1,35 @@ +//!!NODE_ROOT
+[.topic] +[[eks-optimized-amis,eks-optimized-amis.title]] += Create nodes with pre-built optimized images +:info_doctype: section +:info_title: Create nodes with pre-built optimized images +:info_titleabbrev: Pre-built optimized AMIs +:keywords: optimized, custom, AMI +:info_abstract: You can deploy nodes with pre-built Amazon EKS optimized Amazon Machine Images (AMIs) or your own custom \ + AMIs + +include::../attributes.txt[] + +[abstract] +-- +You can deploy nodes with pre-built Amazon EKS optimized link:AWSEC2/latest/UserGuide/AMIs.html[Amazon Machine Images,type="documentation"] (AMIs) or your own custom AMIs. +-- + +You can deploy nodes with pre-built Amazon EKS optimized link:AWSEC2/latest/UserGuide/AMIs.html[Amazon Machine Images,type="documentation"] (AMIs) or your own custom AMIs when you use managed node groups or self-managed nodes. If you are running hybrid nodes, see <>. For information about each type of Amazon EKS optimized AMI, see one of the following topics. For instructions on how to create your own custom AMI, see <>. + +With Amazon EKS Auto Mode, EKS manages the EC2 instance including selecting and updating the AMI. + +[.topiclist] +[[Topic List]] + +[.topic] +include::dockershim-deprecation.adoc[leveloffset=+1] + +include::eks-optimized-ami.adoc[leveloffset=+1] + +include::eks-optimized-ami-bottlerocket.adoc[leveloffset=+1] + +include::eks-partner-amis.adoc[leveloffset=+1] + +include::eks-optimized-windows-ami.adoc[leveloffset=+1] diff --git a/latest/ug/nodes/eks-optimized-windows-ami.adoc b/latest/ug/nodes/eks-optimized-windows-ami.adoc new file mode 100644 index 00000000..1ed169a8 --- /dev/null +++ b/latest/ug/nodes/eks-optimized-windows-ami.adoc @@ -0,0 +1,150 @@ +//!!NODE_ROOT
+[.topic] +[[eks-optimized-windows-ami,eks-optimized-windows-ami.title]] += Create nodes with optimized [.noloc]`Windows` AMIs +:info_titleabbrev: Windows + +include::../attributes.txt[] + +[abstract] +-- +[.noloc]`Windows` Amazon EKS optimized AMIs are built on top of [.noloc]`Windows` Server 2019 and [.noloc]`Windows` Server 2022. +-- + +[.noloc]`Windows` Amazon EKS optimized AMIs are built on top of [.noloc]`Windows` Server 2019 and [.noloc]`Windows` Server 2022. They are configured to serve as the base image for Amazon EKS nodes. By default, the AMIs include the following components: + +* https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/[kubelet] +* https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/[kube-proxy] +* https://github.com/kubernetes-sigs/aws-iam-authenticator[{aws} IAM Authenticator for Kubernetes] +* https://github.com/kubernetes-csi/csi-proxy[csi-proxy] +* https://containerd.io/[containerd] + + +[NOTE] +==== + +You can track security or privacy events for [.noloc]`Windows` Server with the https://portal.msrc.microsoft.com/en-us/security-guidance[Microsoft security update guide]. + +==== + +Amazon EKS offers AMIs that are optimized for [.noloc]`Windows` containers in the following variants: + +* Amazon EKS-optimized [.noloc]`Windows` Server 2019 Core AMI +* Amazon EKS-optimized [.noloc]`Windows` Server 2019 Full AMI +* Amazon EKS-optimized [.noloc]`Windows` Server 2022 Core AMI +* Amazon EKS-optimized [.noloc]`Windows` Server 2022 Full AMI + + +[IMPORTANT] +==== + +* The Amazon EKS-optimized [.noloc]`Windows` Server [.noloc]`20H2` Core AMI is deprecated. No new versions of this AMI will be released. +* To ensure that you have the latest security updates by default, Amazon EKS maintains optimized [.noloc]`Windows` AMIs for the last 4 months. Each new AMI will be available for 4 months from the time of initial release. After this period, older AMIs are made private and are no longer accessible. We encourage you to use the latest AMIs to avoid security vulnerabilities and to avoid losing access to older AMIs that have reached the end of their supported lifetime. While we can't guarantee that we can provide access to AMIs that have been made private, you can request access by filing a ticket with {aws} Support. + +==== + +[[windows-ami-release-calendar,windows-ami-release-calendar.title]] +== Release calendar + +The following table lists the release and end of support dates for [.noloc]`Windows` versions on Amazon EKS. If an end date is blank, it's because the version is still supported. + +[cols="1,1,1", options="header"] +|=== +|Windows version +|Amazon EKS release +|Amazon EKS end of support + + +|[.noloc]`Windows` Server 2022 Core +|[.noloc]`10/17/2022` +| + +|[.noloc]`Windows` Server 2022 Full +|[.noloc]`10/17/2022` +| + +|[.noloc]`Windows` Server [.noloc]`20H2` Core +|[.noloc]`8/12/2021` +|[.noloc]`8/9/2022` + +|[.noloc]`Windows` Server 2004 Core +|[.noloc]`8/19/2020` +|[.noloc]`12/14/2021` + +|[.noloc]`Windows` Server 2019 Core +|[.noloc]`10/7/2019` +| + +|[.noloc]`Windows` Server 2019 Full +|[.noloc]`10/7/2019` +| + +|[.noloc]`Windows` Server 1909 Core +|[.noloc]`10/7/2019` +|[.noloc]`12/8/2020` +|=== + +[[bootstrap-script-configuration-parameters,bootstrap-script-configuration-parameters.title]] +== Bootstrap script configuration parameters + +When you create a [.noloc]`Windows` node, there's a script on the node that you can use to configure different parameters.
Depending on your setup, this script can be found on the node at a location similar to: `C:\Program Files\Amazon\EKS\Start-EKSBootstrap.ps1`. You can specify custom parameter values by passing them as arguments to the bootstrap script. For example, you can update the user data in the launch template. For more information, see <>. + +The script includes the following command-line parameters: + +* `-EKSClusterName` – Specifies the Amazon EKS cluster name for this worker node to join. +* `-KubeletExtraArgs` – Specifies extra arguments for `kubelet` (optional). +* `-KubeProxyExtraArgs` – Specifies extra arguments for `kube-proxy` (optional). +* `-APIServerEndpoint` – Specifies the Amazon EKS cluster API server endpoint (optional). Only valid when used with `-Base64ClusterCA`. Bypasses calling `Get-EKSCluster`. +* `-Base64ClusterCA` – Specifies the base64 encoded cluster CA content (optional). Only valid when used with `-APIServerEndpoint`. Bypasses calling `Get-EKSCluster`. +* `-DNSClusterIP` – Overrides the IP address to use for DNS queries within the cluster (optional). Defaults to `10.100.0.10` or `172.20.0.10` based on the IP address of the primary interface. +* `-ServiceCIDR` – Overrides the [.noloc]`Kubernetes` service IP address range from which cluster services are addressed. Defaults to `172.20.0.0/16` or `10.100.0.0/16` based on the IP address of the primary interface. +* `-ExcludedSnatCIDRs` – A list of `IPv4` CIDRs to exclude from Source Network Address Translation (SNAT). This means that the VPC-addressable private IP address of a [.noloc]`Pod` isn't translated to the instance ENI's primary `IPv4` address for outbound traffic. By default, the `IPv4` CIDR of the VPC for the Amazon EKS [.noloc]`Windows` node is added. CIDRs that you specify for this parameter are excluded in addition to that default. For more information, see <>. + +In addition to the command-line parameters, you can also specify some parameters through environment variables. A command-line parameter takes precedence over the corresponding environment variable. Define environment variables at the machine (system) scope, because the bootstrap script only reads machine-scoped variables. + +The script takes into account the following environment variables: + +* `SERVICE_IPV4_CIDR` – Refer to the `ServiceCIDR` command-line parameter for the definition. +* `EXCLUDED_SNAT_CIDRS` – Should be a comma separated string. Refer to the `ExcludedSnatCIDRs` command-line parameter for the definition. + +For a sketch of passing these parameters through user data, see the example after the following [.noloc]`gMSA` section. + + +[[ad-and-gmsa-support,ad-and-gmsa-support.title]] +=== [.noloc]`gMSA` authentication support + +Amazon EKS [.noloc]`Windows` [.noloc]`Pods` support different types of group Managed Service Account ([.noloc]`gMSA`) authentication. + +* Amazon EKS supports [.noloc]`Active Directory` domain identities for authentication. For more information on domain-joined [.noloc]`gMSA`, see link:containers/windows-authentication-on-amazon-eks-windows-pods[Windows Authentication on Amazon EKS Windows pods,type="blog"] on the {aws} blog. +* Amazon EKS offers a plugin that enables non-domain-joined [.noloc]`Windows` nodes to retrieve [.noloc]`gMSA` credentials with a portable user identity. For more information on domainless [.noloc]`gMSA`, see link:containers/domainless-windows-authentication-for-amazon-eks-windows-pods[Domainless Windows Authentication for Amazon EKS Windows pods,type="blog"] on the {aws} blog.
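+
+The following is a minimal sketch of EC2 user data that calls the bootstrap script with some of the parameters described earlier in this topic. The cluster name, the `kubelet` argument, and the DNS cluster IP are placeholder values for illustration only, and the script path can vary by AMI version; see the launch template information referenced earlier in this topic for the supported procedure.
+
+[source,powershell]
+----
+<powershell>
+# Locate the bootstrap script that ships with the Amazon EKS optimized Windows AMI.
+[string]$EKSBootstrapScriptFile = "$env:ProgramFiles\Amazon\EKS\Start-EKSBootstrap.ps1"
+
+# Join the node to a cluster named my-cluster and pass optional parameters.
+& $EKSBootstrapScriptFile -EKSClusterName my-cluster `
+    -KubeletExtraArgs '--max-pods=50' `
+    -DNSClusterIP 10.100.0.10
+</powershell>
+----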
+ + +[[windows-cached-container-images,windows-cached-container-images.title]] +== Cached container images + +Amazon EKS optimized [.noloc]`Windows` AMIs have certain container images cached for the `containerd` runtime. Container images are cached when building custom AMIs using Amazon-managed build components. For more information, see <>. + +The following cached container images are for the `containerd` runtime: + +* `amazonaws.com/eks/pause-windows` +* `mcr.microsoft.com/windows/nanoserver` +* `mcr.microsoft.com/windows/servercore` + +[[windows-more-information,windows-more-information.title]] +== More information + +For more information about using Amazon EKS optimized [.noloc]`Windows` AMIs, see the following sections: + +* For details on running workloads on Amazon EKS optimized accelerated Windows AMIs, see <>. +* To use [.noloc]`Windows` with managed node groups, see <>. +* To launch self-managed [.noloc]`Windows` nodes, see <>. +* For version information, see <>. +* To retrieve the latest IDs of the Amazon EKS optimized [.noloc]`Windows` AMIs, see <>. +* To use Amazon EC2 Image Builder to create custom Amazon EKS optimized [.noloc]`Windows` AMIs, see <>. +* For best practices, see https://aws.github.io/aws-eks-best-practices/windows/docs/ami/[Amazon EKS optimized Windows AMI management] in the _EKS Best Practices Guide_. + +include::self-managed-windows-server-2022.adoc[leveloffset=+1] + +include::eks-ami-versions-windows.adoc[leveloffset=+1] + +include::retrieve-windows-ami-id.adoc[leveloffset=+1] + +include::eks-custom-ami-windows.adoc[leveloffset=+1] diff --git a/latest/ug/nodes/eks-partner-amis.adoc b/latest/ug/nodes/eks-partner-amis.adoc new file mode 100644 index 00000000..b52ddc99 --- /dev/null +++ b/latest/ug/nodes/eks-partner-amis.adoc @@ -0,0 +1,15 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[eks-partner-amis,eks-partner-amis.title]] += Create nodes with optimized [.noloc]`Ubuntu Linux` AMIs +:info_titleabbrev: Ubuntu Linux + +[abstract] +-- +Canonical has partnered with Amazon EKS to create node AMIs that you can use in your clusters. +-- + +Canonical has partnered with Amazon EKS to create node AMIs that you can use in your clusters. + +https://www.canonical.com/[Canonical] delivers a built-for-purpose [.noloc]`Kubernetes` Node OS image. This minimized [.noloc]`Ubuntu` image is optimized for Amazon EKS and includes the custom {aws} kernel that is jointly developed with {aws}. For more information, see https://cloud-images.ubuntu.com/aws-eks/[Ubuntu on Amazon Elastic Kubernetes Service (EKS)] and <>. For information about support, see the link:premiumsupport/faqs/#Third-party_software[Third-party software,type="marketing"] section of the _{aws} Premium Support FAQs_. diff --git a/latest/ug/nodes/fargate-getting-started.adoc b/latest/ug/nodes/fargate-getting-started.adoc new file mode 100644 index 00000000..6f1f3c81 --- /dev/null +++ b/latest/ug/nodes/fargate-getting-started.adoc @@ -0,0 +1,179 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[fargate-getting-started,fargate-getting-started.title]] += Get started with {aws} Fargate for your cluster +:info_titleabbrev: Get started + +[abstract] +-- +This topic describes how to get started running [.noloc]`Pods` on {aws} Fargate with your Amazon EKS cluster. +-- + +[IMPORTANT] +==== + +{aws} Fargate with Amazon EKS isn't available in {aws} GovCloud (US-East) and {aws} GovCloud (US-West). + +==== + +This topic describes how to get started running [.noloc]`Pods` on {aws} Fargate with your Amazon EKS cluster. + +If you restrict access to the public endpoint of your cluster using CIDR blocks, we recommend that you also enable private endpoint access. This way, Fargate [.noloc]`Pods` can communicate with the cluster. Without the private endpoint enabled, the CIDR blocks that you specify for public access must include the outbound sources from your VPC. For more information, see <>. + +.Prerequisite +An existing cluster. If you don't already have an Amazon EKS cluster, see <>. + +[[fargate-gs-check-compatibility,fargate-gs-check-compatibility.title]] +== Step 1: Ensure that existing nodes can communicate with Fargate [.noloc]`Pods` + +If you're working with a new cluster with no nodes, or a cluster with only managed node groups (see <>), you can skip to <>. + +Assume that you're working with an existing cluster that already has nodes that are associated with it. Make sure that [.noloc]`Pods` on these nodes can communicate freely with the [.noloc]`Pods` that are running on Fargate. [.noloc]`Pods` that are running on Fargate are automatically configured to use the cluster security group for the cluster that they're associated with. Ensure that any existing nodes in your cluster can send and receive traffic to and from the cluster security group. Managed node groups are automatically configured to use the cluster security group as well, so you don't need to modify or check them for this compatibility (see <>). + +For existing node groups that were created with `eksctl` or the Amazon EKS managed {aws} CloudFormation templates, you can add the cluster security group to the nodes manually. Or, alternatively, you can modify the Auto Scaling group launch template for the node group to attach the cluster security group to the instances. For more information, see link:vpc/latest/userguide/VPC_SecurityGroups.html#SG_Changing_Group_Membership[Changing an instance's security groups,type="documentation"] in the _Amazon VPC User Guide_. + +You can check for a security group for your cluster in the {aws-management-console} under the *Networking* section for the cluster. Or, you can do this using the following {aws} CLI command. When using this command, replace [.replaceable]`my-cluster` with the name of your cluster. + +[source,bash,subs="verbatim,attributes"] +---- +aws eks describe-cluster --name my-cluster --query cluster.resourcesVpcConfig.clusterSecurityGroupId +---- + + +[[fargate-sg-pod-execution-role,fargate-sg-pod-execution-role.title]] +== Step 2: Create a Fargate [.noloc]`Pod` execution role + +When your cluster creates [.noloc]`Pods` on {aws} Fargate, the components that run on the Fargate infrastructure must make calls to {aws} APIs on your behalf. The Amazon EKS [.noloc]`Pod` execution role provides the IAM permissions to do this. To create an {aws} Fargate [.noloc]`Pod` execution role, see <>. 
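+
+If you prefer the {aws} CLI, the following is a minimal sketch of creating such a role. The role name and trust policy file name are placeholders, and the dedicated topic referenced above may include additional recommended condition keys in the trust policy, so treat it as the authoritative procedure.
+
+[source,bash,subs="verbatim,attributes"]
+----
+# Trust policy that lets Fargate Pods assume the role.
+cat >pod-execution-role-trust-policy.json <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Service": "eks-fargate-pods.amazonaws.com"
+      },
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
+EOF
+
+# Create the role and attach the managed policy for Fargate Pod execution.
+aws iam create-role \
+  --role-name AmazonEKSFargatePodExecutionRole \
+  --assume-role-policy-document file://pod-execution-role-trust-policy.json
+
+aws iam attach-role-policy \
+  --role-name AmazonEKSFargatePodExecutionRole \
+  --policy-arn {arn-aws}iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy
+----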
+ +[NOTE] +==== + +If you created your cluster with `eksctl` using the `--fargate` option, your cluster already has a [.noloc]`Pod` execution role that you can find in the IAM console with the pattern `eksctl-my-cluster-FargatePodExecutionRole-ABCDEFGHIJKL`. Similarly, if you use `eksctl` to create your Fargate profiles, `eksctl` creates your [.noloc]`Pod` execution role if one isn't already created. + +==== + +[[fargate-gs-create-profile,fargate-gs-create-profile.title]] +== Step 3: Create a Fargate profile for your cluster + +Before you can schedule [.noloc]`Pods` that are running on Fargate in your cluster, you must define a Fargate profile that specifies which [.noloc]`Pods` use Fargate when they're launched. For more information, see <>. + +[NOTE] +==== + +If you created your cluster with `eksctl` using the `--fargate` option, then a Fargate profile is already created for your cluster with selectors for all [.noloc]`Pods` in the `kube-system` and `default` namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate. + +==== + +You can create a Fargate profile using either of these tools: + +* <> +* <> + +=== `eksctl` [[eksctl_fargate_profile_create]] + +This procedure requires `eksctl` version `{eksctl-min-version}` or later. You can check your version with the following command: + +[source,bash,subs="verbatim,attributes"] +---- +eksctl version +---- + +For instructions on how to install or upgrade `eksctl`, see https://eksctl.io/installation[Installation] in the `eksctl` documentation. + +*To create a Fargate profile with `eksctl`* + +Create your Fargate profile with the following `eksctl` command, replacing every [.replaceable]`example value` with your own values. You're required to specify a namespace. However, the `--labels` option isn't required. + +[source,bash,subs="verbatim,attributes"] +---- +eksctl create fargateprofile \ + --cluster my-cluster \ + --name my-fargate-profile \ + --namespace my-kubernetes-namespace \ + --labels key=value +---- + +You can use certain wildcards for [.replaceable]`my-kubernetes-namespace` and [.replaceable]`key=value` labels. For more information, see <>. + +=== {aws-management-console} [[console_fargate_profile_create]] + +*To create a Fargate profile with {aws-management-console}* + +. Open the link:eks/home#/clusters[Amazon EKS console,type="console"]. +. Choose the cluster to create a Fargate profile for. +. Choose the *Compute* tab. +. Under *Fargate profiles*, choose *Add Fargate profile*. +. On the *Configure Fargate profile* page, do the following: ++ +.. For *Name*, enter a name for your Fargate profile. The name must be unique. +.. For *Pod execution role*, choose the [.noloc]`Pod` execution role to use with your Fargate profile. Only the IAM roles with the `eks-fargate-pods.amazonaws.com` service principal are shown. If you don't see any roles listed, you must create one. For more information, see <>. +.. Modify the selected *Subnets* as needed. ++ +NOTE: Only private subnets are supported for [.noloc]`Pods` that are running on Fargate. +.. For *Tags*, you can optionally tag your Fargate profile. These tags don't propagate to other resources that are associated with the profile such as [.noloc]`Pods`. +.. Choose *Next*. +. On the *Configure [.noloc]`Pod` selection* page, do the following: ++ +.. For *Namespace*, enter a namespace to match for [.noloc]`Pods`. ++ +*** You can use specific namespaces to match, such as `kube-system` or `default`. 
+*** You can use certain wildcards (for example, `prod-*`) to match multiple namespaces (for example, `prod-deployment` and `prod-test`). For more information, see <>. +.. (Optional) Add [.noloc]`Kubernetes` labels to the selector. [.noloc]`Pods` in the specified namespace must have all of these labels to match the selector. ++ +*** You can add the label `infrastructure: fargate` to the selector so that only [.noloc]`Pods` in the specified namespace that also have the `infrastructure: fargate` [.noloc]`Kubernetes` label match the selector. +*** You can use certain wildcards (for example, `key?: value?`) to match multiple labels (for example, `keya: valuea` and `keyb: valueb`). For more information, see <>. +.. Choose *Next*. +. On the *Review and create* page, review the information for your Fargate profile and choose *Create*. + + +[[fargate-gs-coredns,fargate-gs-coredns.title]] +== Step 4: Update [.noloc]`CoreDNS` + +By default, [.noloc]`CoreDNS` is configured to run on Amazon EC2 infrastructure on Amazon EKS clusters. If you want to _only_ run your [.noloc]`Pods` on Fargate in your cluster, complete the following steps. + +[NOTE] +==== + +If you created your cluster with `eksctl` using the `--fargate` option, then you can skip to <>. + +==== +. Create a Fargate profile for [.noloc]`CoreDNS` with the following command. Replace [.replaceable]`my-cluster` with your cluster name, [.replaceable]`111122223333` with your account ID, [.replaceable]`AmazonEKSFargatePodExecutionRole` with the name of your [.noloc]`Pod` execution role, and [.replaceable]`0000000000000001`, [.replaceable]`0000000000000002`, and [.replaceable]`0000000000000003` with the IDs of your private subnets. If you don't have a [.noloc]`Pod` execution role, you must create one first (see <>). ++ +IMPORTANT: The role ARN can't include a link:IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names[path,type="documentation"] other than `/`. For example, if the name of your role is `development/apps/my-role`, you need to change it to `my-role` when specifying the ARN for the role. The format of the role ARN must be `{arn-aws}iam::111122223333:role/role-name`. ++ +[source,bash,subs="verbatim,attributes"] +---- +aws eks create-fargate-profile \ + --fargate-profile-name coredns \ + --cluster-name my-cluster \ + --pod-execution-role-arn {arn-aws}iam::111122223333:role/AmazonEKSFargatePodExecutionRole \ + --selectors namespace=kube-system,labels={k8s-app=kube-dns} \ + --subnets subnet-0000000000000001 subnet-0000000000000002 subnet-0000000000000003 +---- +. Run the following command to remove the `eks.amazonaws.com/compute-type : ec2` annotation from the [.noloc]`CoreDNS` [.noloc]`Pods`. ++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl patch deployment coredns \ + -n kube-system \ + --type json \ + -p='[{"op": "remove", "path": "/spec/template/metadata/annotations/eks.amazonaws.com~1compute-type"}]' +---- + + +[[fargate-gs-next-steps,fargate-gs-next-steps.title]] +== Next steps + +* You can start migrating your existing applications to run on Fargate with the following workflow. ++ +.. <> that matches your application's [.noloc]`Kubernetes` namespace and [.noloc]`Kubernetes` labels. +.. Delete and re-create any existing [.noloc]`Pods` so that they're scheduled on Fargate. For example, the following command triggers a rollout of the `coredns` deployment. You can modify the namespace and deployment type to update your specific [.noloc]`Pods`.
++ +[source,bash,subs="verbatim,attributes"] +---- +kubectl rollout restart -n kube-system deployment coredns +---- +* Deploy the <> to allow Ingress objects for your [.noloc]`Pods` running on Fargate. +* You can use the <> to set the initial correct size of CPU and memory for your Fargate [.noloc]`Pods`, and then use the <> to scale those [.noloc]`Pods`. If you want the Vertical Pod Autoscaler to automatically re-deploy [.noloc]`Pods` to Fargate with higher CPU and memory combinations, set the Vertical Pod Autoscaler's mode to either `Auto` or `Recreate`. This is to ensure correct functionality. For more information, see the https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler#quick-start[Vertical Pod Autoscaler] documentation on [.noloc]`GitHub`. +* You can set up the link:otel[{aws} Distro for OpenTelemetry,type="marketing"] (ADOT) collector for application monitoring by following link:AmazonCloudWatch/latest/monitoring/Container-Insights-EKS-otel.html[these instructions,type="documentation"]. diff --git a/latest/ug/nodes/fargate-logging.adoc b/latest/ug/nodes/fargate-logging.adoc new file mode 100644 index 00000000..aedd924b --- /dev/null +++ b/latest/ug/nodes/fargate-logging.adoc @@ -0,0 +1,426 @@ +//!!NODE_ROOT
+include::../attributes.txt[] +[.topic] +[[fargate-logging,fargate-logging.title]] += Start {aws} Fargate logging for your cluster +:info_titleabbrev: Logging + +[abstract] +-- +Amazon EKS on Fargate offers a built-in log router based on [.noloc]`Fluent Bit`. +-- + +[IMPORTANT] +==== + +{aws} Fargate with Amazon EKS isn't available in {aws} GovCloud (US-East) and {aws} GovCloud (US-West). + +==== + +Amazon EKS on Fargate offers a built-in log router based on [.noloc]`Fluent Bit`. This means that you don't explicitly run a [.noloc]`Fluent Bit` container as a sidecar, but Amazon EKS runs it for you. All that you have to do is configure the log router. The configuration happens through a dedicated `ConfigMap` that must meet the following criteria: + +* Named `aws-logging` +* Created in a dedicated namespace called `aws-observability` +* Can't exceed 5300 characters. + +Once you've created the `ConfigMap`, Amazon EKS on Fargate automatically detects it and configures the log router with it. Fargate uses a version of {aws} for [.noloc]`Fluent Bit`, an upstream compliant distribution of [.noloc]`Fluent Bit` managed by {aws}. For more information, see https://github.com/aws/aws-for-fluent-bit[{aws} for Fluent Bit] on GitHub. + +The log router allows you to use the breadth of services at {aws} for log analytics and storage. You can stream logs from Fargate directly to Amazon CloudWatch or Amazon OpenSearch Service. You can also stream logs to destinations such as link:s3/[Amazon S3,type="marketing"], link:kinesis/data-streams/[Amazon Kinesis Data Streams,type="marketing"], and partner tools through link:kinesis/data-firehose/[Amazon Data Firehose,type="marketing"]. + +.Prerequisites +* An existing Fargate profile that specifies an existing [.noloc]`Kubernetes` namespace that you deploy Fargate [.noloc]`Pods` to. For more information, see <>. +* An existing Fargate [.noloc]`Pod` execution role. For more information, see <>. + +[[fargate-logging-log-router-configuration,fargate-logging-log-router-configuration.title]] +== Log router configuration + +In the following steps, replace every [.replaceable]`example value` with your own values. + +. Create a dedicated [.noloc]`Kubernetes` namespace named `aws-observability`. ++ +.. Save the following contents to a file named `[.replaceable]``aws-observability-namespace``.yaml` on your computer. The value for `name` must be `aws-observability` and the `aws-observability: enabled` label is required. ++ +[source,yaml,subs="verbatim,attributes"] +---- +kind: Namespace +apiVersion: v1 +metadata: + name: aws-observability + labels: + aws-observability: enabled +---- +.. Create the namespace. ++ +[source,bash,subs="verbatim,attributes,quotes"] +---- +kubectl apply -f [.replaceable]`aws-observability-namespace`.yaml +---- +. Create a `ConfigMap` with a `Fluent Conf` data value to ship container logs to a destination. Fluent Conf is the configuration language for [.noloc]`Fluent Bit`, a fast and lightweight log processor, and is used to route container logs to a log destination of your choice. For more information, see https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file[Configuration File] in the [.noloc]`Fluent Bit` documentation. ++ +[IMPORTANT] +==== +The main sections included in a typical `Fluent Conf` are `Service`, `Input`, `Filter`, and `Output`. The Fargate log router, however, only accepts: + +* The `Filter` and `Output` sections. +* A `Parser` section. + +If you provide any other sections, they will be rejected.
+==== ++ +The Fargate log router manages the `Service` and `Input` sections. It has the following `Input` section, which can't be modified and isn't needed in your `ConfigMap`. However, you can get insights from it, such as the memory buffer limit and the tag applied for logs. ++ +[source,yaml,subs="verbatim,attributes"] +---- +[INPUT] + Name tail + Buffer_Max_Size 66KB + DB /var/log/flb_kube.db + Mem_Buf_Limit 45MB + Path /var/log/containers/*.log + Read_From_Head On + Refresh_Interval 10 + Rotate_Wait 30 + Skip_Long_Lines On + Tag kube.* +---- ++ +When creating the `ConfigMap`, take into account the following rules that Fargate uses to validate fields: ++ +* `[FILTER]`, `[OUTPUT]`, and `[PARSER]` are supposed to be specified under each corresponding key. For example, `[FILTER]` must be under `filters.conf`. You can have one or more ``[FILTER]``s under `filters.conf`. The `[OUTPUT]` and `[PARSER]` sections should also be under their corresponding keys. By specifying multiple `[OUTPUT]` sections, you can route your logs to different destinations at the same time. +* Fargate validates the required keys for each section. `Name` and `match` are required for each `[FILTER]` and `[OUTPUT]`. `Name` and `format` are required for each `[PARSER]`. The keys are case-insensitive. +* Environment variables such as `${ENV_VAR}` aren't allowed in the `ConfigMap`. +* The indentation has to be the same for either directive or key-value pair within each `filters.conf`, `output.conf`, and `parsers.conf`. Key-value pairs have to be indented more than directives. +* Fargate validates against the following supported filters: `grep`, `parser`, `record_modifier`, `rewrite_tag`, `throttle`, `nest`, `modify`, and `kubernetes`. +* Fargate validates against the following supported output: `es`, `firehose`, `kinesis_firehose`, `cloudwatch`, `cloudwatch_logs`, and `kinesis`. +* At least one supported `Output` plugin has to be provided in the `ConfigMap` to enable logging. `Filter` and `Parser` aren't required to enable logging. + ++ +You can also run [.noloc]`Fluent Bit` on Amazon EC2 using the desired configuration to troubleshoot any issues that arise from validation. Create your `ConfigMap` using one of the following examples. + ++ +[IMPORTANT] +==== +Amazon EKS Fargate logging doesn't support dynamic configuration of a `ConfigMap`. Any changes to a `ConfigMap` are applied to new [.noloc]`Pods` only. Changes aren't applied to existing [.noloc]`Pods`. +==== + ++ +Create a `ConfigMap` using the example for your desired log destination. + ++ +[NOTE] +==== +You can also use Amazon Kinesis Data Streams for your log destination. If you use Kinesis Data Streams, make sure that the pod execution role has been granted the `kinesis:PutRecords` permission. For more information, see Amazon Kinesis Data Streams https://docs.fluentbit.io/manual/pipeline/outputs/kinesis#permissions[Permissions] in the _Fluent Bit: Official Manual_. +==== + ++ +==== +[role="tablist"] +CloudWatch:: +*To create a `ConfigMap` for CloudWatch* + ++ +You have two output options when using CloudWatch: ++ +* https://docs.fluentbit.io/manual/v/1.5/pipeline/outputs/cloudwatch[An output plugin written in C] +* https://github.com/aws/amazon-cloudwatch-logs-for-fluent-bit[An output plugin written in Golang] + ++ +The following example shows you how to use the `cloudwatch_logs` plugin to send logs to CloudWatch. + +.. Save the following contents to a file named `[.replaceable]``aws-logging-cloudwatch-configmap``.yaml`. 
Replace [.replaceable]`region-code` with the {aws} Region that your cluster is in. The parameters under `[OUTPUT]` are required. +// Not using subs="quotes" here with [.replaceable]`region-code` because the ^ characters get dropped, even when using AsciiDoc's built-in {caret} character replacement attribute. ++ +[source,yaml,subs="verbatim,attributes"] +---- +kind: ConfigMap +apiVersion: v1 +metadata: + name: aws-logging + namespace: aws-observability +data: + flb_log_cw: "false" # Set to true to ship Fluent Bit process logs to CloudWatch. + filters.conf: | + [FILTER] + Name parser + Match * + Key_name log + Parser crio + [FILTER] + Name kubernetes + Match kube.* + Merge_Log On + Keep_Log Off + Buffer_Size 0 + Kube_Meta_Cache_TTL 300s + output.conf: | + [OUTPUT] + Name cloudwatch_logs + Match kube.* + region region-code + log_group_name my-logs + log_stream_prefix from-fluent-bit- + log_retention_days 60 + auto_create_group true + parsers.conf: | + [PARSER] + Name crio + Format Regex + Regex ^(?