From 8715e58b8835bcb3dbd355c270ed88f2cb6812fc Mon Sep 17 00:00:00 2001 From: Nuru Date: Fri, 31 May 2024 11:39:35 -0700 Subject: [PATCH] [Karpenter] Minor cleanups (#1056) --- modules/eks/karpenter-node-pool/README.md | 2 +- modules/eks/karpenter-node-pool/main.tf | 4 ++ modules/eks/karpenter-node-pool/variables.tf | 4 +- modules/eks/karpenter/README.md | 3 +- modules/eks/karpenter/controller-policy.tf | 2 +- modules/eks/karpenter/provider-helm.tf | 69 +++++++++++++++----- 6 files changed, 63 insertions(+), 21 deletions(-) diff --git a/modules/eks/karpenter-node-pool/README.md b/modules/eks/karpenter-node-pool/README.md index e972c2299..fbc7271bb 100644 --- a/modules/eks/karpenter-node-pool/README.md +++ b/modules/eks/karpenter-node-pool/README.md @@ -203,7 +203,7 @@ components: | [labels\_as\_tags](#input\_labels\_as\_tags) | Set of labels (ID elements) to include as tags in the `tags` output.
Default is to include all labels.
Tags with empty values will not be included in the `tags` output.
Set to `[]` to suppress all generated tags.
**Notes:**
The value of the `name` tag, if included, will be the `id`, not the `name`.
Unlike other `null-label` inputs, the initial setting of `labels_as_tags` cannot be
changed in later chained modules. Attempts to change it will be silently ignored. | `set(string)` |
[
"default"
]
| no | | [name](#input\_name) | ID element. Usually the component or solution name, e.g. 'app' or 'jenkins'.
This is the only ID element not also included as a `tag`.
The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input. | `string` | `null` | no | | [namespace](#input\_namespace) | ID element. Usually an abbreviation of your organization name, e.g. 'eg' or 'cp', to help ensure generated IDs are globally unique | `string` | `null` | no | -| [node\_pools](#input\_node\_pools) | Configuration for node pools. See code for details. |
map(object({
# The name of the Karpenter provisioner. The map key is used if this is not set.
name = optional(string)
# Whether to place EC2 instances launched by Karpenter into VPC private subnets. Set it to `false` to use public subnets.
private_subnets_enabled = bool
# The Disruption spec controls how Karpenter scales down the node group.
# See the example (sadly not the specific `spec.disruption` documentation) at https://karpenter.sh/docs/concepts/nodepools/ for details
disruption = optional(object({
# Describes which types of Nodes Karpenter should consider for consolidation.
# If using 'WhenUnderutilized', Karpenter will consider all nodes for consolidation and attempt to remove or
# replace Nodes when it discovers that the Node is underutilized and could be changed to reduce cost.
# If using `WhenEmpty`, Karpenter will only consider nodes for consolidation that contain no workload pods.
consolidation_policy = optional(string, "WhenUnderutilized")

# The amount of time Karpenter should wait after discovering a consolidation decision (`go` duration string, s|m|h).
# This value can currently (v0.36.0) only be set when the consolidationPolicy is 'WhenEmpty'.
# You can choose to disable consolidation entirely by setting the string value 'Never' here.
# Earlier versions of Karpenter called this field `ttl_seconds_after_empty`.
consolidate_after = optional(string)

# The amount of time a Node can live on the cluster before being removed (`go` duration string, s|m|h).
# You can choose to disable expiration entirely by setting the string value 'Never' here.
# This module sets a default of 336 hours (14 days), while the Karpenter default is 720 hours (30 days).
# Note that Karpenter calls this field "expiresAfter", and earlier versions called it `ttl_seconds_until_expired`,
# but we call it "max_instance_lifetime" to match the corresponding field in EC2 Auto Scaling Groups.
max_instance_lifetime = optional(string, "336h")

# Budgets control the the maximum number of NodeClaims owned by this NodePool that can be terminating at once.
# See https://karpenter.sh/docs/concepts/disruption/#disruption-budgets for details.
# A percentage is the percentage of the total number of active, ready nodes not being deleted, rounded up.
# If there are multiple active budgets, Karpenter uses the most restrictive value.
# If left undefined, this will default to one budget with a value of nodes: 10%.
# Note that budgets do not prevent or limit involuntary terminations.
# Example:
# On Weekdays during business hours, don't do any deprovisioning.
# budgets = {
# schedule = "0 9 * * mon-fri"
# duration = 8h
# nodes = "0"
# }
budgets = optional(list(object({
# The schedule specifies when a budget begins being active, using extended cronjob syntax.
# See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax for syntax details.
# Timezones are not supported. This field is required if Duration is set.
schedule = optional(string)
# Duration determines how long a Budget is active after each Scheduled start.
# If omitted, the budget is always active. This is required if Schedule is set.
# Must be a whole number of minutes and hours, as cron does not work in seconds,
# but since Go's `duration.String()` always adds a "0s" at the end, that is allowed.
duration = optional(string)
# The percentage or number of nodes that Karpenter can scale down during the budget.
nodes = string
})), [])
}), {})
# Karpenter provisioner total CPU limit for all pods running on the EC2 instances launched by Karpenter
total_cpu_limit = string
# Karpenter provisioner total memory limit for all pods running on the EC2 instances launched by Karpenter
total_memory_limit = string
# Set a weight for this node pool.
# See https://karpenter.sh/docs/concepts/scheduling/#weighted-nodepools
weight = optional(number, 50)
# Karpenter provisioner taints configuration. See https://aws.github.io/aws-eks-best-practices/karpenter/#create-provisioners-that-are-mutually-exclusive for more details
taints = optional(list(object({
key = string
effect = string
value = string
})))
startup_taints = optional(list(object({
key = string
effect = string
value = string
})))
# Karpenter node metadata options. See https://karpenter.sh/docs/concepts/nodeclasses/#specmetadataoptions for more details
metadata_options = optional(object({
httpEndpoint = optional(string, "enabled")
httpProtocolIPv6 = optional(string, "disabled")
httpPutResponseHopLimit = optional(number, 2)
# httpTokens can be either "required" or "optional"
httpTokens = optional(string, "required")
}), {})
# The AMI used by Karpenter provisioner when provisioning nodes. Based on the value set for amiFamily, Karpenter will automatically query for the appropriate EKS optimized AMI via AWS Systems Manager (SSM)
ami_family = string
# Karpenter nodes block device mappings. Controls the Elastic Block Storage volumes that Karpenter attaches to provisioned nodes.
# Karpenter uses default block device mappings for the AMI Family specified.
# For example, the Bottlerocket AMI Family defaults with two block device mappings,
# and normally you only want to scale `/dev/xvdb` where Containers and there storage are stored.
# Most other AMIs only have one device mapping at `/dev/xvda`.
# See https://karpenter.sh/docs/concepts/nodeclasses/#specblockdevicemappings for more details
block_device_mappings = list(object({
deviceName = string
ebs = optional(object({
volumeSize = string
volumeType = string
deleteOnTermination = optional(bool, true)
encrypted = optional(bool, true)
iops = optional(number)
kmsKeyID = optional(string, "alias/aws/ebs")
snapshotID = optional(string)
throughput = optional(number)
}))
}))
# Set acceptable (In) and unacceptable (Out) Kubernetes and Karpenter values for node provisioning based on Well-Known Labels and cloud-specific settings. These can include instance types, zones, computer architecture, and capacity type (such as AWS spot or on-demand). See https://karpenter.sh/v0.18.0/provisioner/#specrequirements for more details
requirements = list(object({
key = string
operator = string
# Operators like "Exists" and "DoesNotExist" do not require a value
values = optional(list(string))
}))
}))
| n/a | yes | +| [node\_pools](#input\_node\_pools) | Configuration for node pools. See code for details. |
map(object({
# The name of the Karpenter provisioner. The map key is used if this is not set.
name = optional(string)
# Whether to place EC2 instances launched by Karpenter into VPC private subnets. Set it to `false` to use public subnets.
private_subnets_enabled = bool
# The Disruption spec controls how Karpenter scales down the node group.
# See the example (sadly not the specific `spec.disruption` documentation) at https://karpenter.sh/docs/concepts/nodepools/ for details
disruption = optional(object({
# Describes which types of Nodes Karpenter should consider for consolidation.
# If using 'WhenUnderutilized', Karpenter will consider all nodes for consolidation and attempt to remove or
# replace Nodes when it discovers that the Node is underutilized and could be changed to reduce cost.
# If using `WhenEmpty`, Karpenter will only consider nodes for consolidation that contain no workload pods.
consolidation_policy = optional(string, "WhenUnderutilized")

# The amount of time Karpenter should wait after discovering a consolidation decision (`go` duration string, s|m|h).
# This value can currently (v0.36.0) only be set when the consolidationPolicy is 'WhenEmpty'.
# You can choose to disable consolidation entirely by setting the string value 'Never' here.
# Earlier versions of Karpenter called this field `ttl_seconds_after_empty`.
consolidate_after = optional(string)

# The amount of time a Node can live on the cluster before being removed (`go` duration string, s|m|h).
# You can choose to disable expiration entirely by setting the string value 'Never' here.
# This module sets a default of 336 hours (14 days), while the Karpenter default is 720 hours (30 days).
# Note that Karpenter calls this field "expiresAfter", and earlier versions called it `ttl_seconds_until_expired`,
# but we call it "max_instance_lifetime" to match the corresponding field in EC2 Auto Scaling Groups.
max_instance_lifetime = optional(string, "336h")

# Budgets control the maximum number of NodeClaims owned by this NodePool that can be terminating at once.
# See https://karpenter.sh/docs/concepts/disruption/#disruption-budgets for details.
# A percentage is the percentage of the total number of active, ready nodes not being deleted, rounded up.
# If there are multiple active budgets, Karpenter uses the most restrictive value.
# If left undefined, this will default to one budget with a value of nodes: 10%.
# Note that budgets do not prevent or limit involuntary terminations.
# Example:
# On Weekdays during business hours, don't do any deprovisioning.
# budgets = {
# schedule = "0 9 * * mon-fri"
# duration = "8h"
# nodes = "0"
# }
budgets = optional(list(object({
# The schedule specifies when a budget begins being active, using extended cronjob syntax.
# See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax for syntax details.
# Timezones are not supported. This field is required if Duration is set.
schedule = optional(string)
# Duration determines how long a Budget is active after each Scheduled start.
# If omitted, the budget is always active. This is required if Schedule is set.
# Must be a whole number of minutes and hours, as cron does not work in seconds,
# but since Go's `duration.String()` always adds a "0s" at the end, that is allowed.
duration = optional(string)
# The percentage or number of nodes that Karpenter can scale down during the budget.
nodes = string
})), [])
}), {})
# Karpenter provisioner total CPU limit for all pods running on the EC2 instances launched by Karpenter
total_cpu_limit = string
# Karpenter provisioner total memory limit for all pods running on the EC2 instances launched by Karpenter
total_memory_limit = string
# Set a weight for this node pool.
# See https://karpenter.sh/docs/concepts/scheduling/#weighted-nodepools
weight = optional(number, 50)
labels = optional(map(string))
annotations = optional(map(string))
# Karpenter provisioner taints configuration. See https://aws.github.io/aws-eks-best-practices/karpenter/#create-provisioners-that-are-mutually-exclusive for more details
taints = optional(list(object({
key = string
effect = string
value = string
})))
startup_taints = optional(list(object({
key = string
effect = string
value = string
})))
# Karpenter node metadata options. See https://karpenter.sh/docs/concepts/nodeclasses/#specmetadataoptions for more details
metadata_options = optional(object({
httpEndpoint = optional(string, "enabled")
httpProtocolIPv6 = optional(string, "disabled")
httpPutResponseHopLimit = optional(number, 2)
# httpTokens can be either "required" or "optional"
httpTokens = optional(string, "required")
}), {})
# The AMI used by Karpenter provisioner when provisioning nodes. Based on the value set for amiFamily, Karpenter will automatically query for the appropriate EKS optimized AMI via AWS Systems Manager (SSM)
ami_family = string
# Karpenter nodes block device mappings. Controls the Elastic Block Storage volumes that Karpenter attaches to provisioned nodes.
# Karpenter uses default block device mappings for the AMI Family specified.
# For example, the Bottlerocket AMI Family defaults with two block device mappings,
# and normally you only want to scale `/dev/xvdb` where Containers and their storage are stored.
# Most other AMIs only have one device mapping at `/dev/xvda`.
# See https://karpenter.sh/docs/concepts/nodeclasses/#specblockdevicemappings for more details
block_device_mappings = list(object({
deviceName = string
ebs = optional(object({
volumeSize = string
volumeType = string
deleteOnTermination = optional(bool, true)
encrypted = optional(bool, true)
iops = optional(number)
kmsKeyID = optional(string, "alias/aws/ebs")
snapshotID = optional(string)
throughput = optional(number)
}))
}))
# Set acceptable (In) and unacceptable (Out) Kubernetes and Karpenter values for node provisioning based on Well-Known Labels and cloud-specific settings. These can include instance types, zones, computer architecture, and capacity type (such as AWS spot or on-demand). See https://karpenter.sh/v0.18.0/provisioner/#specrequirements for more details
requirements = list(object({
key = string
operator = string
# Operators like "Exists" and "DoesNotExist" do not require a value
values = optional(list(string))
}))
}))
| n/a | yes | | [regex\_replace\_chars](#input\_regex\_replace\_chars) | Terraform regular expression (regex) string.
Characters matching the regex will be removed from the ID elements.
If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits. | `string` | `null` | no | | [region](#input\_region) | AWS Region | `string` | n/a | yes | | [stage](#input\_stage) | ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release' | `string` | `null` | no | diff --git a/modules/eks/karpenter-node-pool/main.tf b/modules/eks/karpenter-node-pool/main.tf index a393c4d43..67c5b57b9 100644 --- a/modules/eks/karpenter-node-pool/main.tf +++ b/modules/eks/karpenter-node-pool/main.tf @@ -39,6 +39,10 @@ resource "kubernetes_manifest" "node_pool" { } ) template = { + metadata = { + labels = each.value.labels + annotations = each.value.annotations + } spec = merge({ nodeClassRef = { apiVersion = "karpenter.k8s.aws/v1beta1" diff --git a/modules/eks/karpenter-node-pool/variables.tf b/modules/eks/karpenter-node-pool/variables.tf index d768a0131..63e7d8d2e 100644 --- a/modules/eks/karpenter-node-pool/variables.tf +++ b/modules/eks/karpenter-node-pool/variables.tf @@ -70,7 +70,9 @@ variable "node_pools" { total_memory_limit = string # Set a weight for this node pool. # See https://karpenter.sh/docs/concepts/scheduling/#weighted-nodepools - weight = optional(number, 50) + weight = optional(number, 50) + labels = optional(map(string)) + annotations = optional(map(string)) # Karpenter provisioner taints configuration. 
See https://aws.github.io/aws-eks-best-practices/karpenter/#create-provisioners-that-are-mutually-exclusive for more details taints = optional(list(object({ key = string diff --git a/modules/eks/karpenter/README.md b/modules/eks/karpenter/README.md index 64cbdafda..e0afff396 100644 --- a/modules/eks/karpenter/README.md +++ b/modules/eks/karpenter/README.md @@ -399,7 +399,8 @@ For more details on the CRDs, see: | [kube\_exec\_auth\_enabled](#input\_kube\_exec\_auth\_enabled) | If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster.
Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. | `bool` | `true` | no | | [kube\_exec\_auth\_role\_arn](#input\_kube\_exec\_auth\_role\_arn) | The role ARN for `aws eks get-token` to use | `string` | `""` | no | | [kube\_exec\_auth\_role\_arn\_enabled](#input\_kube\_exec\_auth\_role\_arn\_enabled) | If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token` | `bool` | `true` | no | -| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes kube config file | `string` | `""` | no | +| [kubeconfig\_context](#input\_kubeconfig\_context) | Context to choose from the Kubernetes config file.
If supplied, `kubeconfig_context_format` will be ignored. | `string` | `""` | no | +| [kubeconfig\_context\_format](#input\_kubeconfig\_context\_format) | A format string to use for creating the `kubectl` context name when
`kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied.
Must include a single `%s` which will be replaced with the cluster name. | `string` | `""` | no | | [kubeconfig\_exec\_auth\_api\_version](#input\_kubeconfig\_exec\_auth\_api\_version) | The Kubernetes API version of the credentials returned by the `exec` auth plugin | `string` | `"client.authentication.k8s.io/v1beta1"` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true` | `string` | `""` | no | | [kubeconfig\_file\_enabled](#input\_kubeconfig\_file\_enabled) | If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster | `bool` | `false` | no | diff --git a/modules/eks/karpenter/controller-policy.tf b/modules/eks/karpenter/controller-policy.tf index 6e234378b..f2b4924f2 100644 --- a/modules/eks/karpenter/controller-policy.tf +++ b/modules/eks/karpenter/controller-policy.tf @@ -28,7 +28,7 @@ # "Resource": "arn:${local.aws_partition}:eks:${var.region}:${AWS::AccountId}:cluster/${local.eks_cluster_id}" # # NOTE: As a special case, the above multiple substitutions which create the ARN for the EKS cluster -# should be replaced with a single substitution, `${local.eks_cluster_arn}` to avoid neeeding to +# should be replaced with a single substitution, `${local.eks_cluster_arn}` to avoid needing to # look up the account ID and because it is more robust. 
# # Review the existing HEREDOC below to find conditionals such as: diff --git a/modules/eks/karpenter/provider-helm.tf b/modules/eks/karpenter/provider-helm.tf index 64459d4f4..91cc7f6d4 100644 --- a/modules/eks/karpenter/provider-helm.tf +++ b/modules/eks/karpenter/provider-helm.tf @@ -21,18 +21,35 @@ variable "kubeconfig_file_enabled" { type = bool default = false description = "If `true`, configure the Kubernetes provider with `kubeconfig_file` and use that kubeconfig file for authenticating to the EKS cluster" + nullable = false } variable "kubeconfig_file" { type = string default = "" description = "The Kubernetes provider `config_path` setting to use when `kubeconfig_file_enabled` is `true`" + nullable = false } variable "kubeconfig_context" { type = string default = "" - description = "Context to choose from the Kubernetes kube config file" + description = <<-EOT + Context to choose from the Kubernetes config file. + If supplied, `kubeconfig_context_format` will be ignored. + EOT + nullable = false +} + +variable "kubeconfig_context_format" { + type = string + default = "" + description = <<-EOT + A format string to use for creating the `kubectl` context name when + `kubeconfig_file_enabled` is `true` and `kubeconfig_context` is not supplied. + Must include a single `%s` which will be replaced with the cluster name. + EOT + nullable = false } variable "kube_data_auth_enabled" { @@ -42,6 +59,7 @@ variable "kube_data_auth_enabled" { If `true`, use an `aws_eks_cluster_auth` data source to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled` or `kube_exec_auth_enabled`. EOT + nullable = false } variable "kube_exec_auth_enabled" { @@ -51,48 +69,62 @@ variable "kube_exec_auth_enabled" { If `true`, use the Kubernetes provider `exec` feature to execute `aws eks get-token` to authenticate to the EKS cluster. Disabled by `kubeconfig_file_enabled`, overrides `kube_data_auth_enabled`. 
EOT + nullable = false } variable "kube_exec_auth_role_arn" { type = string default = "" description = "The role ARN for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_role_arn_enabled" { type = bool default = true description = "If `true`, pass `kube_exec_auth_role_arn` as the role ARN to `aws eks get-token`" + nullable = false } variable "kube_exec_auth_aws_profile" { type = string default = "" description = "The AWS config profile for `aws eks get-token` to use" + nullable = false } variable "kube_exec_auth_aws_profile_enabled" { type = bool default = false description = "If `true`, pass `kube_exec_auth_aws_profile` as the `profile` to `aws eks get-token`" + nullable = false } variable "kubeconfig_exec_auth_api_version" { type = string default = "client.authentication.k8s.io/v1beta1" description = "The Kubernetes API version of the credentials returned by the `exec` auth plugin" + nullable = false } variable "helm_manifest_experiment_enabled" { type = bool default = false description = "Enable storing of the rendered manifest for helm_release so the full diff of what is changing can been seen in the plan" + nullable = false } locals { kubeconfig_file_enabled = var.kubeconfig_file_enabled - kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled - kube_data_auth_enabled = local.kube_exec_auth_enabled ? false : var.kube_data_auth_enabled + kubeconfig_file = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" + kubeconfig_context = !local.kubeconfig_file_enabled ? "" : ( + length(var.kubeconfig_context) != 0 ? var.kubeconfig_context : ( + length(var.kubeconfig_context_format) != 0 ? format(var.kubeconfig_context_format, local.eks_cluster_id) : "" + ) + ) + + kube_exec_auth_enabled = local.kubeconfig_file_enabled ? false : var.kube_exec_auth_enabled + kube_data_auth_enabled = local.kube_exec_auth_enabled ? 
false : var.kube_data_auth_enabled # Eventually we might try to get this from an environment variable kubeconfig_exec_auth_api_version = var.kubeconfig_exec_auth_api_version @@ -107,10 +139,11 @@ locals { ] : [] # Provide dummy configuration for the case where the EKS cluster is not available. - certificate_authority_data = try(module.eks.outputs.eks_cluster_certificate_authority_data, "") + certificate_authority_data = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_certificate_authority_data, null) + cluster_ca_certificate = local.kubeconfig_file_enabled ? null : try(base64decode(local.certificate_authority_data), null) # Use coalesce+try to handle both the case where the output is missing and the case where it is empty. eks_cluster_id = coalesce(try(module.eks.outputs.eks_cluster_id, ""), "missing") - eks_cluster_endpoint = try(module.eks.outputs.eks_cluster_endpoint, "") + eks_cluster_endpoint = local.kubeconfig_file_enabled ? null : try(module.eks.outputs.eks_cluster_endpoint, "") } data "aws_eks_cluster_auth" "eks" { @@ -121,15 +154,16 @@ data "aws_eks_cluster_auth" "eks" { provider "helm" { kubernetes { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. 
People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws" @@ -146,15 +180,16 @@ provider "helm" { provider "kubernetes" { host = local.eks_cluster_endpoint - cluster_ca_certificate = base64decode(local.certificate_authority_data) + cluster_ca_certificate = local.cluster_ca_certificate token = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null - # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster - # in KUBECONFIG is some other cluster, this will cause problems, so we override it always. - config_path = local.kubeconfig_file_enabled ? var.kubeconfig_file : "" - config_context = var.kubeconfig_context + # It is too confusing to allow the Kubernetes provider to use environment variables to set authentication + # in this module because we have so many options, so we override environment variables like `KUBE_CONFIG_PATH` + # in all cases. People can still use environment variables by setting TF_VAR_kubeconfig_file. + config_path = local.kubeconfig_file + config_context = local.kubeconfig_context dynamic "exec" { - for_each = local.kube_exec_auth_enabled && length(local.certificate_authority_data) > 0 ? ["exec"] : [] + for_each = local.kube_exec_auth_enabled && local.certificate_authority_data != null ? ["exec"] : [] content { api_version = local.kubeconfig_exec_auth_api_version command = "aws"