diff --git a/cookiecutter.json b/cookiecutter.json index 1c2b4b35..9f9bfb6d 100644 --- a/cookiecutter.json +++ b/cookiecutter.json @@ -11,20 +11,13 @@ "aws_region": "us-east-1", "aws_account_id": "000000000000", "create_nextjs_frontend": "y", - "mail_service": [ - "Mailgun", - "Amazon SES", - "Other SMTP" - ], + "debug": "n", + "mail_service": ["Mailgun", "Amazon SES", "Other SMTP"], + "operating_system": ["k3s", "talos"], "_challenge": "n", "use_celery": "n", "use_sentry": "n", - "debug": "n", - "source_control_provider": [ - "github.com", - "bitbucket.org", - "none" - ], + "source_control_provider": ["github.com", "bitbucket.org", "none"], "source_control_organization_slug": "sixfeetup", "__prompts__": { "source_control_organization_slug": "What is the organization slug for the source control provider?", diff --git a/{{cookiecutter.project_slug}}/bootstrap-cluster/.env b/{{cookiecutter.project_slug}}/bootstrap-cluster/.env new file mode 100644 index 00000000..ec2a3f5e --- /dev/null +++ b/{{cookiecutter.project_slug}}/bootstrap-cluster/.env @@ -0,0 +1,7 @@ +TOFU_DIR: "../terraform" +{%- if cookiecutter.operating_system == "talos" %} +TALOS_FACTORY_IMAGE: "factory.talos.dev/installer/10e276a06c1f86b182757a962258ac00655d3425e5957f617bdc82f06894e39b:v1.7.6" +{%- endif %} +ARGOCD_VERSION: 7.4.1 +REPO_URL: "{{ cookiecutter.repo_url }}" +REPO_NAME: "{{ cookiecutter.repo_name }}" diff --git a/{{cookiecutter.project_slug}}/bootstrap-cluster/.gitignore b/{{cookiecutter.project_slug}}/bootstrap-cluster/.gitignore index 322b2ade..fe7a98fb 100644 --- a/{{cookiecutter.project_slug}}/bootstrap-cluster/.gitignore +++ b/{{cookiecutter.project_slug}}/bootstrap-cluster/.gitignore @@ -1,7 +1,11 @@ !sandbox/.env !staging/.env !prod/.env +!.env controlplane.yaml kubeconfig talosconfig worker.yaml +id_ed25519 +*_ips.txt +*.bak.* diff --git a/{{cookiecutter.project_slug}}/bootstrap-cluster/README.md b/{{cookiecutter.project_slug}}/bootstrap-cluster/README.md index 01d3595a..084d143c 
100644 --- a/{{cookiecutter.project_slug}}/bootstrap-cluster/README.md +++ b/{{cookiecutter.project_slug}}/bootstrap-cluster/README.md @@ -1,10 +1,18 @@ -# Bootstrap Talos and ArgoCD +{%- if cookiecutter.operating_system == "talos" %}# Bootstrap Talos and ArgoCD After deploying infrastructure using Terraform, we can proceed with configuring Talos and bootstrapping ArgoCD. Terraform is solely utilized for deploying infrastructure. Any subsequent configuration of Talos or ArgoCD is done using Taskfile tasks. +{%- elif cookiecutter.operating_system == "k3s" %}# Bootstrap k3s and ArgoCD + +After deploying infrastructure using Terraform, we can proceed with configuring +k3s and bootstrapping ArgoCD. + +Terraform is solely utilized for deploying infrastructure. Any subsequent +configuration of k3s or ArgoCD is done using Taskfile tasks. +{%- endif %} To view a list of tasks and their descriptions, navigate to the `bootstrap-cluster` directory and execute `task`. @@ -15,8 +23,16 @@ cluster. We recommend opening the AWS serial console for each ec2 instance to monitor the bootstrap process. +{%- if cookiecutter.operating_system == "talos" %} + ### Bootstrapping Talos +{%- elif cookiecutter.operating_system == "k3s" %} + +### Bootstrapping k3s + +{%- endif %} + 1. Navigate to the directory corresponding to the environment being set up and run: @@ -32,6 +48,7 @@ bootstrap process. CLUSTER_NAME: "{{ cookiecutter.project_dash }}-sandbox" ``` +{%- if cookiecutter.operating_system == "talos" %} Note that we use a Talos factory image. This image contains a system extension that provides the ECR credential provider. @@ -42,7 +59,8 @@ bootstrap process. CredentialProvider API to authenticate against AWS' Elastic Container Registry and pull images. ``` - +{%- endif %} +{%- if cookiecutter.operating_system == "talos" %} 3. Bootstrap Talos with the following command: ``` @@ -51,45 +69,84 @@ bootstrap process. 
To understand what this task will do, examine the Taskfile configuration: + ```yaml + bootstrap: + desc: | + Run all tasks required to bootstrap the Talos and Kubernetes cluster. + requires: + vars: [ENV] + cmds: + - task: generate_configs + - task: set_node_ips + - task: store_controlplane_config + - task: store_talosconfig + - task: apply_talos_config + - sleep 30 + - task: bootstrap_kubernetes + - sleep 30 + - task: generate_kubeconfig + - task: store_kubeconfig + - task: upgrade_talos + - task: enable_ecr_credential_helper ``` + + It takes a few minutes for the cluster nodes to register as etcd + members and synchronize. + +{%- elif cookiecutter.operating_system == "k3s" %} +3. Bootstrap k3s with the following command: + + ``` + task k3s:bootstrap + ``` + + To understand what this task will do, examine the Taskfile configuration: + + ```yaml bootstrap: - desc: | - Run all tasks required to bootstrap the Talos and Kubernetes cluster. - requires: - vars: [ENV] - cmds: - - task: generate_configs - - task: set_node_ips - - task: store_controlplane_config - - task: store_talosconfig - - task: apply_talos_config - - sleep 30 - - task: bootstrap_kubernetes - - sleep 30 - - task: generate_kubeconfig - - task: store_kubeconfig - - task: upgrade_talos - - task: enable_ecr_credential_helper + desc: | + Run all tasks required to bootstrap k3s and Kubernetes cluster. + requires: + vars: [ENV] + cmds: + - task: save-node-ips + - task: setup-ssh-key + - task: install-k3s + - task: fetch-kubeconfig + - task: store-kubeconfig + - task: enable-ecr-credential-helper ``` It takes a few minutes for the cluster nodes to register as etcd members and synchronize. +{%- endif %} + If the cluster fails to bootstrap, refer to the Troubleshooting section below. +{%- if cookiecutter.operating_system == "talos" %} 4. Verify the health of your cluster with: - ```shell - task talos:health - ``` + ```shell + task talos:health + ``` + + 5. Test kubectl access: -5. 
Test kubectl access: + ```shell + eval $(task talos:kubeconfig) + kubectl cluster-info + ``` +{%- elif cookiecutter.operating_system == "k3s" %} + +4. Test kubectl access: ```shell - eval $(task talos:kubeconfig) + eval $(task k3s:kubeconfig) kubectl cluster-info ``` +{%- endif %} This should return output similar to the following: @@ -205,6 +262,7 @@ The `argocd:bootstrap` task configuration is as follows: ## Troubleshooting +{%- if cookiecutter.operating_system == "talos" %} If bootstrapping Talos fails, we recommend resetting the config files and recreating ec2 instances before trying again. @@ -220,3 +278,11 @@ recreating ec2 instances before trying again. -target "module.c luster.module.control_plane_nodes[1].aws_instance.this[0]" terraform plan -out="tfplan.out" terraform apply tfplan.out +{%- elif cookiecutter.operating_system == "k3s" %} +If bootstrapping k3s fails, we recommend uninstalling k3s from each node and +bootstrapping from scratch. + +```shell +task k3s:uninstall-k3s +``` +{%- endif %} diff --git a/{{cookiecutter.project_slug}}/bootstrap-cluster/k3s.yaml b/{{cookiecutter.project_slug}}/bootstrap-cluster/k3s.yaml new file mode 100644 index 00000000..f77dbe30 --- /dev/null +++ b/{{cookiecutter.project_slug}}/bootstrap-cluster/k3s.yaml @@ -0,0 +1,237 @@ +--- +version: '3' +env: + KUBECONFIG: ./{{ '{{.ENV}}' }}/kubeconfig + SSH_KEY: ./{{ '{{.ENV}}' }}/id_ed25519 + TFDIR: ../terraform/{{ '{{.ENV}}' }} + +tasks: + bootstrap: + desc: | + Run all tasks required to bootstrap k3s and Kubernetes cluster.
+ requires: + vars: [ENV] + cmds: + - task: save-node-ips + - task: setup-ssh-key + - task: install-ecr-credential-helper + - task: install-k3s + - task: fetch-kubeconfig + - task: store-kubeconfig + + save-node-ips: + desc: Save node IPs to files for later reference + requires: + vars: [ENV] + cmds: + - tofu -chdir=$TFDIR output control_plane_nodes_public_ips | tr -d '"' > {{ '{{.ENV}}' }}/public_ips.txt + - tofu -chdir=$TFDIR output control_plane_nodes_private_ips | tr -d '"' > {{ '{{.ENV}}' }}/private_ips.txt + + setup-ssh-key: + desc: Extract and save SSH key from terraform output + cmds: + - tofu -chdir={{ '{{.TFDIR}}' }} output -raw private_deploy_key > {{ '{{.SSH_KEY}}' }} + - chmod 600 $SSH_KEY + + ssh-server-node: + vars: + IP: + sh: head -n1 {{ '{{.ENV}}' }}/public_ips.txt + requires: + vars: [ENV] + cmds: + - TERM=xterm-256color ssh -oStrictHostKeyChecking=no -i {{ '{{.SSH_KEY}}' }} ubuntu@{{ '{{.IP}}' }} + + install-ecr-credential-helper: + desc: | + Install the ECR credential helper on all control plane nodes. + vars: + NODE_IPS: + sh: cat {{ '{{.ENV}}' }}/public_ips.txt + requires: + vars: [ENV] + cmds: + - | + for ip in $(echo "{{ '{{.NODE_IPS}}' }}"); do + echo "Installing ECR credential helper on $ip..." 
+ # Download and install the ECR credential provider + ssh -oStrictHostKeyChecking=no -i {{ '{{.SSH_KEY}}' }} ubuntu@$ip \ + 'sudo wget https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.29.0/linux/amd64/ecr-credential-provider-linux-amd64 -O /usr/local/bin/ecr-credential-provider && \ + sudo chmod +x /usr/local/bin/ecr-credential-provider' + + # Create credential provider config + ssh -oStrictHostKeyChecking=no -i {{ '{{.SSH_KEY}}' }} ubuntu@$ip \ + 'sudo mkdir -p /etc/rancher/k3s && \ + echo "apiVersion: kubelet.config.k8s.io/v1 + kind: CredentialProviderConfig + providers: + - name: ecr-credential-provider + matchImages: + - \"*.dkr.ecr.*.amazonaws.com\" + defaultCacheDuration: \"12h\" + apiVersion: credentialprovider.kubelet.k8s.io/v1" | sudo tee /etc/rancher/k3s/credential-provider-config.yaml' + done + + install-k3s: + desc: Install k3s on nodes + vars: + PUBLIC_IPS: + sh: cat {{ '{{.ENV}}' }}/public_ips.txt + PRIVATE_IPS: + sh: cat {{ '{{.ENV}}' }}/private_ips.txt + FIRST_PUBLIC_IP: + sh: head -n1 {{ '{{.ENV}}' }}/public_ips.txt + FIRST_PRIVATE_IP: + sh: head -n1 {{ '{{.ENV}}' }}/private_ips.txt + OTHER_PUBLIC_IPS: + sh: tail -n +2 {{ '{{.ENV}}' }}/public_ips.txt + OTHER_PRIVATE_IPS: + sh: tail -n +2 {{ '{{.ENV}}' }}/private_ips.txt + NODE_COUNT: + sh: wc -l < {{ '{{.ENV}}' }}/public_ips.txt + K3S_TOKEN: + sh: tr -dc A-Za-z0-9 ./{{ '{{.ENV}}' }}/kubeconfig + - chmod 600 {{ '{{.ENV}}' }}/kubeconfig diff --git a/{{cookiecutter.project_slug}}/bootstrap-cluster/taskfile.yaml b/{{cookiecutter.project_slug}}/bootstrap-cluster/taskfile.yaml index 1a0a2e4b..6eb7ecd1 100644 --- a/{{cookiecutter.project_slug}}/bootstrap-cluster/taskfile.yaml +++ b/{{cookiecutter.project_slug}}/bootstrap-cluster/taskfile.yaml @@ -4,7 +4,11 @@ dotenv: ['.env', '{{ "{{.ENV}}" }}/.env'] includes: argocd: ./argocd.yaml +{%- if cookiecutter.operating_system == "talos" %} talos: ./talos.yaml +{%- elif cookiecutter.operating_system == "k3s" %} + k3s: ./k3s.yaml +{%- endif %} tasks: 
default: diff --git a/{{cookiecutter.project_slug}}/terraform/README.md b/{{cookiecutter.project_slug}}/terraform/README.md index 04f3253a..06d53180 100644 --- a/{{cookiecutter.project_slug}}/terraform/README.md +++ b/{{cookiecutter.project_slug}}/terraform/README.md @@ -29,6 +29,7 @@ run the Terraform configurations. Ensure that you have installed AWS CLI version 2, as AWS SSO support is only available in version 2 and above. Create a new AWS profile in `~/.aws/config`. Here's an example of the `~/.aws/config` profile: + ``` [profile scaf] sso_start_url = https://sixfeetup.awsapps.com/start @@ -43,6 +44,7 @@ Note the `sso_role_name` setting above. Make sure to use a role that provides you with the necessary permissions to deploy infrastructure on your AWS account. Export the `AWS_PROFILE` environment variable and continue logging in: + ``` $ export AWS_PROFILE=scaf $ aws sso login @@ -50,6 +52,7 @@ $ aws sso login This should open your browser, allowing you to sign in to your AWS account. Upon successful login, you will see a message to confirm it: + ``` Successfully logged into Start URL: https://sixfeetup.awsapps.com/start ``` @@ -60,24 +63,27 @@ The first step is to bootstrap the Terraform state. This involves creating an S3 bucket and a DynamoDB table to manage the state and locking. 1. Navigate to the `bootstrap` directory: - ```bash - cd bootstrap - ``` + + ```bash + cd bootstrap + ``` 2. Initialize the Terraform configuration: - ```bash - terraform init - ``` + + ```bash + terraform init + ``` 3. Plan the Terraform configuration: - ```bash - terraform plan -out="tfplan.out" - ``` + + ```bash + terraform plan -out="tfplan.out" + ``` 4. Apply the Terraform configuration: - ```bash - terraform apply tfplan.out - ``` + ```bash + terraform apply tfplan.out + ``` ### Step 3: GitHub OIDC Provider @@ -85,24 +91,27 @@ After bootstrapping the state, the next step is to set up the GitHub OIDC provider. 1. 
Navigate to the `github` directory: - ```bash - cd ../github - ``` + + ```bash + cd ../github + ``` 2. Initialize the Terraform configuration: - ```bash - terraform init - ``` + + ```bash + terraform init + ``` 3. Plan the Terraform configuration: - ```bash - terraform plan -out="tfplan.out" - ``` + + ```bash + terraform plan -out="tfplan.out" + ``` 4. Apply the Terraform configuration: - ```bash - terraform apply tfplan.out - ``` + ```bash + terraform apply tfplan.out + ``` ### Step 4: Environment Configurations @@ -112,31 +121,33 @@ sandbox, staging). 1. Navigate to the desired environment directory (e.g., `prod`, `sandbox`, `staging`): - ```bash - cd ../ - ``` + ```bash + cd ../ + ``` 2. Initialize the Terraform configuration: - ```bash - terraform init - ``` + + ```bash + terraform init + ``` 3. Restrict the IPs allowed to manage the cluster. Edit - `/cluster.tf` and set the following variables: - ``` - kubectl_allowed_ips = "10.0.0.1/32,10.0.0.2/32" - talos_allowed_ips = "10.0.0.1/32,10.0.0.2/32" - ``` + `/cluster.tf` and set the following variables: + + ``` + admin_allowed_ips = "10.0.0.1/32,10.0.0.2/32" + ``` 4. Plan the Terraform configuration: - ```bash - terraform plan -out="tfplan.out" - ``` + + ```bash + terraform plan -out="tfplan.out" + ``` 5. Apply the Terraform configuration: - ```bash - terraform apply tfplan.out - ``` + ```bash + terraform apply tfplan.out + ``` ## Summary @@ -147,7 +158,6 @@ configurations: 2. Set up the GitHub OIDC provider (`github` directory). 3. Configure the desired environment (`prod`, `sandbox`, or `staging` directory). - Each step involves running `terraform init`, `terraform plan -out="tfplan.out"`, and `terraform apply tfplan.out`. 
diff --git a/{{cookiecutter.project_slug}}/terraform/modules/base/ec2.tf b/{{cookiecutter.project_slug}}/terraform/modules/base/ec2.tf index f5af6687..dc055656 100644 --- a/{{cookiecutter.project_slug}}/terraform/modules/base/ec2.tf +++ b/{{cookiecutter.project_slug}}/terraform/modules/base/ec2.tf @@ -1,8 +1,34 @@ -data "aws_ami" "talos" { +{% if cookiecutter.operating_system == "talos" %} +data "aws_ami" "os" { owners = ["540036508848"] # Sidero Labs most_recent = true name_regex = "^talos-v\\d+\\.\\d+\\.\\d+-${data.aws_availability_zones.available.id}-amd64$" } +{%- elif cookiecutter.operating_system == "k3s" %} +data "aws_ami" "os" { + most_recent = true + owners = ["099720109477"] # Canonical + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } +} + +resource "tls_private_key" "deploy_key" { + algorithm = "ED25519" +} + +resource "aws_key_pair" "default_key" { + key_name = "{{ cookiecutter.project_dash }}-deploy-key" + public_key = tls_private_key.deploy_key.public_key_openssh +} + +{%- endif %} locals { cluster_required_tags = { @@ -17,7 +43,7 @@ module "control_plane_nodes" { count = var.control_plane.num_instances name = "${var.cluster_name}-${count.index}" - ami = var.control_plane.ami_id == null ? data.aws_ami.talos.id : var.control_plane.ami_id + ami = var.control_plane.ami_id == null ?
data.aws_ami.os.id : var.control_plane.ami_id monitoring = true instance_type = var.control_plane.instance_type iam_instance_profile = aws_iam_instance_profile.ec2_instance_profile.name @@ -26,6 +52,10 @@ module "control_plane_nodes" { create_iam_instance_profile = false tags = merge(local.common_tags, local.cluster_required_tags) +{%- if cookiecutter.operating_system == "k3s" %} + key_name = aws_key_pair.default_key.key_name +{%- endif %} + vpc_security_group_ids = [module.cluster_sg.security_group_id] } diff --git a/{{cookiecutter.project_slug}}/terraform/modules/base/outputs.tf b/{{cookiecutter.project_slug}}/terraform/modules/base/outputs.tf index ada14deb..ce305c19 100644 --- a/{{cookiecutter.project_slug}}/terraform/modules/base/outputs.tf +++ b/{{cookiecutter.project_slug}}/terraform/modules/base/outputs.tf @@ -25,6 +25,20 @@ output "amazon_ses_user_secret_key" { {% endif %} output "control_plane_nodes_public_ips" { - description = "The public ip addresses of the talos control plane nodes." + description = "The public ip addresses of the control plane nodes." value = join(",", module.control_plane_nodes.*.public_ip) } + +output "control_plane_nodes_private_ips" { + description = "The private ip addresses of the control plane nodes." 
+ value = join(",", module.control_plane_nodes.*.private_ip) +} + +output "private_deploy_key" { + value = tls_private_key.deploy_key.private_key_openssh + sensitive = true +} + +output "public_deploy_key" { + value = tls_private_key.deploy_key.public_key_openssh +} diff --git a/{{cookiecutter.project_slug}}/terraform/modules/base/security_groups.tf b/{{cookiecutter.project_slug}}/terraform/modules/base/security_groups.tf index a5d33f82..f0df4dc7 100644 --- a/{{cookiecutter.project_slug}}/terraform/modules/base/security_groups.tf +++ b/{{cookiecutter.project_slug}}/terraform/modules/base/security_groups.tf @@ -30,17 +30,26 @@ module "cluster_sg" { from_port = 6443 to_port = 6443 protocol = "tcp" - cidr_blocks = var.kubectl_allowed_ips + cidr_blocks = var.admin_allowed_ips description = "Kubernetes API Access" }, - # TODO: add cookiecutter.use_talos check +{% if cookiecutter.operating_system == "talos" %} { from_port = 50000 to_port = 50000 protocol = "tcp" - cidr_blocks = var.talosctl_allowed_ips + cidr_blocks = var.admin_allowed_ips description = "Talos API Access" }, +{%- elif cookiecutter.operating_system == "k3s" %} + { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = var.admin_allowed_ips + description = "SSH Access" + }, +{%- endif %} ] egress_with_cidr_blocks = [ diff --git a/{{cookiecutter.project_slug}}/terraform/modules/base/variables.tf b/{{cookiecutter.project_slug}}/terraform/modules/base/variables.tf index 1b321015..376a043f 100644 --- a/{{cookiecutter.project_slug}}/terraform/modules/base/variables.tf +++ b/{{cookiecutter.project_slug}}/terraform/modules/base/variables.tf @@ -90,12 +90,13 @@ variable "cluster_vpc_cidr" { default = "172.16.0.0/16" } -# TODO: add cookiecutter.use_talos check +{% if cookiecutter.operating_system == "talos" %} variable "config_patch_files" { description = "Path to talos config path files that applies to all nodes" type = list(string) default = [] } +{%- endif %} variable "repo_name" { type =
string @@ -121,19 +122,12 @@ variable "backend_ecr_repo" { default = "{{ cookiecutter.project_dash }}-sandbox-backend" } -variable "kubectl_allowed_ips" { +variable "admin_allowed_ips" { description = "A list of CIDR blocks that are allowed to access the kubernetes api" type = string default = "0.0.0.0/0" } -# TODO: add cookiecutter.use_talos check -variable "talosctl_allowed_ips" { - description = "A list of CIDR blocks that are allowed to access the talos api" - type = string - default = "0.0.0.0/0" -} - variable "tags" { type = map(string) default = {} diff --git a/{{cookiecutter.project_slug}}/terraform/modules/base/versions.tf b/{{cookiecutter.project_slug}}/terraform/modules/base/versions.tf index ecd58f1a..932fa456 100644 --- a/{{cookiecutter.project_slug}}/terraform/modules/base/versions.tf +++ b/{{cookiecutter.project_slug}}/terraform/modules/base/versions.tf @@ -9,14 +9,6 @@ terraform { source = "siderolabs/talos" version = "0.5.0" } - helm = { - source = "hashicorp/helm" - version = "2.13.2" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = "2.30.0" - } null = { source = "hashicorp/null" version = "3.2.2" @@ -25,9 +17,5 @@ terraform { source = "hashicorp/local" version = ">= 2.5.1" } - template = { - source = "hashicorp/template" - version = ">= 2.2.0" - } } } diff --git a/{{cookiecutter.project_slug}}/terraform/prod/cluster.tf b/{{cookiecutter.project_slug}}/terraform/prod/cluster.tf index 06f7fef5..7a3eb0bb 100644 --- a/{{cookiecutter.project_slug}}/terraform/prod/cluster.tf +++ b/{{cookiecutter.project_slug}}/terraform/prod/cluster.tf @@ -16,8 +16,7 @@ module "cluster" { # ami_id = "ami-09d22b42af049d453" } - # NB!: limit kubectl_allowed_ips and talos_allowed_ips to a set of trusted + # NB!: limit admin_allowed_ips to a set of trusted # public ip addresses. Both variables are comma separated lists of ips. 
- # kubectl_allowed_ips = "10.0.0.1/32,10.0.0.2/32" - # talos_allowed_ips = "10.0.0.1/32,10.0.0.2/32" + # admin_allowed_ips = "10.0.0.1/32,10.0.0.2/32" } diff --git a/{{cookiecutter.project_slug}}/terraform/sandbox/cluster.tf b/{{cookiecutter.project_slug}}/terraform/sandbox/cluster.tf index 736a5cf4..d1409e5b 100644 --- a/{{cookiecutter.project_slug}}/terraform/sandbox/cluster.tf +++ b/{{cookiecutter.project_slug}}/terraform/sandbox/cluster.tf @@ -17,8 +17,7 @@ module "cluster" { } - # NB!: limit kubectl_allowed_ips and talos_allowed_ips to a set of trusted - # public ip addresses. Both variables are comma separated lists of ips. + # NB!: limit admin_allowed_ips to a set of trusted + # public ip addresses. The variable is a comma separated list of ips. - # kubectl_allowed_ips = "10.0.0.1/32,10.0.0.2/32" - # talos_allowed_ips = "10.0.0.1/32,10.0.0.2/32" + # admin_allowed_ips = "10.0.0.1/32,10.0.0.2/32" } diff --git a/{{cookiecutter.project_slug}}/terraform/staging/cluster.tf b/{{cookiecutter.project_slug}}/terraform/staging/cluster.tf index 9efa34d0..30c18195 100644 --- a/{{cookiecutter.project_slug}}/terraform/staging/cluster.tf +++ b/{{cookiecutter.project_slug}}/terraform/staging/cluster.tf @@ -16,8 +16,7 @@ module "cluster" { # ami_id = "ami-09d22b42af049d453" } - # NB!: limit kubectl_allowed_ips and talos_allowed_ips to a set of trusted - # public ip addresses. Both variables are comma separated lists of ips. + # NB!: limit admin_allowed_ips to a set of trusted + # public ip addresses. The variable is a comma separated list of ips. - # kubectl_allowed_ips = "10.0.0.1/32,10.0.0.2/32" - # talos_allowed_ips = "10.0.0.1/32,10.0.0.2/32" + # admin_allowed_ips = "10.0.0.1/32,10.0.0.2/32" }