diff --git a/examples/gke-basic-tiller/main.tf b/examples/gke-basic-tiller/main.tf
index 06e19a0..a6ba3c1 100644
--- a/examples/gke-basic-tiller/main.tf
+++ b/examples/gke-basic-tiller/main.tf
@@ -10,6 +10,10 @@ terraform {
   required_version = ">= 0.10.3"
 }
 
+# ---------------------------------------------------------------------------------------------------------------------
+# PREPARE PROVIDERS
+# ---------------------------------------------------------------------------------------------------------------------
+
 provider "google" {
   version = "~> 2.3.0"
   project = "${var.project}"
@@ -74,7 +78,9 @@ module "gke_cluster" {
   cluster_secondary_range_name = "${google_compute_subnetwork.main.secondary_ip_range.0.range_name}"
 }
 
-# Deploy a Node Pool
+# ---------------------------------------------------------------------------------------------------------------------
+# CREATE A NODE POOL
+# ---------------------------------------------------------------------------------------------------------------------
 
 resource "google_container_node_pool" "node_pool" {
   provider = "google-beta"
diff --git a/examples/gke-basic-tiller/variables.tf b/examples/gke-basic-tiller/variables.tf
index 5c8d2e8..987fcb3 100644
--- a/examples/gke-basic-tiller/variables.tf
+++ b/examples/gke-basic-tiller/variables.tf
@@ -4,7 +4,7 @@
 # ---------------------------------------------------------------------------------------------------------------------
 
 variable "project" {
-  description = "The name of the GCP Project where all resources will be launched."
+  description = "The project ID where all resources will be launched."
 }
 
 variable "location" {
diff --git a/examples/gke-private-cluster/README.md b/examples/gke-private-cluster/README.md
index 589a78a..28ed075 100644
--- a/examples/gke-private-cluster/README.md
+++ b/examples/gke-private-cluster/README.md
@@ -37,10 +37,12 @@ Currently, you cannot use a proxy to reach the cluster master of a regional clus
 
 ## How do you run these examples?
 
-1. Install [Terraform](https://www.terraform.io/).
-1. Make sure you have Python installed (version 2.x) and in your `PATH`.
-1. Open `variables.tf`, and fill in any required variables that don't have a
-default.
+1. Install [Terraform](https://learn.hashicorp.com/terraform/getting-started/install.html) v0.10.3 or later.
+1. Open `variables.tf` and fill in any required variables that don't have a default.
 1. Run `terraform get`.
 1. Run `terraform plan`.
 1. If the plan looks good, run `terraform apply`.
+1. To set up `kubectl` to access the deployed cluster, run `gcloud beta container clusters get-credentials $CLUSTER_NAME
+--region $REGION --project $PROJECT`, where `CLUSTER_NAME`, `REGION`, and `PROJECT` correspond to what you set for the
+input variables.
+
diff --git a/examples/gke-private-cluster/main.tf b/examples/gke-private-cluster/main.tf
new file mode 100644
index 0000000..a1c45cd
--- /dev/null
+++ b/examples/gke-private-cluster/main.tf
@@ -0,0 +1,162 @@
+# ---------------------------------------------------------------------------------------------------------------------
+# DEPLOY A GKE PRIVATE CLUSTER IN GOOGLE CLOUD
+# This is an example of how to use the gke-cluster module to deploy a private Kubernetes cluster in GCP
+# ---------------------------------------------------------------------------------------------------------------------
+
+# Use Terraform 0.10.x so that we can take advantage of Terraform GCP functionality as a separate provider via
+# https://github.com/terraform-providers/terraform-provider-google
+terraform {
+  required_version = ">= 0.10.3"
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# PREPARE PROVIDERS
+# ---------------------------------------------------------------------------------------------------------------------
+
+provider "google" {
+  version = "~> 2.3.0"
+  project = "${var.project}"
+  region  = "${var.region}"
+}
+
+provider "google-beta" {
+  version = "~> 2.3.0"
+  project = "${var.project}"
+  region  = "${var.region}"
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# DEPLOY A PRIVATE CLUSTER IN GOOGLE CLOUD
+# ---------------------------------------------------------------------------------------------------------------------
+
+module "gke_cluster" {
+  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
+  # to a specific version of the modules, such as the following example:
+  # source = "git::git@github.com:gruntwork-io/gke-cluster.git//modules/gke-cluster?ref=v0.0.4"
+  source = "../../modules/gke-cluster"
+
+  name = "${var.cluster_name}"
+
+  project    = "${var.project}"
+  location   = "${var.location}"
+  network    = "${google_compute_network.main.name}"
+  subnetwork = "${google_compute_subnetwork.main.self_link}"
+
+  # When creating a private cluster, the 'master_ipv4_cidr_block' has to be defined and the size must be /28
+  master_ipv4_cidr_block = "10.5.0.0/28"
+
+  # This setting will make the cluster private
+  enable_private_nodes = "true"
+
+  # To make testing easier, we keep the public endpoint available. In production, we highly recommend restricting access to only within the network boundary, requiring your users to use a bastion host or VPN.
+  disable_public_endpoint = "false"
+
+  # With a private cluster, it is highly recommended to restrict access to the cluster master.
+  # However, for testing purposes we will allow all inbound traffic.
+  master_authorized_networks_config = [{
+    cidr_blocks = [{
+      cidr_block   = "0.0.0.0/0"
+      display_name = "all-for-testing"
+    }]
+  }]
+
+  cluster_secondary_range_name = "${google_compute_subnetwork.main.secondary_ip_range.0.range_name}"
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# CREATE A NODE POOL
+# ---------------------------------------------------------------------------------------------------------------------
+
+resource "google_container_node_pool" "node_pool" {
+  provider = "google-beta"
+
+  name     = "private-pool"
+  project  = "${var.project}"
+  location = "${var.location}"
+  cluster  = "${module.gke_cluster.name}"
+
+  initial_node_count = "1"
+
+  autoscaling {
+    min_node_count = "1"
+    max_node_count = "5"
+  }
+
+  management {
+    auto_repair  = "true"
+    auto_upgrade = "true"
+  }
+
+  node_config {
+    image_type   = "COS"
+    machine_type = "n1-standard-1"
+
+    labels = {
+      private-pools-example = "true"
+    }
+
+    tags         = ["private-pool-example"]
+    disk_size_gb = "30"
+    disk_type    = "pd-standard"
+    preemptible  = false
+
+    service_account = "${module.gke_service_account.email}"
+
+    oauth_scopes = [
+      "https://www.googleapis.com/auth/cloud-platform",
+    ]
+  }
+
+  lifecycle {
+    ignore_changes = ["initial_node_count"]
+  }
+
+  timeouts {
+    create = "30m"
+    update = "30m"
+    delete = "30m"
+  }
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# CREATE A CUSTOM SERVICE ACCOUNT TO USE WITH THE GKE CLUSTER
+# ---------------------------------------------------------------------------------------------------------------------
+
+module "gke_service_account" {
+  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
+  # to a specific version of the modules, such as the following example:
+  # source = "git::git@github.com:gruntwork-io/gke-cluster.git//modules/gke-service-account?ref=v0.0.1"
+  source = "../../modules/gke-service-account"
+
+  name        = "${var.cluster_service_account_name}"
+  project     = "${var.project}"
+  description = "${var.cluster_service_account_description}"
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# CREATE A NETWORK TO DEPLOY THE CLUSTER TO
+# ---------------------------------------------------------------------------------------------------------------------
+
+# TODO(rileykarson): Add proper VPC network config once we've made a VPC module
+resource "random_string" "suffix" {
+  length  = 4
+  special = false
+  upper   = false
+}
+
+resource "google_compute_network" "main" {
+  name                    = "${var.cluster_name}-network-${random_string.suffix.result}"
+  auto_create_subnetworks = "false"
+}
+
+resource "google_compute_subnetwork" "main" {
+  name          = "${var.cluster_name}-subnetwork-${random_string.suffix.result}"
+  ip_cidr_range = "10.3.0.0/17"
+  region        = "${var.region}"
+  network       = "${google_compute_network.main.self_link}"
+
+  secondary_ip_range {
+    range_name    = "private-cluster-pods"
+    ip_cidr_range = "10.4.0.0/18"
+  }
+}
diff --git a/examples/gke-private-cluster/outputs.tf b/examples/gke-private-cluster/outputs.tf
new file mode 100644
index 0000000..51f473b
--- /dev/null
+++ b/examples/gke-private-cluster/outputs.tf
@@ -0,0 +1,22 @@
+output "cluster_endpoint" {
+  description = "The IP address of the cluster master."
+  sensitive   = true
+  value       = "${module.gke_cluster.endpoint}"
+}
+
+output "client_certificate" {
+  description = "Public certificate used by clients to authenticate to the cluster endpoint."
+  value       = "${module.gke_cluster.client_certificate}"
+}
+
+output "client_key" {
+  description = "Private key used by clients to authenticate to the cluster endpoint."
+  sensitive   = true
+  value       = "${module.gke_cluster.client_key}"
+}
+
+output "cluster_ca_certificate" {
+  description = "The public certificate that is the root of trust for the cluster."
+  sensitive   = true
+  value       = "${module.gke_cluster.cluster_ca_certificate}"
+}
diff --git a/examples/gke-private-cluster/variables.tf b/examples/gke-private-cluster/variables.tf
new file mode 100644
index 0000000..d0ad1eb
--- /dev/null
+++ b/examples/gke-private-cluster/variables.tf
@@ -0,0 +1,36 @@
+# ---------------------------------------------------------------------------------------------------------------------
+# REQUIRED PARAMETERS
+# These variables are expected to be passed in by the operator.
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "project" {
+  description = "The project ID where all resources will be launched."
+}
+
+variable "location" {
+  description = "The location (region or zone) of the GKE cluster."
+}
+
+variable "region" {
+  description = "The region for the network. If the cluster is regional, this must be the same region. Otherwise, it should be the region of the zone."
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# OPTIONAL PARAMETERS
+# These parameters have reasonable defaults.
+# ---------------------------------------------------------------------------------------------------------------------
+
+variable "cluster_name" {
+  description = "The name of the Kubernetes cluster."
+  default     = "example-private-cluster"
+}
+
+variable "cluster_service_account_name" {
+  description = "The name of the custom service account used for the GKE cluster. This parameter is limited to a maximum of 28 characters."
+  default     = "example-private-cluster-sa"
+}
+
+variable "cluster_service_account_description" {
+  description = "A description of the custom service account used for the GKE cluster."
+  default     = "Example GKE Cluster Service Account managed by Terraform"
+}
diff --git a/examples/gke-public-cluster/README.md b/examples/gke-public-cluster/README.md
index bb1f7f3..979f0b9 100644
--- a/examples/gke-public-cluster/README.md
+++ b/examples/gke-public-cluster/README.md
@@ -56,3 +56,7 @@ your new zones are within the region your cluster is present in.
 1. Run `terraform get`.
 1. Run `terraform plan`.
 1. If the plan looks good, run `terraform apply`.
+1. To set up `kubectl` to access the deployed cluster, run `gcloud beta container clusters get-credentials $CLUSTER_NAME
+--region $REGION --project $PROJECT`, where `CLUSTER_NAME`, `REGION`, and `PROJECT` correspond to what you set for the
+input variables.
+
diff --git a/examples/gke-public-cluster/main.tf b/examples/gke-public-cluster/main.tf
index 7cce150..e16d998 100644
--- a/examples/gke-public-cluster/main.tf
+++ b/examples/gke-public-cluster/main.tf
@@ -10,6 +10,10 @@ terraform {
   required_version = ">= 0.10.3"
 }
 
+# ---------------------------------------------------------------------------------------------------------------------
+# PREPARE PROVIDERS
+# ---------------------------------------------------------------------------------------------------------------------
+
 provider "google" {
   version = "~> 2.3.0"
   project = "${var.project}"
@@ -22,10 +26,14 @@ provider "google-beta" {
   region = "${var.region}"
 }
 
+# ---------------------------------------------------------------------------------------------------------------------
+# DEPLOY A PUBLIC CLUSTER IN GOOGLE CLOUD
+# ---------------------------------------------------------------------------------------------------------------------
+
 module "gke_cluster" {
   # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
   # to a specific version of the modules, such as the following example:
-  # source = "git::git@github.com:gruntwork-io/gke-cluster.git//modules/gke-cluster?ref=v0.0.1"
+  # source = "git::git@github.com:gruntwork-io/gke-cluster.git//modules/gke-cluster?ref=v0.0.3"
   source = "../../modules/gke-cluster"
 
   name = "${var.cluster_name}"
@@ -38,9 +46,10 @@ module "gke_cluster" {
   cluster_secondary_range_name = "${google_compute_subnetwork.main.secondary_ip_range.0.range_name}"
 }
 
-# Node Pool
+# ---------------------------------------------------------------------------------------------------------------------
+# CREATE A NODE POOL
+# ---------------------------------------------------------------------------------------------------------------------
 
-// Node Pool Resource
 resource "google_container_node_pool" "node_pool" {
   provider = "google-beta"
 
@@ -107,6 +116,9 @@ module "gke_service_account" {
   description = "${var.cluster_service_account_description}"
 }
 
+# ---------------------------------------------------------------------------------------------------------------------
+# CREATE A NETWORK TO DEPLOY THE CLUSTER TO
+# ---------------------------------------------------------------------------------------------------------------------
 # TODO(rileykarson): Add proper VPC network config once we've made a VPC module
 resource "random_string" "suffix" {
   length = 4
diff --git a/examples/gke-public-cluster/variables.tf b/examples/gke-public-cluster/variables.tf
index 21b875a..47fe283 100644
--- a/examples/gke-public-cluster/variables.tf
+++ b/examples/gke-public-cluster/variables.tf
@@ -4,7 +4,7 @@
 # ---------------------------------------------------------------------------------------------------------------------
 
 variable "project" {
-  description = "The name of the GCP Project where all resources will be launched."
+  description = "The project ID where all resources will be launched."
 }
 
 variable "location" {
diff --git a/modules/gke-cluster/README.md b/modules/gke-cluster/README.md
index eb45fb9..886323e 100644
--- a/modules/gke-cluster/README.md
+++ b/modules/gke-cluster/README.md
@@ -67,6 +67,45 @@ using a shared VPC network (a network from another GCP project) using an explici
 See [considerations for cluster sizing](https://cloud.google.com/kubernetes-engine/docs/how-to/alias-ips#cluster_sizing)
 for more information on sizing secondary ranges for your VPC-native cluster.
 
+## What is a private cluster?
+
+In a private cluster, the nodes have internal IP addresses only, which ensures that their workloads are isolated from the public Internet.
+Private nodes do not have outbound Internet access, but Private Google Access provides private nodes and their workloads with
+limited outbound access to Google Cloud Platform APIs and services over Google's private network.
+
+If you want your cluster nodes to be able to access the Internet, for example to pull images from external container registries,
+you will have to set up [Cloud NAT](https://cloud.google.com/nat/docs/overview).
+See [Example GKE Setup](https://cloud.google.com/nat/docs/gke-example) for further information.
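+
+If you go down this route, a minimal Cloud NAT setup for the cluster's network might look like the following sketch.
+The resource names and the `google_compute_network.main` reference are illustrative assumptions and are not managed by
+this module:
+
+```hcl
+# Illustrative only: route outbound traffic from the private nodes through Cloud NAT.
+resource "google_compute_router" "nat_router" {
+  name    = "private-cluster-nat-router"
+  region  = "${var.region}"
+  network = "${google_compute_network.main.self_link}"
+}
+
+resource "google_compute_router_nat" "nat" {
+  name                               = "private-cluster-nat"
+  router                             = "${google_compute_router.nat_router.name}"
+  region                             = "${var.region}"
+  nat_ip_allocate_option             = "AUTO_ONLY"
+  source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
+}
+```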
+
+You can create a private cluster by setting `enable_private_nodes` to `true`. Note that with a private cluster, setting
+the master CIDR range with `master_ipv4_cidr_block` is also required. See the sketch at the end of this section for how
+these settings fit together.
+
+### How do I control access to the cluster master?
+
+In a private cluster, the master has two endpoints:
+
+* **Private endpoint:** This is the internal IP address of the master, behind an internal load balancer in the master's
+VPC network. Nodes communicate with the master using the private endpoint. Any VM in your VPC network, and in the same
+region as your private cluster, can use the private endpoint.
+
+* **Public endpoint:** This is the external IP address of the master. You can disable access to the public endpoint by setting
+`disable_public_endpoint` to `true`.
+
+You can relax these restrictions by authorizing certain address ranges to access the endpoints with the input variable
+`master_authorized_networks_config`.
+
+### Private cluster restrictions and limitations
+
+Private clusters have the following restrictions and limitations:
+
+* The size of the RFC 1918 block for the cluster master must be /28.
+* The nodes in a private cluster must run Kubernetes version 1.8.14-gke.0 or later.
+* You cannot convert an existing, non-private cluster to a private cluster.
+* Each private cluster you create uses a unique VPC Network Peering.
+* Deleting the VPC peering between the cluster master and the cluster nodes, deleting the firewall rules that allow
+ingress traffic from the cluster master to nodes on port 10250, or deleting the default route to the default
+Internet gateway, causes a private cluster to stop functioning.
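+
+Putting these settings together, a minimal private cluster configuration might look like the following sketch. The
+network, subnetwork, and CIDR values are illustrative assumptions rather than defaults of this module; see the
+`gke-private-cluster` example for a complete, working configuration:
+
+```hcl
+module "gke_private_cluster" {
+  # In your own templates, use a Git URL with a pinned ref instead of a relative path.
+  source = "../../modules/gke-cluster"
+
+  name       = "example-private-cluster"
+  project    = "${var.project}"
+  location   = "${var.location}"
+  network    = "${google_compute_network.main.name}"
+  subnetwork = "${google_compute_subnetwork.main.self_link}"
+
+  # Make the nodes private. The master CIDR block must be a /28 that does not overlap other ranges in the network.
+  enable_private_nodes   = "true"
+  master_ipv4_cidr_block = "10.5.0.0/28"
+
+  # Keep the public endpoint reachable, but only from the authorized ranges below.
+  disable_public_endpoint = "false"
+
+  master_authorized_networks_config = [{
+    cidr_blocks = [{
+      cidr_block   = "10.0.0.0/8"
+      display_name = "internal-only"
+    }]
+  }]
+
+  cluster_secondary_range_name = "${google_compute_subnetwork.main.secondary_ip_range.0.range_name}"
+}
+```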
+
 ## What IAM roles does this module configure? (unimplemented)
 
 Given a service account, this module will enable the following IAM roles:
diff --git a/modules/gke-cluster/main.tf b/modules/gke-cluster/main.tf
index db39e26..b33689e 100644
--- a/modules/gke-cluster/main.tf
+++ b/modules/gke-cluster/main.tf
@@ -1,3 +1,13 @@
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# DEPLOY A GKE CLUSTER
+# This module deploys a GKE cluster, a managed, production-ready environment for deploying containerized applications.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+# ---------------------------------------------------------------------------------------------------------------------
+# Create the GKE Cluster
+# We want to make a cluster with no node pools, and manage them all with the fine-grained google_container_node_pool resource
+# ---------------------------------------------------------------------------------------------------------------------
+
 resource "google_container_cluster" "cluster" {
   name        = "${var.name}"
   description = "${var.description}"
@@ -11,11 +21,8 @@ resource "google_container_cluster" "cluster" {
   monitoring_service = "${var.monitoring_service}"
   min_master_version = "${local.kubernetes_version}"
 
-  # We want to make a cluster with no node pools, and manage them all with the
-  # fine-grained google_container_node_pool resource. The API requires a node
-  # pool or an initial count to be defined; that initial count creates the
+  # The API requires a node pool or an initial count to be defined; that initial count creates the
   # "default node pool" with that # of nodes.
-  #
   # So, we need to set an initial_node_count of 1. This will make a default node
   # pool with server-defined defaults that Terraform will immediately delete as
   # part of Create. This leaves us in our desired state- with a cluster master
@@ -24,12 +31,21 @@ resource "google_container_cluster" "cluster" {
 
   initial_node_count = 1
 
+  # ip_allocation_policy.use_ip_aliases defaults to true, since we define the block `ip_allocation_policy`
   ip_allocation_policy {
     // Choose the range, but let GCP pick the IPs within the range
     cluster_secondary_range_name  = "${var.cluster_secondary_range_name}"
     services_secondary_range_name = "${var.cluster_secondary_range_name}"
   }
 
+  # We can optionally control access to the cluster
+  # See https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters
+  private_cluster_config {
+    enable_private_endpoint = "${var.disable_public_endpoint}"
+    enable_private_nodes    = "${var.enable_private_nodes}"
+    master_ipv4_cidr_block  = "${var.master_ipv4_cidr_block}"
+  }
+
   addons_config {
     http_load_balancing {
       disabled = "${var.http_load_balancing ? 0 : 1}"
@@ -80,11 +96,19 @@ resource "google_container_cluster" "cluster" {
   }
 }
 
+# ---------------------------------------------------------------------------------------------------------------------
+# Prepare locals to keep the code cleaner
+# ---------------------------------------------------------------------------------------------------------------------
+
 locals {
   kubernetes_version = "${var.kubernetes_version != "latest" ? var.kubernetes_version : data.google_container_engine_versions.location.latest_node_version}"
   network_project    = "${var.network_project != "" ? var.network_project : var.project}"
 }
 
+# ---------------------------------------------------------------------------------------------------------------------
+# Pull in data
+# ---------------------------------------------------------------------------------------------------------------------
+
 data "google_compute_network" "gke_network" {
   name    = "${var.network}"
   project = "${local.network_project}"
diff --git a/modules/gke-cluster/variables.tf b/modules/gke-cluster/variables.tf
index cb1a0f8..a79fe1a 100644
--- a/modules/gke-cluster/variables.tf
+++ b/modules/gke-cluster/variables.tf
@@ -62,6 +62,21 @@ variable "http_load_balancing" {
   default = true
 }
 
+variable "enable_private_nodes" {
+  description = "Control whether nodes have internal IP addresses only. If enabled, all nodes are given only RFC 1918 private addresses and communicate with the master via private networking."
+  default     = "false"
+}
+
+variable "disable_public_endpoint" {
+  description = "Control whether the master's internal IP address is used as the cluster endpoint. If set to 'true', the master can only be accessed from internal IP addresses."
+  default     = "false"
+}
+
+variable "master_ipv4_cidr_block" {
+  description = "The IP range in CIDR notation to use for the hosted master network. This range will be used for assigning internal IP addresses to the master or set of masters, as well as the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network."
+  default     = ""
+}
+
 // TODO(robmorgan): Are we using these values below? We should understand them more fully before adding them to configs.
 
 variable "network_project" {
diff --git a/test/gke_cluster_test.go b/test/gke_cluster_test.go
index 69210b2..4a2e93b 100644
--- a/test/gke_cluster_test.go
+++ b/test/gke_cluster_test.go
@@ -18,64 +18,91 @@ func TestGKECluster(t *testing.T) {
     // be fixed in a later release.
     //t.Parallel()
 
-    // Uncomment any of the following to skip that section during the test
-    // os.Setenv("SKIP_create_test_copy_of_examples", "true")
-    // os.Setenv("SKIP_create_terratest_options", "true")
-    // os.Setenv("SKIP_terraform_apply", "true")
-    // os.Setenv("SKIP_configure_kubectl", "true")
-    // os.Setenv("SKIP_wait_for_workers", "true")
-    // os.Setenv("SKIP_cleanup", "true")
-
-    // Create a directory path that won't conflict
-    workingDir := filepath.Join(".", "stages", t.Name())
-
-    test_structure.RunTestStage(t, "create_test_copy_of_examples", func() {
-        testFolder := test_structure.CopyTerraformFolderToTemp(t, "..", "examples")
-        logger.Logf(t, "path to test folder %s\n", testFolder)
-        terraformModulePath := filepath.Join(testFolder, "gke-public-cluster")
-        test_structure.SaveString(t, workingDir, "gkeClusterTerraformModulePath", terraformModulePath)
-    })
-
-    test_structure.RunTestStage(t, "create_terratest_options", func() {
-        gkeClusterTerraformModulePath := test_structure.LoadString(t, workingDir, "gkeClusterTerraformModulePath")
-        uniqueID := random.UniqueId()
-        project := gcp.GetGoogleProjectIDFromEnvVar(t)
-        region := gcp.GetRandomRegion(t, project, nil, nil)
-        iamUser := getIAMUserFromEnv()
-        gkeClusterTerratestOptions := createGKEClusterTerraformOptions(t, uniqueID, project, region, iamUser, gkeClusterTerraformModulePath)
-        test_structure.SaveString(t, workingDir, "uniqueID", uniqueID)
-        test_structure.SaveString(t, workingDir, "project", project)
-        test_structure.SaveString(t, workingDir, "region", region)
-        test_structure.SaveString(t, workingDir, "iamUser", iamUser)
-        test_structure.SaveTerraformOptions(t, workingDir, gkeClusterTerratestOptions)
-    })
-
-    defer test_structure.RunTestStage(t, "cleanup", func() {
-        gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir)
-        terraform.Destroy(t, gkeClusterTerratestOptions)
-    })
-
-    test_structure.RunTestStage(t, "terraform_apply", func() {
-        gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir)
-        terraform.InitAndApply(t, gkeClusterTerratestOptions)
-    })
-
-    test_structure.RunTestStage(t, "configure_kubectl", func() {
-        gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir)
-        project := test_structure.LoadString(t, workingDir, "project")
-        region := test_structure.LoadString(t, workingDir, "region")
-        clusterName := gkeClusterTerratestOptions.Vars["cluster_name"].(string)
-
-        // gcloud beta container clusters get-credentials example-cluster --region australia-southeast1 --project dev-sandbox-123456
-        cmd := shell.Command{
-            Command: "gcloud",
-            Args:    []string{"beta", "container", "clusters", "get-credentials", clusterName, "--region", region, "--project", project},
-        }
-
-        shell.RunCommand(t, cmd)
-    })
-
-    test_structure.RunTestStage(t, "wait_for_workers", func() {
-        verifyGkeNodesAreReady(t)
-    })
+    var testcases = []struct {
+        testName      string
+        exampleFolder string
+    }{
+        {
+            "PublicCluster",
+            "gke-public-cluster",
+        },
+        {
+            "PrivateCluster",
+            "gke-private-cluster",
+        },
+    }
+
+    for _, testCase := range testcases {
+        // The following is necessary to make sure testCase's values don't
+        // get updated due to concurrency within the scope of t.Run(..) below
+        testCase := testCase
+
+        t.Run(testCase.testName, func(t *testing.T) {
+            // We are temporarily stopping the tests from running in parallel due to conflicting
+            // kubectl configs. This is a limitation in the current Terratest functions and will
+            // be fixed in a later release.
+            //t.Parallel()
+
+            // Uncomment any of the following to skip that section during the test
+            //os.Setenv("SKIP_create_test_copy_of_examples", "true")
+            //os.Setenv("SKIP_create_terratest_options", "true")
+            //os.Setenv("SKIP_terraform_apply", "true")
+            //os.Setenv("SKIP_configure_kubectl", "true")
+            //os.Setenv("SKIP_wait_for_workers", "true")
+            //os.Setenv("SKIP_cleanup", "true")
+
+            // Create a directory path that won't conflict
+            workingDir := filepath.Join(".", "stages", testCase.testName)
+
+            test_structure.RunTestStage(t, "create_test_copy_of_examples", func() {
+                testFolder := test_structure.CopyTerraformFolderToTemp(t, "..", "examples")
+                logger.Logf(t, "path to test folder %s\n", testFolder)
+                terraformModulePath := filepath.Join(testFolder, testCase.exampleFolder)
+                test_structure.SaveString(t, workingDir, "gkeClusterTerraformModulePath", terraformModulePath)
+            })
+
+            test_structure.RunTestStage(t, "create_terratest_options", func() {
+                gkeClusterTerraformModulePath := test_structure.LoadString(t, workingDir, "gkeClusterTerraformModulePath")
+                uniqueID := random.UniqueId()
+                project := gcp.GetGoogleProjectIDFromEnvVar(t)
+                region := gcp.GetRandomRegion(t, project, nil, nil)
+                iamUser := getIAMUserFromEnv()
+                gkeClusterTerratestOptions := createGKEClusterTerraformOptions(t, uniqueID, project, region, iamUser, gkeClusterTerraformModulePath)
+                test_structure.SaveString(t, workingDir, "uniqueID", uniqueID)
+                test_structure.SaveString(t, workingDir, "project", project)
+                test_structure.SaveString(t, workingDir, "region", region)
+                test_structure.SaveString(t, workingDir, "iamUser", iamUser)
+                test_structure.SaveTerraformOptions(t, workingDir, gkeClusterTerratestOptions)
+            })
+
+            defer test_structure.RunTestStage(t, "cleanup", func() {
+                gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir)
+                terraform.Destroy(t, gkeClusterTerratestOptions)
+            })
+
+            test_structure.RunTestStage(t, "terraform_apply", func() {
+                gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir)
+                terraform.InitAndApply(t, gkeClusterTerratestOptions)
+            })
+
+            test_structure.RunTestStage(t, "configure_kubectl", func() {
+                gkeClusterTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir)
+                project := test_structure.LoadString(t, workingDir, "project")
+                region := test_structure.LoadString(t, workingDir, "region")
+                clusterName := gkeClusterTerratestOptions.Vars["cluster_name"].(string)
+
+                // gcloud beta container clusters get-credentials example-cluster --region australia-southeast1 --project dev-sandbox-123456
+                cmd := shell.Command{
+                    Command: "gcloud",
+                    Args:    []string{"beta", "container", "clusters", "get-credentials", clusterName, "--region", region, "--project", project},
+                }
+
+                shell.RunCommand(t, cmd)
+            })
+
+            test_structure.RunTestStage(t, "wait_for_workers", func() {
+                verifyGkeNodesAreReady(t)
+            })
+        })
+    }
 }