From 5dedfb0a4618a6302741ab25ed8be0d872cc075c Mon Sep 17 00:00:00 2001
From: Loren Yu <loren@navapbc.com>
Date: Tue, 21 Nov 2023 08:33:42 -0800
Subject: [PATCH] Fix "invalid count argument" error (#476)

* Fix "invalid count argument" error

Additional changes
* Remove "password_ts" event from role manager lambda
* Merge redundant IAM policies for role manager ssm access
* Clean up DEBUG log level configuration in role manager

## Context

In https://github.com/navapbc/template-infra/issues/475, @rocketnova
discovered a bug that prevents terraform from creating a plan for the
database layer. The module sets the count for the db password secret to
be `length(aws_rds_cluster.db.master_user_secret)`, but this is
unnecessary since `aws_rds_cluster.db.master_user_secret` will always be
available as long as the rds_cluster's `manage_master_user_password` is
set to `true` which will always be the case since it is hardcoded to
`true` (see
https://github.com/navapbc/template-infra/blob/6b3588c9356a8134d64d145981c386df1bc09b7b/infra/modules/database/main.tf#L31).

This changeset removes the unnecessary count which fixes the terraform
plan.

This changeset also includes a number of minor cleanup changes:
* Remove the "password_ts" event from the role manager lambda that was
introduced in [PR
461](https://github.com/navapbc/template-infra/pull/461/files) and isn't
needed.
* Merge the IAM policy that was newly created in [PR
469](https://github.com/navapbc/template-infra/pull/469/files) with the
existing one that is conceptually identical.
* Clean up the DEBUG log level configuration in the role manager that
was introduced in [PR
469](https://github.com/navapbc/template-infra/pull/469/files)

## Migration notes

If the RDS database cluster already exists and has
`manage_master_user_password` set to `false`, the terraform plan will fail
with the following error:

<img width="645" alt="image"
src="https://github.com/navapbc/platform-test/assets/447859/2df688a3-132e-4e7b-aa67-ccaed1028091">

Thus, in order to migrate, follow these steps:

1. First, do a targeted apply of the `aws_rds_cluster` by running the
following command (replace `ENVIRONMENT_NAME` with the correct
environment):

   ```
TF_CLI_ARGS_apply='-target="module.database.aws_rds_cluster.db"' make
infra-update-app-database APP_NAME=app ENVIRONMENT=<ENVIRONMENT_NAME>
   ```

<img width="1003" alt="image"
src="https://github.com/navapbc/platform-test/assets/447859/356530b0-14d6-44c2-8ca7-805ec1853ea2">
<img width="440" alt="image"
src="https://github.com/navapbc/platform-test/assets/447859/8be192c9-164e-4af8-af70-4af9a440c898">
<img width="386" alt="image"
src="https://github.com/navapbc/platform-test/assets/447859/b6c22268-8588-431e-b2d7-3eaeeb4ae0e7">

2. Then you can apply the rest of the changes normally with `make
infra-update-app-database APP_NAME=app ENVIRONMENT=<ENVIRONMENT_NAME>`

<img width="616" alt="image"
src="https://github.com/navapbc/platform-test/assets/447859/7c615bef-623e-4b97-b298-0425b896222c">
<img width="271" alt="image"
src="https://github.com/navapbc/platform-test/assets/447859/af35c5c5-5f3c-4106-8fee-a059945b5d7e">
---
 infra/modules/database/role-manager.tf        | 31 ++++++-------------
 .../database/role_manager/role_manager.py     |  7 -----
 2 files changed, 10 insertions(+), 28 deletions(-)

diff --git a/infra/modules/database/role-manager.tf b/infra/modules/database/role-manager.tf
index 4dca5d19d..beb36b82e 100644
--- a/infra/modules/database/role-manager.tf
+++ b/infra/modules/database/role-manager.tf
@@ -6,7 +6,7 @@
 # as well as viewing existing roles
 
 locals {
-  ssm_password_name = length(aws_rds_cluster.db.master_user_secret) == 1 ? "/aws/reference/secretsmanager/${data.aws_secretsmanager_secret.db_pass[0].name}" : ""
+  db_password_param_name = "/aws/reference/secretsmanager/${data.aws_secretsmanager_secret.db_password.name}"
 }
 
 resource "aws_lambda_function" "role_manager" {
@@ -33,7 +33,7 @@ resource "aws_lambda_function" "role_manager" {
       DB_PORT                = aws_rds_cluster.db.port
       DB_USER                = local.master_username
       DB_NAME                = aws_rds_cluster.db.database_name
-      DB_PASSWORD_PARAM_NAME = local.ssm_password_name
+      DB_PASSWORD_PARAM_NAME = local.db_password_param_name
       DB_PASSWORD_SECRET_ARN = aws_rds_cluster.db.master_user_secret[0].secret_arn
       DB_SCHEMA              = var.schema_name
       APP_USER               = var.app_username
@@ -84,9 +84,10 @@ resource "aws_kms_key" "role_manager" {
   enable_key_rotation = true
 }
 
-data "aws_secretsmanager_secret" "db_pass" {
-  count = length(aws_rds_cluster.db.master_user_secret)
-  arn   = aws_rds_cluster.db.master_user_secret[0].secret_arn
+data "aws_secretsmanager_secret" "db_password" {
+  # master_user_secret is available when aws_rds_cluster.db.manage_master_user_password
+  # is true (see https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/rds_cluster#master_user_secret)
+  arn = aws_rds_cluster.db.master_user_secret[0].secret_arn
 }
 
 # IAM for Role Manager lambda function
@@ -106,7 +107,7 @@ resource "aws_iam_role" "role_manager" {
 
 
 
-resource "aws_iam_role_policy" "ssm_access" {
+resource "aws_iam_role_policy" "role_manager_access_to_db_password" {
   name = "${var.name}-role-manager-ssm-access"
   role = aws_iam_role.role_manager.id
 
@@ -117,29 +118,17 @@ resource "aws_iam_role_policy" "ssm_access" {
         Effect   = "Allow"
         Action   = ["kms:Decrypt"]
         Resource = [data.aws_kms_key.default_ssm_key.arn]
-      }
-    ]
-  })
-}
-
-resource "aws_iam_role_policy" "database_credential_tool" {
-  count = length(aws_rds_cluster.db.master_user_secret)
-  name  = "${var.name}-role-manager-rds-ssm-access"
-  role  = aws_iam_role.role_manager.id
-
-  policy = jsonencode({
-    Version = "2012-10-17"
-    Statement = [
+      },
       {
         Effect   = "Allow"
         Action   = ["secretsmanager:GetSecretValue"]
-        Resource = [data.aws_secretsmanager_secret.db_pass[0].arn]
+        Resource = [data.aws_secretsmanager_secret.db_password.arn]
       },
       {
         Effect = "Allow"
         Action = ["ssm:GetParameter"]
         Resource = [
-          "arn:aws:ssm:${data.aws_region.current.name}:${data.aws_caller_identity.current.id}:parameter${local.ssm_password_name}"
+          "arn:aws:ssm:${data.aws_region.current.name}:${data.aws_caller_identity.current.id}:parameter${local.db_password_param_name}"
         ]
       }
     ]
diff --git a/infra/modules/database/role_manager/role_manager.py b/infra/modules/database/role_manager/role_manager.py
index 9f8fe8be8..910da843e 100644
--- a/infra/modules/database/role_manager/role_manager.py
+++ b/infra/modules/database/role_manager/role_manager.py
@@ -6,19 +6,12 @@
 import logging
 from pg8000.native import Connection, identifier
 
-logging.basicConfig()
-logging.getLogger('botocore').setLevel(logging.DEBUG)
-logging.getLogger('boto3').setLevel(logging.DEBUG)
-
 logger = logging.getLogger()
 logger.setLevel(logging.INFO)
 
 def lambda_handler(event, context):
     if event == "check":
         return check()
-    elif event == "password_ts":
-        connect_as_master_user()
-        return "Succeeded"
     else:
         return manage()