diff --git a/backend/dataall/base/aws/iam.py b/backend/dataall/base/aws/iam.py index bedfd4620..581638e22 100644 --- a/backend/dataall/base/aws/iam.py +++ b/backend/dataall/base/aws/iam.py @@ -1,5 +1,6 @@ import logging from botocore.exceptions import ClientError +import re from .sts import SessionHelper @@ -66,6 +67,25 @@ def get_role_policy( log.error(f'Failed to get policy {policy_name} of role {role_name} : {e}') return None + @staticmethod + def list_policy_names_by_policy_pattern(account_id: str, region: str, policy_filter_pattern: str): + try: + client = IAM.client(account_id, region) + # Setting Scope to 'Local' to fetch all the policies created in this account + paginator = client.get_paginator('list_policies') + policies = [] + for page in paginator.paginate(Scope='Local'): + policies.extend(page['Policies']) + policy_names = [policy.get('PolicyName') for policy in policies] + return [policy_nm for policy_nm in policy_names if re.search(policy_filter_pattern, policy_nm)] + except ClientError as e: + if e.response['Error']['Code'] == 'AccessDenied': + raise Exception( + f'Data.all Environment Pivot Role does not have permissions to get policies with pattern {policy_filter_pattern} due to: {e}' + ) + log.error(f'Failed to get policies for policy pattern due to: {e}') + return [] + @staticmethod def delete_role_policy( account_id: str, @@ -101,6 +121,23 @@ def get_managed_policy_by_name(account_id: str, region: str, policy_name: str): log.error(f'Failed to get policy {policy_name}: {e}') return None + @staticmethod + def get_managed_policy_document_by_name(account_id: str, region: str, policy_name: str): + try: + arn = f'arn:aws:iam::{account_id}:policy/{policy_name}' + client = IAM.client(account_id, region) + policy = IAM.get_managed_policy_by_name(account_id, region, policy_name) + policyVersionId = policy['DefaultVersionId'] + response = client.get_policy_version(PolicyArn=arn, VersionId=policyVersionId) + return response['PolicyVersion']['Document'] + except ClientError as e: + if e.response['Error']['Code'] == 'AccessDenied': + raise Exception( + f'Data.all Environment Pivot Role does not have permissions to to get policy {policy_name}: {e}' + ) + log.error(f'Failed to get policy {policy_name}: {e}') + return None + @staticmethod def create_managed_policy(account_id: str, region: str, policy_name: str, policy: str): try: @@ -275,3 +312,16 @@ def remove_invalid_role_ids(account_id: str, region: str, principal_list): if 'AROA' in p_id: if p_id not in all_role_ids: principal_list.remove(p_id) + + @staticmethod + def get_attached_managed_policies_to_role(account_id: str, region: str, role_name: str): + try: + client = IAM.client(account_id, region) + response = client.list_attached_role_policies(RoleName=role_name) + return [policy.get('PolicyName') for policy in response['AttachedPolicies']] + except ClientError as e: + if e.response['Error']['Code'] == 'AccessDenied': + raise Exception( + f'Data.all Environment Pivot Role does not have permissions to get attached managed policies for {role_name}: {e}' + ) + raise Exception(f'Failed to get attached managed policies for {role_name}: {e}') diff --git a/backend/dataall/base/aws/service_quota.py b/backend/dataall/base/aws/service_quota.py new file mode 100644 index 000000000..dda57474a --- /dev/null +++ b/backend/dataall/base/aws/service_quota.py @@ -0,0 +1,58 @@ +import logging +from botocore.exceptions import ClientError + +from .sts import SessionHelper + +log = logging.getLogger(__name__) + + +class ServiceQuota: + def 
__init__(self, account_id, region): + session = SessionHelper.remote_session(accountid=account_id, region=region) + self.client = session.client('service-quotas') + + def list_services(self): + try: + log.info('Fetching list of services with service codes in AWS account') + services_list = [] + paginator = self.client.get_paginator('list_services') + for page in paginator.paginate(): + services_list.extend(page.get('Services')) + return services_list + except ClientError as e: + if e.response['Error']['Code'] == 'AccessDenied': + raise Exception(f'Data.all Environment Pivot Role does not have permissions to call list_services: {e}') + log.error(f'Failed to list services and service codes due to: {e}') + return [] + + def list_service_quota(self, service_code): + try: + log.info('Fetching service quota codes in AWS account') + service_quota_code_list = [] + paginator = self.client.get_paginator('list_service_quotas') + for page in paginator.paginate(ServiceCode=service_code): + service_quota_code_list.extend(page.get('Quotas')) + log.debug(f'Service quota list: {service_quota_code_list}') + return service_quota_code_list + except ClientError as e: + if e.response['Error']['Code'] == 'AccessDenied': + raise Exception( + f'Data.all Environment Pivot Role does not have permissions to call list_service_quota: {e}' + ) + log.error(f'Failed to list service quotas due to: {e}') + return [] + + def get_service_quota_value(self, service_code, service_quota_code): + try: + log.info( + f'Getting service quota for service code: {service_code} and service quota code: {service_quota_code}' + ) + response = self.client.get_service_quota(ServiceCode=service_code, QuotaCode=service_quota_code) + return response['Quota']['Value'] + except ClientError as e: + if e.response['Error']['Code'] == 'AccessDenied': + raise Exception( + f'Data.all Environment Pivot Role does not have permissions to call get_service_quota: {e}' + ) + log.error(f'Failed to get service quota value due to: {e}') + return None diff --git a/backend/dataall/base/db/exceptions.py b/backend/dataall/base/db/exceptions.py index 95c2c2a73..c3c5d91e9 100644 --- a/backend/dataall/base/db/exceptions.py +++ b/backend/dataall/base/db/exceptions.py @@ -147,6 +147,18 @@ def __str__(self): return f'{self.message}' +class AWSServiceQuotaExceeded(Exception): + def __init__(self, action, message): + self.action = action + self.message = f""" + An error occurred (AWSServiceQuotaExceeded) when calling {self.action} operation: + {message} + """ + + def __str__(self): + return f'{self.message}' + + class EnvironmentResourcesFound(Exception): def __init__(self, action, message): self.action = action diff --git a/backend/dataall/base/utils/iam_cdk_utils.py b/backend/dataall/base/utils/iam_cdk_utils.py new file mode 100644 index 000000000..c87af5204 --- /dev/null +++ b/backend/dataall/base/utils/iam_cdk_utils.py @@ -0,0 +1,76 @@ +from typing import Dict, Any, List +from aws_cdk import aws_iam as iam + +from dataall.base.utils.iam_policy_utils import ( + split_policy_statements_in_chunks, + split_policy_with_resources_in_statements, + split_policy_with_mutiple_value_condition_in_statements, +) + + +def convert_from_json_to_iam_policy_statement_with_conditions(iam_policy: Dict[Any, Any]): + return iam.PolicyStatement( + sid=iam_policy.get('Sid'), + effect=iam.Effect.ALLOW if iam_policy.get('Effect').casefold() == 'Allow'.casefold() else iam.Effect.DENY, + actions=_convert_to_array(str, iam_policy.get('Action')), + resources=_convert_to_array(str, iam_policy.get('Resource')), +
conditions=iam_policy.get('Condition'), + ) + + +def convert_from_json_to_iam_policy_statement_with_resources(iam_policy: Dict[Any, Any]): + return iam.PolicyStatement( + sid=iam_policy.get('Sid'), + effect=iam.Effect.ALLOW if iam_policy.get('Effect').casefold() == 'Allow'.casefold() else iam.Effect.DENY, + actions=_convert_to_array(str, iam_policy.get('Action')), + resources=_convert_to_array(str, iam_policy.get('Resource')), + ) + + +def process_and_split_statements_in_chunks(statements: List[Dict]): + statement_chunks_json: List[List[Dict]] = split_policy_statements_in_chunks(statements) + statements_chunks: List[List[iam.PolicyStatement]] = [] + for statement_js_chunk in statement_chunks_json: + statements: List[iam.PolicyStatement] = [] + for statement in statement_js_chunk: + if statement.get('Condition', None): + statements.append(convert_from_json_to_iam_policy_statement_with_conditions(statement)) + else: + statements.append(convert_from_json_to_iam_policy_statement_with_resources(statement)) + statements_chunks.append(statements) + return statements_chunks + + +def process_and_split_policy_with_conditions_in_statements( + base_sid: str, effect: str, actions: List[str], resources: List[str], condition_dict: Dict = None +): + json_statements = split_policy_with_mutiple_value_condition_in_statements( + base_sid=base_sid, effect=effect, actions=actions, resources=resources, condition_dict=condition_dict + ) + + iam_statements: [iam.PolicyStatement] = [] + for json_statement in json_statements: + iam_policy_statement = convert_from_json_to_iam_policy_statement_with_conditions(json_statement) + iam_statements.append(iam_policy_statement) + return iam_statements + + +def process_and_split_policy_with_resources_in_statements( + base_sid: str, effect: str, actions: List[str], resources: List[str] +): + json_statements = split_policy_with_resources_in_statements( + base_sid=base_sid, effect=effect, actions=actions, resources=resources + ) + iam_statements: [iam.PolicyStatement] = [] + for json_statement in json_statements: + iam_policy_statement = convert_from_json_to_iam_policy_statement_with_resources(json_statement) + iam_statements.append(iam_policy_statement) + return iam_statements + + +# If item is of item type i.e. single instance if present, then wrap in an array. 
+# This is helpful at places where array is required even if one element is present +def _convert_to_array(item_type, item): + if isinstance(item, item_type): + return [item] + return item diff --git a/backend/dataall/base/utils/iam_policy_utils.py b/backend/dataall/base/utils/iam_policy_utils.py index 3e985f93e..9e47316e0 100644 --- a/backend/dataall/base/utils/iam_policy_utils.py +++ b/backend/dataall/base/utils/iam_policy_utils.py @@ -1,6 +1,5 @@ -from typing import List, Callable +from typing import List, Callable, Dict import logging -from aws_cdk import aws_iam as iam logger = logging.getLogger(__name__) @@ -9,7 +8,7 @@ MAXIMUM_NUMBER_MANAGED_POLICIES = 20 # Soft limit 10, hard limit 20 -def split_policy_statements_in_chunks(statements: List): +def split_policy_statements_in_chunks(statements: List[Dict]): """ Splitter used for IAM policies with an undefined number of statements - Ensures that the size of the IAM policy remains below the POLICY LIMIT @@ -18,7 +17,7 @@ def split_policy_statements_in_chunks(statements: List): """ chunks = [] index = 0 - statements_list_of_strings = [str(s.to_json()) for s in statements] + statements_list_of_strings = [str(s) for s in statements] total_length = len(', '.join(statements_list_of_strings)) logger.info(f'Number of statements = {len(statements)}') logger.info(f'Total length of statements = {total_length}') @@ -31,13 +30,12 @@ def split_policy_statements_in_chunks(statements: List): chunk = [] chunk_size = 0 while ( - index < len(statements) - and chunk_size + len(str(statements[index].to_json())) < POLICY_LIMIT - POLICY_HEADERS_BUFFER + index < len(statements) and chunk_size + len(str(statements[index])) < POLICY_LIMIT - POLICY_HEADERS_BUFFER ): # Appends a statement to the chunk until we reach its maximum size. 
# It compares, current size of the statements < allowed size for the statements section of a policy chunk.append(statements[index]) - chunk_size += len(str(statements[index].to_json())) + chunk_size += len(str(statements[index])) index += 1 chunks.append(chunk) logger.info(f'Total number of managed policies = {len(chunks)}') @@ -46,15 +44,13 @@ def split_policy_statements_in_chunks(statements: List): return chunks -def split_policy_with_resources_in_statements( - base_sid: str, effect: iam.Effect, actions: List[str], resources: List[str] -): +def split_policy_with_resources_in_statements(base_sid: str, effect: str, actions: List[str], resources: List[str]): """ The variable part of the policy is in the resources parameter of the PolicyStatement """ def _build_statement(split, subset): - return iam.PolicyStatement(sid=base_sid + str(split), effect=effect, actions=actions, resources=subset) + return {'Sid': base_sid + str(split), 'Effect': effect, 'Action': actions, 'Resource': subset} total_length, base_length = _policy_analyzer(resources, _build_statement) extra_chars = len('" ," ') @@ -72,7 +68,7 @@ def _build_statement(split, subset): def split_policy_with_mutiple_value_condition_in_statements( - base_sid: str, effect: iam.Effect, actions: List[str], resources: List[str], condition_dict: dict + base_sid: str, effect: str, actions: List[str], resources: List[str], condition_dict: dict ): """ The variable part of the policy is in the conditions parameter of the PolicyStatement @@ -80,13 +76,13 @@ def split_policy_with_mutiple_value_condition_in_statements( """ def _build_statement(split, subset): - return iam.PolicyStatement( - sid=base_sid + str(split), - effect=effect, - actions=actions, - resources=resources, - conditions={condition_dict.get('key'): {condition_dict.get('resource'): subset}}, - ) + return { + 'Sid': base_sid + str(split), + 'Effect': effect, + 'Action': actions, + 'Resource': resources, + 'Condition': {condition_dict.get('key'): {condition_dict.get('resource'): subset}}, + } total_length, base_length = _policy_analyzer(condition_dict.get('values'), _build_statement) extra_chars = len( @@ -109,13 +105,13 @@ def _build_statement(split, subset): return resulting_statements -def _policy_analyzer(resources: List[str], statement_builder: Callable[[int, List[str]], iam.PolicyStatement]): +def _policy_analyzer(resources: List[str], statement_builder: Callable[[int, List[str]], Dict]): """ Calculates the policy size with the resources (total_length) and without resources (base_length) """ statement_without_resources = statement_builder(1, ['*']) resources_str = '" ," '.join(r for r in resources) - base_length = len(str(statement_without_resources.to_json())) + base_length = len(str(statement_without_resources)) total_length = base_length + len(resources_str) logger.info(f'Policy base length = {base_length}') logger.info(f'Resources as string length = {len(resources_str)}') @@ -128,7 +124,7 @@ def _policy_splitter( base_length: int, resources: List[str], extra_chars: int, - statement_builder: Callable[[int, List[str]], iam.PolicyStatement], + statement_builder: Callable[[int, List[str]], Dict], ): """ Splitter used for IAM policy statements with an undefined number of resources one of the parameters of the policy. 
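# --- Illustrative usage sketch (editor's addition, not part of the patch) -------------------
# The reworked helpers in iam_policy_utils now operate on plain JSON dicts instead of
# aws_cdk PolicyStatement objects, so the same splitting logic can serve both CDK stacks and
# runtime IAM calls. A minimal sketch of the intended call pattern; 'ExampleS3' and the bucket
# ARNs are made-up placeholders.
from dataall.base.utils.iam_policy_utils import (
    split_policy_statements_in_chunks,
    split_policy_with_resources_in_statements,
)

statements = split_policy_with_resources_in_statements(
    base_sid='ExampleS3',
    effect='Allow',
    actions=['s3:List*', 's3:Describe*', 's3:GetObject'],
    resources=[f'arn:aws:s3:::example-imported-bucket-{i}' for i in range(500)],
)
# Each element is a plain dict: {'Sid': 'ExampleS3<n>', 'Effect': 'Allow', 'Action': [...], 'Resource': [...]}
chunks = split_policy_statements_in_chunks(statements)
# Each chunk stays below the managed-policy size limit; CDK stacks convert the dicts back into
# iam.PolicyStatement objects via the new helpers in iam_cdk_utils, while runtime code
# serializes them into JSON policy documents for the IAM API.
# ---------------------------------------------------------------------------------------------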
diff --git a/backend/dataall/base/utils/naming_convention.py b/backend/dataall/base/utils/naming_convention.py index ecccfc7d5..feddbd830 100644 --- a/backend/dataall/base/utils/naming_convention.py +++ b/backend/dataall/base/utils/naming_convention.py @@ -57,6 +57,20 @@ def build_compliant_name(self) -> str: suffix = f'-{self.target_uri}' if len(self.target_uri) else '' return f'{slugify(self.resource_prefix + "-" + self.target_label[: (max_length - len(self.resource_prefix + self.target_uri))] + suffix, regex_pattern=rf"{regex}", separator=separator, lowercase=True)}' + def build_compliant_name_with_index(self, index: int = None) -> str: + """ + Builds a compliant AWS resource name with an index at the end of the policy name + IMP - If no index is provided, then this method provides a base policy name without index. Base policy name is calculated by considering the length of string required for index + This is done so that the base policy name doesn't change when an index is added to the string. + """ + regex = NamingConventionPattern[self.service].value['regex'] + separator = NamingConventionPattern[self.service].value['separator'] + max_length = NamingConventionPattern[self.service].value['max_length'] + index_string_length = 2 # This is added to adjust the target label string even if the index is set to None. This helps in getting the base policy name when index is None + index_string = f'-{index}' if index is not None else '' + suffix = f'-{self.target_uri}' if len(self.target_uri) else '' + return f'{slugify(self.resource_prefix + "-" + self.target_label[: (max_length - len(self.resource_prefix + self.target_uri) - index_string_length)] + suffix + index_string, regex_pattern=rf"{regex}", separator=separator, lowercase=True)}' + def validate_name(self): regex = NamingConventionPattern[self.service].value['regex'] max_length = NamingConventionPattern[self.service].value['max_length'] diff --git a/backend/dataall/core/environment/api/resolvers.py b/backend/dataall/core/environment/api/resolvers.py index 01a3c1bde..70c9fadea 100644 --- a/backend/dataall/core/environment/api/resolvers.py +++ b/backend/dataall/core/environment/api/resolvers.py @@ -175,13 +175,6 @@ def get_parent_organization(context: Context, source, **kwargs): return org -# used from ConsumptionRole type as field resolver -def resolve_consumption_role_policies(context: Context, source, **kwargs): - return EnvironmentService.resolve_consumption_role_policies( - uri=source.environmentUri, IAMRoleName=source.IAMRoleName - ) - - # used from getConsumptionRolePolicies query -- query resolver def get_consumption_role_policies(context: Context, source, environmentUri, IAMRoleName): return EnvironmentService.resolve_consumption_role_policies(uri=environmentUri, IAMRoleName=IAMRoleName) diff --git a/backend/dataall/core/environment/api/types.py b/backend/dataall/core/environment/api/types.py index a95d943c4..4479b7bab 100644 --- a/backend/dataall/core/environment/api/types.py +++ b/backend/dataall/core/environment/api/types.py @@ -3,7 +3,6 @@ from dataall.core.environment.api.resolvers import ( get_environment_stack, get_parent_organization, - resolve_consumption_role_policies, resolve_environment_networks, resolve_parameters, resolve_user_role, @@ -181,9 +180,6 @@ gql.Field(name='created', type=gql.String), gql.Field(name='updated', type=gql.String), gql.Field(name='deleted', type=gql.String), - gql.Field( - name='managedPolicies', type=gql.ArrayType(RoleManagedPolicy), resolver=resolve_consumption_role_policies - ), ], ) diff 
--git a/backend/dataall/core/environment/cdk/pivot_role_core_policies/__init__.py b/backend/dataall/core/environment/cdk/pivot_role_core_policies/__init__.py index 609d5ad3b..e7c2e7c09 100644 --- a/backend/dataall/core/environment/cdk/pivot_role_core_policies/__init__.py +++ b/backend/dataall/core/environment/cdk/pivot_role_core_policies/__init__.py @@ -10,6 +10,7 @@ sqs, ssm, sts, + service_quota, ) -__all__ = ['cloudformation', 'iam', 'kms', 'logging', 's3', 'sns', 'sqs', 'ssm', 'sts'] +__all__ = ['cloudformation', 'iam', 'kms', 'logging', 's3', 'sns', 'sqs', 'ssm', 'sts', 'service_quota'] diff --git a/backend/dataall/core/environment/cdk/pivot_role_core_policies/iam.py b/backend/dataall/core/environment/cdk/pivot_role_core_policies/iam.py index bbd1bdddc..15da792ad 100644 --- a/backend/dataall/core/environment/cdk/pivot_role_core_policies/iam.py +++ b/backend/dataall/core/environment/cdk/pivot_role_core_policies/iam.py @@ -13,7 +13,7 @@ def get_statements(self): statements = [ # IAM - needed for consumption roles and for S3 sharing iam.PolicyStatement( - sid='IAMListGet', effect=iam.Effect.ALLOW, actions=['iam:ListRoles', 'iam:Get*'], resources=['*'] + sid='IAMListGet', effect=iam.Effect.ALLOW, actions=['iam:List*', 'iam:Get*'], resources=['*'] ), iam.PolicyStatement( sid='PassRole', diff --git a/backend/dataall/core/environment/cdk/pivot_role_core_policies/service_quota.py b/backend/dataall/core/environment/cdk/pivot_role_core_policies/service_quota.py new file mode 100644 index 000000000..9895064d3 --- /dev/null +++ b/backend/dataall/core/environment/cdk/pivot_role_core_policies/service_quota.py @@ -0,0 +1,22 @@ +from dataall.core.environment.cdk.pivot_role_stack import PivotRoleStatementSet +from aws_cdk import aws_iam as iam + + +class ServiceQuotaPivotRole(PivotRoleStatementSet): + """ + Class including all permissions needed by the pivot role to work with AWS Service Quota. 
+ It allows pivot role to: + - List and Get Service Quota details + """ + + def get_statements(self): + statements = [ + # Service Quota - Needed to determine the number of service quotas for managed policies which can be attached + iam.PolicyStatement( + sid='ServiceQuotaListGet', + effect=iam.Effect.ALLOW, + actions=['servicequotas:List*', 'servicequotas:Get*'], + resources=['*'], + ) + ] + return statements diff --git a/backend/dataall/core/environment/cdk/pivot_role_stack.py b/backend/dataall/core/environment/cdk/pivot_role_stack.py index d6368ea55..073712898 100644 --- a/backend/dataall/core/environment/cdk/pivot_role_stack.py +++ b/backend/dataall/core/environment/cdk/pivot_role_stack.py @@ -3,7 +3,11 @@ from typing import List from constructs import Construct from aws_cdk import Duration, aws_iam as iam, NestedStack -from dataall.base.utils.iam_policy_utils import split_policy_statements_in_chunks + +from dataall.base.utils.iam_cdk_utils import ( + process_and_split_statements_in_chunks, +) + logger = logging.getLogger(__name__) @@ -36,7 +40,8 @@ def generate_policies(self) -> List[iam.ManagedPolicy]: statements.extend(service.get_statements(self)) logger.info(f'statements: {str(service.get_statements(self))}') - statements_chunks = split_policy_statements_in_chunks(statements) + statements_json = [statement.to_json() for statement in statements] + statements_chunks: List[List[iam.PolicyStatement]] = process_and_split_statements_in_chunks(statements_json) for index, chunk in enumerate(statements_chunks): policies.append( diff --git a/backend/dataall/core/environment/services/managed_iam_policies.py b/backend/dataall/core/environment/services/managed_iam_policies.py index 5962f58fd..ebf7a90ce 100644 --- a/backend/dataall/core/environment/services/managed_iam_policies.py +++ b/backend/dataall/core/environment/services/managed_iam_policies.py @@ -29,42 +29,67 @@ def policy_type(self): ... @abstractmethod - def generate_policy_name(self) -> str: + def generate_old_policy_name(self) -> str: + """ + Returns string and needs to be implemented in the ManagedPolicies inherited classes. + Used for backwards compatibility. It should be deprecated in the future releases. + """ + + @abstractmethod + def generate_base_policy_name(self) -> str: """ Returns string and needs to be implemented in the ManagedPolicies inherited classes """ ... @abstractmethod - def generate_empty_policy(self) -> dict: + def generate_indexed_policy_name(self, index) -> str: """ - Returns dict and needs to be implemented in the ManagedPolicies inherited classes + Returns string of policy name with index at the end .Needs to be implemented in the ManagedPolicies inherited classes """ ... @abstractmethod - def create_managed_policy_from_inline_and_delete_inline(self) -> str: + def generate_empty_policy(self) -> dict: """ - Returns policy arn and needs to be implemented in the ManagedPolicies inherited classes - It is used for backwards compatibility. It should be deprecated and removed in future releases. + Returns dict and needs to be implemented in the ManagedPolicies inherited classes """ ... 
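# --- Illustrative sketch (editor's addition, not part of the patch) ---------------------------
# How a module's ManagedPolicy subclass is expected to satisfy the new indexed-naming contract.
# 'ExampleModulePolicy' and the 'example-policy' label are hypothetical; the constructor mirrors
# S3SharePolicyService further below, and real implementations build names with
# NamingConventionService so they stay within IAM length limits.
class ExampleModulePolicy(ManagedPolicy):
    def __init__(self, role_name, account, region, environmentUri, resource_prefix):
        self.role_name = role_name
        self.account = account
        self.region = region
        self.environmentUri = environmentUri
        self.resource_prefix = resource_prefix

    @property
    def policy_type(self) -> str:
        return 'ExamplePolicy'

    def generate_old_policy_name(self) -> str:
        # Single, non-indexed name used before this change; kept only so old policies can be found and migrated
        return f'{self.resource_prefix}-example-policy-{self.role_name}'

    def generate_base_policy_name(self) -> str:
        # Common prefix of all indexed policies; _get_policy_names searches IAM for '<base>-<index>'
        # (in practice the base name differs from the old name only in how the label is truncated)
        return f'{self.resource_prefix}-example-policy-{self.role_name}'

    def generate_indexed_policy_name(self, index) -> str:
        return f'{self.generate_base_policy_name()}-{index}'

    def generate_empty_policy(self) -> dict:
        return {
            'Version': '2012-10-17',
            'Statement': [{'Sid': 'EmptyStatement', 'Effect': 'Allow', 'Action': ['none:null'], 'Resource': ['*']}],
        }
# PolicyManager then creates '<base>-0' up front and later lists, attaches or deletes every
# '<base>-<index>' policy, falling back to generate_old_policy_name() for pre-existing roles.
# -----------------------------------------------------------------------------------------------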
- def check_if_policy_exists(self) -> bool: - policy_name = self.generate_policy_name() - share_policy = IAM.get_managed_policy_by_name(self.account, self.region, policy_name) - return share_policy is not None - - def check_if_policy_attached(self): - policy_name = self.generate_policy_name() - return IAM.is_policy_attached(self.account, self.region, policy_name, self.role_name) - - def attach_policy(self): - policy_arn = f'arn:aws:iam::{self.account}:policy/{self.generate_policy_name()}' - try: - IAM.attach_role_policy(self.account, self.region, self.role_name, policy_arn) - except Exception as e: - raise Exception(f"Required customer managed policy {policy_arn} can't be attached: {e}") + def check_if_policy_exists(self, policy_name) -> bool: + policy = IAM.get_managed_policy_by_name(self.account, self.region, policy_name) + return policy is not None + + def get_managed_policies(self) -> List[str]: + policy_pattern = self.generate_base_policy_name() + policies = self._get_policy_names(policy_pattern) + return policies + + def check_if_policy_attached(self, policy_name): + is_policy_attached = IAM.is_policy_attached(self.account, self.region, policy_name, self.role_name) + return is_policy_attached + + def get_policies_unattached_to_role(self): + policy_pattern = self.generate_base_policy_name() + policies = self._get_policy_names(policy_pattern) + unattached_policies = [] + for policy_name in policies: + if not self.check_if_policy_attached(policy_name): + unattached_policies.append(policy_name) + return unattached_policies + + def attach_policies(self, managed_policies_list: List[str]): + for policy_name in managed_policies_list: + policy_arn = f'arn:aws:iam::{self.account}:policy/{policy_name}' + try: + IAM.attach_role_policy(self.account, self.region, self.role_name, policy_arn) + except Exception as e: + raise Exception(f"Required customer managed policy {policy_arn} can't be attached: {e}") + + def _get_policy_names(self, base_policy_name): + filter_pattern = r'{base_policy_name}-\d'.format(base_policy_name=base_policy_name) + policies = IAM.list_policy_names_by_policy_pattern(self.account, self.region, filter_pattern) + return policies class PolicyManager(object): @@ -99,12 +124,11 @@ def create_all_policies(self, managed) -> bool: Manager that registers and calls all policies created by data.all modules and that need to be created for consumption roles and team roles """ - try: - for Policy in self.initializedPolicies: - empty_policy = Policy.generate_empty_policy() - policy_name = Policy.generate_policy_name() - logger.info(f'Creating policy {policy_name}') - + for policy_manager in self.initializedPolicies: + empty_policy = policy_manager.generate_empty_policy() + policy_name = policy_manager.generate_indexed_policy_name(index=0) + logger.info(f'Creating policy: {policy_name}') + try: IAM.create_managed_policy( account_id=self.account, region=self.region, @@ -119,8 +143,9 @@ def create_all_policies(self, managed) -> bool: role_name=self.role_name, policy_arn=f'arn:aws:iam::{self.account}:policy/{policy_name}', ) - except Exception as e: - raise e + except Exception as e: + logger.error(f'Error while creating and attaching policies due to: {e}') + raise e return True def delete_all_policies(self) -> bool: @@ -128,23 +153,35 @@ def delete_all_policies(self) -> bool: Manager that registers and calls all policies created by data.all modules and that need to be deleted for consumption roles and team roles """ - try: - for Policy in self.initializedPolicies: - policy_name = 
Policy.generate_policy_name() + for policy_manager in self.initializedPolicies: + policy_name_list = policy_manager.get_managed_policies() + + # Check if policy with old naming format exists + if not policy_name_list: + old_managed_policy_name = policy_manager.generate_old_policy_name() + if policy_manager.check_if_policy_exists(policy_name=old_managed_policy_name): + policy_name_list.append(old_managed_policy_name) + + for policy_name in policy_name_list: logger.info(f'Deleting policy {policy_name}') - if Policy.check_if_policy_attached(): - IAM.detach_policy_from_role( - account_id=self.account, region=self.region, role_name=self.role_name, policy_name=policy_name - ) - if Policy.check_if_policy_exists(): - IAM.delete_managed_policy_non_default_versions( - account_id=self.account, region=self.region, policy_name=policy_name - ) - IAM.delete_managed_policy_by_name( - account_id=self.account, region=self.region, policy_name=policy_name - ) - except Exception as e: - raise e + try: + if policy_manager.check_if_policy_attached(policy_name=policy_name): + IAM.detach_policy_from_role( + account_id=self.account, + region=self.region, + role_name=self.role_name, + policy_name=policy_name, + ) + if policy_manager.check_if_policy_exists(policy_name=policy_name): + IAM.delete_managed_policy_non_default_versions( + account_id=self.account, region=self.region, policy_name=policy_name + ) + IAM.delete_managed_policy_by_name( + account_id=self.account, region=self.region, policy_name=policy_name + ) + except Exception as e: + logger.error(f'Error while deleting managed policies due to: {e}') + raise e return True def get_all_policies(self) -> List[dict]: @@ -153,13 +190,22 @@ def get_all_policies(self) -> List[dict]: need to be listed for consumption roles and team roles """ all_policies = [] - for Policy in self.initializedPolicies: - policy_dict = { - 'policy_name': Policy.generate_policy_name(), - 'policy_type': Policy.policy_type, - 'exists': Policy.check_if_policy_exists(), - 'attached': Policy.check_if_policy_attached(), - } - all_policies.append(policy_dict) - logger.info(f'All policies currently added to role {str(all_policies)}') + for policy_manager in self.initializedPolicies: + policy_name_list = policy_manager.get_managed_policies() + + # Check if policy with old naming format exists + if not policy_name_list: + old_managed_policy_name = policy_manager.generate_old_policy_name() + if policy_manager.check_if_policy_exists(policy_name=old_managed_policy_name): + policy_name_list.append(old_managed_policy_name) + + for policy_name in policy_name_list: + policy_dict = { + 'policy_name': policy_name, + 'policy_type': policy_manager.policy_type, + 'exists': policy_manager.check_if_policy_exists(policy_name=policy_name), + 'attached': policy_manager.check_if_policy_attached(policy_name=policy_name), + } + all_policies.append(policy_dict) + logger.info(f'All policies currently added to role: {self.role_name} are: {str(all_policies)}') return all_policies diff --git a/backend/dataall/modules/redshift_datasets/cdk/pivot_role_redshift_policy.py b/backend/dataall/modules/redshift_datasets/cdk/pivot_role_redshift_policy.py index 66c52b89c..b2ad8d9ab 100644 --- a/backend/dataall/modules/redshift_datasets/cdk/pivot_role_redshift_policy.py +++ b/backend/dataall/modules/redshift_datasets/cdk/pivot_role_redshift_policy.py @@ -3,7 +3,7 @@ from dataall.base import db from dataall.base.aws.sts import SessionHelper -from dataall.base.utils.iam_policy_utils import split_policy_with_resources_in_statements +from 
dataall.base.utils.iam_cdk_utils import process_and_split_policy_with_resources_in_statements from dataall.core.environment.cdk.pivot_role_stack import PivotRoleStatementSet from dataall.modules.redshift_datasets.db.redshift_connection_repositories import RedshiftConnectionRepository from dataall.modules.redshift_datasets.aws.redshift_serverless import redshift_serverless_client @@ -81,18 +81,17 @@ def get_statements(self): workgroup_arns = [ rs_client.get_workgroup_arn(workgroup_name=conn.workgroup) for conn in connections if conn.workgroup ] - additional_statements.extend( - split_policy_with_resources_in_statements( - base_sid='RedshiftData', - effect=iam.Effect.ALLOW, - actions=[ - 'redshift-data:ListSchemas', - 'redshift-data:ListTables', - 'redshift-data:ExecuteStatement', - 'redshift-data:DescribeTable', - ], - resources=cluster_arns + workgroup_arns, - ) + redshift_data_statements = process_and_split_policy_with_resources_in_statements( + base_sid='RedshiftData', + effect=iam.Effect.ALLOW.value, + actions=[ + 'redshift-data:ListSchemas', + 'redshift-data:ListTables', + 'redshift-data:ExecuteStatement', + 'redshift-data:DescribeTable', + ], + resources=cluster_arns + workgroup_arns, ) + additional_statements.extend(redshift_data_statements) return base_statements + additional_statements diff --git a/backend/dataall/modules/redshift_datasets_shares/cdk/pivot_role_redshift_data_sharing_policy.py b/backend/dataall/modules/redshift_datasets_shares/cdk/pivot_role_redshift_data_sharing_policy.py index c60ce5412..cd0251692 100644 --- a/backend/dataall/modules/redshift_datasets_shares/cdk/pivot_role_redshift_data_sharing_policy.py +++ b/backend/dataall/modules/redshift_datasets_shares/cdk/pivot_role_redshift_data_sharing_policy.py @@ -1,6 +1,6 @@ import os from dataall.base import db -from dataall.base.utils.iam_policy_utils import split_policy_with_resources_in_statements +from dataall.base.utils.iam_cdk_utils import process_and_split_policy_with_resources_in_statements from dataall.core.environment.cdk.pivot_role_stack import PivotRoleStatementSet from dataall.modules.redshift_datasets.db.redshift_connection_repositories import RedshiftConnectionRepository @@ -39,9 +39,9 @@ def get_statements(self): for conn in connections ] additional_statements.extend( - split_policy_with_resources_in_statements( + process_and_split_policy_with_resources_in_statements( base_sid='RedshiftDataShare', - effect=iam.Effect.ALLOW, + effect=iam.Effect.ALLOW.value, actions=['redshift:AuthorizeDataShare'], resources=source_datashare_arns, ) diff --git a/backend/dataall/modules/s3_datasets/cdk/pivot_role_datasets_policy.py b/backend/dataall/modules/s3_datasets/cdk/pivot_role_datasets_policy.py index e09360fc6..b18fe1646 100644 --- a/backend/dataall/modules/s3_datasets/cdk/pivot_role_datasets_policy.py +++ b/backend/dataall/modules/s3_datasets/cdk/pivot_role_datasets_policy.py @@ -1,8 +1,8 @@ import os from dataall.base import db -from dataall.base.utils.iam_policy_utils import ( - split_policy_with_resources_in_statements, - split_policy_with_mutiple_value_condition_in_statements, +from dataall.base.utils.iam_cdk_utils import ( + process_and_split_policy_with_resources_in_statements, + process_and_split_policy_with_conditions_in_statements, ) from dataall.core.environment.cdk.pivot_role_stack import PivotRoleStatementSet from dataall.modules.s3_datasets.db.dataset_repositories import DatasetRepository @@ -153,9 +153,9 @@ def get_statements(self): imported_kms_alias.append(f'alias/{dataset.KmsAlias}') if 
imported_buckets: - dataset_statement = split_policy_with_resources_in_statements( + dataset_statements = process_and_split_policy_with_resources_in_statements( base_sid='ImportedDatasetBuckets', - effect=iam.Effect.ALLOW, + effect=iam.Effect.ALLOW.value, actions=[ 's3:List*', 's3:GetBucket*', @@ -168,11 +168,11 @@ def get_statements(self): ], resources=imported_buckets, ) - statements.extend(dataset_statement) + statements.extend(dataset_statements) if imported_kms_alias: - kms_statement = split_policy_with_mutiple_value_condition_in_statements( + kms_statements = process_and_split_policy_with_conditions_in_statements( base_sid='KMSImportedDataset', - effect=iam.Effect.ALLOW, + effect=iam.Effect.ALLOW.value, actions=[ 'kms:Decrypt', 'kms:Encrypt', @@ -190,6 +190,6 @@ def get_statements(self): 'values': imported_kms_alias, }, ) - statements.extend(kms_statement) + statements.extend(kms_statements) return statements diff --git a/backend/dataall/modules/s3_datasets_shares/services/s3_share_managed_policy_service.py b/backend/dataall/modules/s3_datasets_shares/services/s3_share_managed_policy_service.py index ec53e74d9..645f540d1 100644 --- a/backend/dataall/modules/s3_datasets_shares/services/s3_share_managed_policy_service.py +++ b/backend/dataall/modules/s3_datasets_shares/services/s3_share_managed_policy_service.py @@ -1,5 +1,13 @@ import json +from typing import Any, List, Dict + from dataall.base.aws.iam import IAM +from dataall.base.aws.service_quota import ServiceQuota +from dataall.base.db.exceptions import AWSServiceQuotaExceeded +from dataall.base.utils.iam_policy_utils import ( + split_policy_statements_in_chunks, + split_policy_with_resources_in_statements, +) from dataall.base.utils.naming_convention import NamingConventionService, NamingConventionPattern from dataall.core.environment.services.managed_iam_policies import ManagedPolicy import logging @@ -18,6 +26,9 @@ EMPTY_STATEMENT_SID = 'EmptyStatement' S3_ALLOWED_ACTIONS = ['s3:List*', 's3:Describe*', 's3:GetObject'] +IAM_SERVICE_NAME = 'AWS Identity and Access Management (IAM)' +IAM_SERVICE_QUOTA_NAME = 'Managed policies per role' +DEFAULT_MAX_ATTACHABLE_MANAGED_POLICIES_ACCOUNT = 10 class S3SharePolicyService(ManagedPolicy): @@ -27,14 +38,40 @@ def __init__(self, role_name, account, region, environmentUri, resource_prefix): self.region = region self.environmentUri = environmentUri self.resource_prefix = resource_prefix + self.policy_version_map = {} # Policy version map helps while updating policies + self.total_s3_stmts: List[Any] = [] + self.total_s3_kms_stmts: List[Any] = [] + self.total_s3_access_point_stmts: List[Any] = [] + self.total_s3_access_point_kms_stmts: List[Any] = [] + + def initialize_statements(self): + log.info('Extracting policy statement from all managed policies') + share_managed_policies_name_list = self.get_managed_policies() + + for share_managed_policy in share_managed_policies_name_list: + version_id, policy_document = IAM.get_managed_policy_default_version( + account_id=self.account, region=self.region, policy_name=share_managed_policy + ) + self.policy_version_map[share_managed_policy] = version_id + s3_statements, s3_kms_statements, s3_access_point_statements, s3_kms_access_point_statements = ( + S3SharePolicyService._get_segregated_policy_statements_from_policy(policy_document) + ) + self.total_s3_stmts.extend(s3_statements) + self.total_s3_kms_stmts.extend(s3_kms_statements) + self.total_s3_access_point_stmts.extend(s3_access_point_statements) + 
self.total_s3_access_point_kms_stmts.extend(s3_kms_access_point_statements) + + log.debug(f'Total S3 Bucket sharing statements: {self.total_s3_stmts}') + log.debug(f'Total KMS Bucket sharing statements : {self.total_s3_kms_stmts}') + log.debug(f'Total S3 Access-point sharing statements : {self.total_s3_access_point_stmts}') + log.debug(f'Total KMS Access-point sharing statements : {self.total_s3_access_point_kms_stmts}') @property def policy_type(self) -> str: return 'SharePolicy' - def generate_policy_name(self) -> str: - # In this case it is not possible to build a too long policy because the IAM role can be max 64 chars - # However it is good practice to use the standard utility to build the name + def generate_old_policy_name(self) -> str: + # This function should be deprecated and removed in the future return NamingConventionService( target_label=f'env-{self.environmentUri}-share-policy', target_uri=self.role_name, @@ -42,10 +79,30 @@ def generate_policy_name(self) -> str: resource_prefix=self.resource_prefix, ).build_compliant_name() + def generate_base_policy_name(self) -> str: + """ + Returns the base name of managed policies. This base name is without the index. + build_compliant_name_with_index() function generated the name of the policy considering the length needed for index. + """ + return NamingConventionService( + target_label=f'env-{self.environmentUri}-share-policy', + target_uri=self.role_name, + pattern=NamingConventionPattern.IAM_POLICY, + resource_prefix=self.resource_prefix, + ).build_compliant_name_with_index() + + def generate_indexed_policy_name(self, index: int = 0) -> str: + return NamingConventionService( + target_label=f'env-{self.environmentUri}-share-policy', + target_uri=self.role_name, + pattern=NamingConventionPattern.IAM_POLICY, + resource_prefix=self.resource_prefix, + ).build_compliant_name_with_index(index) + def generate_empty_policy(self) -> dict: return { 'Version': '2012-10-17', - 'Statement': [{'Sid': EMPTY_STATEMENT_SID, 'Effect': 'Allow', 'Action': 'none:null', 'Resource': '*'}], + 'Statement': [{'Sid': EMPTY_STATEMENT_SID, 'Effect': 'Allow', 'Action': ['none:null'], 'Resource': ['*']}], } @staticmethod @@ -55,95 +112,48 @@ def remove_empty_statement(policy_doc: dict, statement_sid: str) -> dict: policy_doc['Statement'].pop(statement_index) return policy_doc - def add_missing_resources_to_policy_statement( - self, resource_type: str, target_resources: list, statement_sid: str, policy_document: dict - ): - """ - Checks if the resources are in the existing statement. Otherwise, it will add it. - :param target_resources: list - :param existing_policy_statement: dict - :return - """ - policy_name = self.generate_policy_name() - policy_actions = S3_ALLOWED_ACTIONS if resource_type == 's3' else [f'{resource_type}:*'] - index = self._get_statement_by_sid(policy_document, statement_sid) - if index is None: - log.info(f'{statement_sid} does NOT exists for Managed policy {policy_name} creating statement...') - additional_policy = { - 'Sid': statement_sid, - 'Effect': 'Allow', - 'Action': policy_actions, - 'Resource': target_resources, - } - policy_document['Statement'].append(additional_policy) - else: - # Enforce, that actions are valid - policy_document['Statement'][index]['Action'] = policy_actions - for target_resource in target_resources: - if target_resource not in policy_document['Statement'][index]['Resource']: - log.info( - f'{statement_sid} exists for Managed policy {policy_name} ' - f'but {target_resource} is not included, updating...' 
- ) - policy_document['Statement'][index]['Resource'].extend([target_resource]) - else: - log.info( - f'{statement_sid} exists for Managed policy {policy_name} ' - f'and {target_resource} is included, skipping...' - ) - - def remove_resource_from_statement(self, target_resources: list, statement_sid: str, policy_document: dict): - policy_name = self.generate_policy_name() - index = self._get_statement_by_sid(policy_document, statement_sid) - log.info(f'Removing {target_resources} from Statement[{index}] in Managed policy {policy_name} ...') - if index is None: - log.info(f'{statement_sid} does NOT exists for Managed policy {policy_name} skipping...') - else: - policy_statement = policy_document['Statement'][index] - for target_resource in target_resources: - if target_resource in policy_statement['Resource']: - log.info( - f'{statement_sid} exists for Managed policy {policy_name} ' - f'and {target_resource} is included, removing...' - ) - policy_statement['Resource'].remove(target_resource) - if len(policy_statement['Resource']) == 0: - log.info(f'No more resources in {statement_sid}, removing statement...') - policy_document['Statement'].pop(index) - if len(policy_document['Statement']) == 0: - log.info(f'No more statements in {policy_document}, adding empty statement...') - empty_policy_document = self.generate_empty_policy() - policy_document['Statement'] = empty_policy_document['Statement'] - @staticmethod - def check_resource_in_policy_statement(target_resources: list, existing_policy_statement: dict) -> bool: + def check_resource_in_policy_statements(target_resources: list, existing_policy_statements: List[Any]) -> bool: """ - Checks if the resources are in the existing policy + Checks if the resources are in the existing policy statements :param target_resources: list - :param existing_policy_statement: dict + :param existing_policy_statements: list :return True if all target_resources in the existing policy else False """ + policy_resources = [ + resource for statement in existing_policy_statements for resource in statement.get('Resource') + ] for target_resource in target_resources: - if target_resource not in existing_policy_statement['Resource']: + if target_resource not in policy_resources: return False return True @staticmethod - def check_s3_actions_in_policy_statement(existing_policy_statement: dict) -> (bool, str, str): + def check_s3_actions_in_policy_statements(existing_policy_statements: List[Any]) -> dict: """ - Checks if all required s3 actions are allowed in the existing policy and there is no unallowed actions + Checks if all required s3 actions are allowed in the existing policy and there are no disallowed actions - :param existing_policy_statement: - :return: bool, allowed missing actions string, not allowed actions string + :param existing_policy_statements: + :return: dict keyed by statement Sid with a policy check flag, missing actions string and extra actions string """ - statement_actions = set(existing_policy_statement['Action']) - allowed_actions = set(S3_ALLOWED_ACTIONS) - missing_actions = allowed_actions - statement_actions - extra_actions = statement_actions - allowed_actions - return ( - not (missing_actions or extra_actions), - ','.join(missing_actions), - ','.join(extra_actions), - ) + s3_actions_checker_dict = {} + for statement in existing_policy_statements: + statement_actions = set(statement.get('Action')) + allowed_actions = set(S3_ALLOWED_ACTIONS) + missing_actions = allowed_actions - statement_actions + extra_actions = statement_actions - allowed_actions +
s3_actions_checker_dict[statement.get('Sid')] = { + 'policy_check': (missing_actions or extra_actions), + 'missing_permissions': ','.join(missing_actions), + 'extra_permissions': ','.join(extra_actions), + } + return s3_actions_checker_dict + + @staticmethod + def check_if_sid_exists(sid: str, statements): + for statement in statements: + if sid in statement.get('Sid', ''): + return True + return False @staticmethod def _get_statement_by_sid(policy, sid): @@ -153,7 +163,6 @@ def _get_statement_by_sid(policy, sid): return None # Backwards compatibility - def create_managed_policy_from_inline_and_delete_inline(self): """ For existing consumption and team roles, the IAM managed policy won't be created. @@ -161,52 +170,473 @@ def create_managed_policy_from_inline_and_delete_inline(self): Finally, delete the old obsolete inline policies from the role """ try: - policy_document = self._generate_managed_policy_from_inline_policies() - log.info(f'Creating policy from inline backwards compatibility. Policy = {str(policy_document)}') - policy_arn = IAM.create_managed_policy( - self.account, self.region, self.generate_policy_name(), json.dumps(policy_document) + policy_statements = self._generate_managed_policy_statements_from_inline_policies() + log.info( + f'Creating policy from inline backwards compatibility. Policy Statements = {str(policy_statements)}' ) - + policy_arns = self._create_indexed_managed_policies(policy_statements) # Delete obsolete inline policies log.info(f'Deleting {OLD_IAM_ACCESS_POINT_ROLE_POLICY} and {OLD_IAM_S3BUCKET_ROLE_POLICY}') self._delete_old_inline_policies() except Exception as e: raise Exception(f'Error creating policy from inline policies: {e}') - return policy_arn + return policy_arns + + # Backwards compatibility + def create_managed_indexed_policy_from_managed_policy_delete_old_policy(self): + """ + Previously, only one managed policy was created for a role. 
+ Convert this old managed policy into indexed policies by splitting its statements into chunks. + After converting and splitting, delete the old managed policy. + """ + old_managed_policy_name = self.generate_old_policy_name() + log.info( + f'Converting old managed policy with name: {old_managed_policy_name} to indexed managed policy with index: 0' + ) + policy_document = IAM.get_managed_policy_document_by_name( + account_id=self.account, region=self.region, policy_name=old_managed_policy_name + ) + + if not policy_document: + raise Exception('Failed to fetch policy document while converting to indexed managed policy') + + s3_statements, s3_kms_statements, s3_access_point_statements, s3_kms_access_point_statements = ( + S3SharePolicyService._get_segregated_policy_statements_from_policy(policy_document) + ) + + log.debug(f'Total S3 Bucket sharing statements: {s3_statements}') + log.debug(f'Total KMS Bucket sharing statements : {s3_kms_statements}') + log.debug(f'Total S3 Access-point sharing statements : {s3_access_point_statements}') + log.debug(f'Total KMS Access-point sharing statements : {s3_kms_access_point_statements}') + + policy_statements = [] + if len(s3_statements + s3_access_point_statements) > 0: + existing_bucket_s3_statements = self._split_and_generate_statement_chunks( + statements_s3=s3_statements, statements_kms=s3_kms_statements, sid=IAM_S3_BUCKETS_STATEMENT_SID + ) + existing_bucket_s3_access_point_statement = self._split_and_generate_statement_chunks( + statements_s3=s3_access_point_statements, + statements_kms=s3_kms_access_point_statements, + sid=IAM_S3_ACCESS_POINTS_STATEMENT_SID, + ) + policy_statements = existing_bucket_s3_statements + existing_bucket_s3_access_point_statement + + log.info( + f'Found policy statements for existing managed policy. Number of policy statements after splitting: {len(policy_statements)}' + ) + self._create_indexed_managed_policies(policy_statements) + + if self.check_if_policy_attached(policy_name=old_managed_policy_name): + IAM.detach_policy_from_role( + account_id=self.account, + region=self.region, + role_name=self.role_name, + policy_name=old_managed_policy_name, + ) + + IAM.delete_managed_policy_non_default_versions( + account_id=self.account, region=self.region, policy_name=old_managed_policy_name + ) + IAM.delete_managed_policy_by_name( + account_id=self.account, region=self.region, policy_name=old_managed_policy_name + ) + + def merge_statements_and_update_policies( + self, target_sid: str, target_s3_statements: List[Any], target_s3_kms_statements: List[Any] + ): + """ + This method is responsible for merging policy statements and re-generating the statement chunks. + Creates new policies (if needed) and then updates existing policies with statement chunks. + Based on target_sid: + 1. Merges all the S3 statements + 2. Splits the statements into policy chunks, where each chunk stays within the maximum policy size (approximately) + 3. Checks if there are any missing policies and creates them + 4. Checks if extra policies are required and whether they can be attached to the role (at the time of writing, an IAM role allows 10 attached managed policies by default, a quota that can be increased to 20) + 5. Once policies are created, fills/updates the policies with the policy chunks + 6.
Delete ( if any ) extra policies which are remaining + """ + share_managed_policies_name_list = self.get_managed_policies() + total_s3_iam_policy_stmts: List[Dict] = [] + total_s3_iam_policy_kms_stmts: List[Dict] = [] + total_s3_iam_policy_access_point_stmts: List[Dict] = [] + total_s3_iam_policy_access_point_kms_stmts: List[Dict] = [] + + if target_sid == IAM_S3_BUCKETS_STATEMENT_SID: + total_s3_iam_policy_stmts = target_s3_statements + total_s3_iam_policy_kms_stmts = target_s3_kms_statements + total_s3_iam_policy_access_point_stmts.extend(self.total_s3_access_point_stmts) + total_s3_iam_policy_access_point_kms_stmts.extend(self.total_s3_access_point_kms_stmts) + else: + total_s3_iam_policy_access_point_stmts = target_s3_statements + total_s3_iam_policy_access_point_kms_stmts = target_s3_kms_statements + total_s3_iam_policy_stmts.extend(self.total_s3_stmts) + total_s3_iam_policy_kms_stmts.extend(self.total_s3_kms_stmts) + + aggregated_iam_policy_statements = ( + total_s3_iam_policy_stmts + + total_s3_iam_policy_kms_stmts + + total_s3_iam_policy_access_point_stmts + + total_s3_iam_policy_access_point_kms_stmts + ) + log.info(f'Total number of policy statements after merging: {len(aggregated_iam_policy_statements)}') + + if len(aggregated_iam_policy_statements) == 0: + log.info('Attaching empty policy statement') + empty_policy = self.generate_empty_policy() + log.info(empty_policy['Statement']) + aggregated_iam_policy_statements = empty_policy['Statement'] + + policy_document_chunks = split_policy_statements_in_chunks(aggregated_iam_policy_statements) + log.info(f'Number of policy chunks created: {len(policy_document_chunks)}') + log.debug(policy_document_chunks) + + log.info('Checking if there are any missing policies.') + # Check if there are policies which do not exist but should have existed + current_policy_indexes = [int(policy[-1]) for policy in share_managed_policies_name_list] + integer_indexes = list(range(0, len(share_managed_policies_name_list))) + missing_policies_indexes = [index for index in integer_indexes if index not in current_policy_indexes] + if len(missing_policies_indexes) > 0: + log.info(f'Creating missing policies for indexes: {missing_policies_indexes}') + self._create_empty_policies_with_indexes(indexes=missing_policies_indexes) + + # Check if managed policies can be attached to target requester role and new service policies do not exceed service quota limit + log.info('Checking service quota limit for number of managed policies which can be attached to role') + self._check_iam_managed_policy_attachment_limit(policy_document_chunks) + + # Check if the number of policies required are greater than currently present + if len(policy_document_chunks) > len(share_managed_policies_name_list): + additional_policy_indexes = list(range(len(share_managed_policies_name_list), len(policy_document_chunks))) + log.info( + f'Number of policies needed are more than existing number of policies. 
Creating policies with indexes: {additional_policy_indexes}' + ) + self._create_empty_policies_with_indexes(indexes=additional_policy_indexes) + + updated_share_managed_policies_name_list = self.get_managed_policies() + + log.info('Updating policy_version_map for any newly created policies') + # Update the dict tracking the policy version for new policies which were created + for managed_policy_name in updated_share_managed_policies_name_list: + if managed_policy_name not in self.policy_version_map: + self.policy_version_map[managed_policy_name] = 'v1' + + for index, statement_chunk in enumerate(policy_document_chunks): + policy_document = self._generate_policy_document_from_statements(statement_chunk) + # If statement length is greater than 1 then check if has empty statements sid and remove it + if len(policy_document.get('Statement')) > 1: + log.info('Removing empty policy statements') + policy_document = S3SharePolicyService.remove_empty_statement( + policy_doc=policy_document, statement_sid=EMPTY_STATEMENT_SID + ) + policy_name = self.generate_indexed_policy_name(index=index) + log.debug(f'Policy document for policy {policy_name}: {policy_document}') + IAM.update_managed_policy_default_version( + self.account, + self.region, + policy_name, + self.policy_version_map.get(policy_name, 'v1'), + json.dumps(policy_document), + ) - def _generate_managed_policy_from_inline_policies(self): + # Deleting excess policies + if len(policy_document_chunks) < len(updated_share_managed_policies_name_list): + excess_policies_indexes = list( + range(len(policy_document_chunks), len(updated_share_managed_policies_name_list)) + ) + log.info(f'Found more policies than needed. Deleting policies with indexes: {excess_policies_indexes}') + self._delete_policies_with_indexes(indexes=excess_policies_indexes) + + def _delete_policies_with_indexes(self, indexes): + for index in indexes: + policy_name = self.generate_indexed_policy_name(index=index) + log.info(f'Deleting policy {policy_name}') + # Checking if policy exist or not first before deleting + if self.check_if_policy_exists(policy_name=policy_name): + if self.check_if_policy_attached(policy_name=policy_name): + IAM.detach_policy_from_role( + account_id=self.account, region=self.region, role_name=self.role_name, policy_name=policy_name + ) + IAM.delete_managed_policy_non_default_versions( + account_id=self.account, region=self.region, policy_name=policy_name + ) + IAM.delete_managed_policy_by_name(account_id=self.account, region=self.region, policy_name=policy_name) + else: + log.info(f'Policy with name {policy_name} does not exist') + + def _create_empty_policies_with_indexes(self, indexes): + for index in indexes: + policy_name = self.generate_indexed_policy_name(index=index) + policy_document = self.generate_empty_policy() + IAM.create_managed_policy(self.account, self.region, policy_name, json.dumps(policy_document)) + + def _create_indexed_managed_policies(self, policy_statements: List[Dict]): + if not policy_statements: + log.info( + 'No policy statements supplied while creating indexed managed policies. Creating an empty policy statement.' 
+ ) + empty_policy = self.generate_empty_policy() + policy_statements = empty_policy['Statement'] + + policy_document_chunks = split_policy_statements_in_chunks(policy_statements) + log.info(f'Number of Policy chunks made: {len(policy_document_chunks)}') + + log.info( + 'Checking service quota limit for number of managed policies which can be attached to role before converting' + ) + self._check_iam_managed_policy_attachment_limit(policy_document_chunks) + + policy_arns = [] + for index, statement_chunk in enumerate(policy_document_chunks): + policy_document = self._generate_policy_document_from_statements(statement_chunk) + indexed_policy_name = self.generate_indexed_policy_name(index=index) + policy_arns.append( + IAM.create_managed_policy(self.account, self.region, indexed_policy_name, json.dumps(policy_document)) + ) + + return policy_arns + + def _check_iam_managed_policy_attachment_limit(self, policy_document_chunks): + number_of_policies_needed = len(policy_document_chunks) + policies_present = self.get_managed_policies() + managed_policies_attached_to_role = IAM.get_attached_managed_policies_to_role( + account_id=self.account, region=self.region, role_name=self.role_name + ) + number_of_non_share_managed_policies_attached_to_role = len( + [policy for policy in managed_policies_attached_to_role if policy not in policies_present] + ) + log.info( + f'number_of_non_share_managed_policies_attached_to_role: {number_of_non_share_managed_policies_attached_to_role}' + ) + + managed_iam_policy_quota = self._get_managed_policy_quota() + if number_of_policies_needed + number_of_non_share_managed_policies_attached_to_role > managed_iam_policy_quota: + # Send an email notification to the requestors to increase the quota + log.error( + f'Number of policies which can be attached to the role is more than the service quota limit: {managed_iam_policy_quota}' + ) + raise AWSServiceQuotaExceeded( + action='_check_iam_managed_policy_attachment_limit', + message=f'Number of policies which can be attached to the role is more than the service quota limit: {managed_iam_policy_quota}', + ) + + log.info(f'Role: {self.role_name} has capacity to attach managed policies') + + def _get_managed_policy_quota(self): + # Get the number of managed policies which can be attached to the IAM role + service_quota_client = ServiceQuota(account_id=self.account, region=self.region) + service_code_list = service_quota_client.list_services() + service_code = None + for service in service_code_list: + if service.get('ServiceName') == IAM_SERVICE_NAME: + service_code = service.get('ServiceCode') + break + + service_quota_code = None + if service_code: + service_quota_codes = service_quota_client.list_service_quota(service_code=service_code) + for service_quota_cd in service_quota_codes: + if service_quota_cd.get('QuotaName') == IAM_SERVICE_QUOTA_NAME: + service_quota_code = service_quota_cd.get('QuotaCode') + break + + managed_iam_policy_quota = None + if service_quota_code: + managed_iam_policy_quota = service_quota_client.get_service_quota_value( + service_code=service_code, service_quota_code=service_quota_code + ) + + if managed_iam_policy_quota is None: + managed_iam_policy_quota = DEFAULT_MAX_ATTACHABLE_MANAGED_POLICIES_ACCOUNT + + return managed_iam_policy_quota + + @staticmethod + def _get_segregated_policy_statements_from_policy(policy_document): + """Function to split the policy document and collect policy statements relating to S3 & KMS for bucket and access point shares + policy_document: IAM policy document + 
returns: s3_statements, s3_kms_statements, s3_access_point_statements, s3_kms_access_point_statements + """ + policy_statements = policy_document.get('Statement', []) + s3_statements = [ + policy_stmt + for policy_stmt in policy_statements + if f'{IAM_S3_BUCKETS_STATEMENT_SID}S3' in policy_stmt.get('Sid', '') + ] + s3_kms_statements = [ + policy_stmt + for policy_stmt in policy_statements + if f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS' in policy_stmt.get('Sid', '') + ] + s3_access_point_statements = [ + policy_stmt + for policy_stmt in policy_statements + if f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3' in policy_stmt.get('Sid', '') + ] + s3_kms_access_point_statements = [ + policy_stmt + for policy_stmt in policy_statements + if f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS' in policy_stmt.get('Sid', '') + ] + + return s3_statements, s3_kms_statements, s3_access_point_statements, s3_kms_access_point_statements + + def add_resources_and_generate_split_statements(self, statements, target_resources, sid, resource_type): + """ + Method which adds target resources to the statements & splits the statements in chunks + returns : policy statements chunks + """ + # Using _convert_to_array to convert to array if single resource is present and its not in array + s3_statements_resources: List[str] = [ + resource + for statement in statements + for resource in S3SharePolicyService._convert_to_array(str, statement.get('Resource')) + ] + for target_resource in target_resources: + if target_resource not in s3_statements_resources: + s3_statements_resources.append(target_resource) + + if len(s3_statements_resources) == 0: + return [] + + statement_chunks = split_policy_with_resources_in_statements( + base_sid=sid, + effect='Allow', + actions=S3_ALLOWED_ACTIONS if resource_type == 's3' else [f'{resource_type}:*'], + resources=s3_statements_resources, + ) + return statement_chunks + + def remove_resources_and_generate_split_statements(self, statements, target_resources, sid, resource_type): + """ + Method which removes target resources from the statements & splits the statements in chunks + returns : policy statements chunks + """ + s3_statements_resources = [ + resource + for statement in statements + for resource in S3SharePolicyService._convert_to_array(str, statement.get('Resource')) + ] + s3_statements_resources = [resource for resource in s3_statements_resources if resource not in target_resources] + + if len(s3_statements_resources) == 0: + return [] + + statement_chunks = split_policy_with_resources_in_statements( + base_sid=sid, + effect='Allow', + actions=S3_ALLOWED_ACTIONS if resource_type == 's3' else [f'{resource_type}:*'], + resources=s3_statements_resources, + ) + return statement_chunks + + # If item is of item type i.e. single instance if present, then wrap in an array. 
+ # This is helpful at places where array is required even if one element is present + @staticmethod + def _convert_to_array(item_type, item): + if isinstance(item, item_type): + return [item] + return item + + def _generate_policy_document_from_statements(self, statements: List[Dict]): + """ + Helper method to generate a policy from statements + """ + if statements is None: + raise Exception('Provide valid statements while generating policy document from statement') + return {'Version': '2012-10-17', 'Statement': statements} + + def _generate_managed_policy_statements_from_inline_policies(self): """ Get resources shared in previous inline policies If there are already shared resources, add them to the empty policy and remove the fake statement return: IAM policy document """ - existing_bucket_s3, existing_bucket_kms = self._get_policy_resources_from_inline_policy( + existing_bucket_s3_resources, existing_bucket_kms_resources = self._get_policy_resources_from_inline_policy( OLD_IAM_S3BUCKET_ROLE_POLICY ) - existing_access_points_s3, existing_access_points_kms = self._get_policy_resources_from_inline_policy( - OLD_IAM_ACCESS_POINT_ROLE_POLICY + existing_access_points_s3_resources, existing_access_points_kms_resources = ( + self._get_policy_resources_from_inline_policy(OLD_IAM_ACCESS_POINT_ROLE_POLICY) ) - log.info(f'Back-filling S3BUCKET sharing resources: S3={existing_bucket_s3}, KMS={existing_bucket_kms}') log.info( - f'Back-filling S3ACCESS POINTS sharing resources: S3={existing_access_points_s3}, KMS={existing_access_points_kms}' + f'Back-filling S3BUCKET sharing resources: S3={existing_bucket_s3_resources}, KMS={existing_bucket_kms_resources}' + ) + log.info( + f'Back-filling S3ACCESS POINTS sharing resources: S3={existing_access_points_s3_resources}, KMS={existing_access_points_kms_resources}' + ) + bucket_s3_statement, bucket_kms_statement = self._generate_statement_from_inline_resources( + existing_bucket_s3_resources, existing_bucket_kms_resources, IAM_S3_BUCKETS_STATEMENT_SID ) - if len(existing_bucket_s3 + existing_access_points_s3) > 0: - new_policy = {'Version': '2012-10-17', 'Statement': []} - updated_policy = self._update_policy_resources_from_inline_policy( - policy=new_policy, - statement_sid=IAM_S3_BUCKETS_STATEMENT_SID, - existing_s3=existing_bucket_s3, - existing_kms=existing_bucket_kms, + access_points_s3_statement, access_points_kms_statement = self._generate_statement_from_inline_resources( + existing_access_points_s3_resources, + existing_access_points_kms_resources, + IAM_S3_ACCESS_POINTS_STATEMENT_SID, + ) + + policy_statements = [] + if len(existing_bucket_s3_resources + existing_access_points_s3_resources) > 0: + # Split the statements in chunks + existing_bucket_s3_statements = self._split_and_generate_statement_chunks( + statements_s3=bucket_s3_statement, statements_kms=bucket_kms_statement, sid=IAM_S3_BUCKETS_STATEMENT_SID ) - updated_policy = self._update_policy_resources_from_inline_policy( - policy=updated_policy, - statement_sid=IAM_S3_ACCESS_POINTS_STATEMENT_SID, - existing_s3=existing_access_points_s3, - existing_kms=existing_access_points_kms, + existing_bucket_s3_access_point_statements = self._split_and_generate_statement_chunks( + statements_s3=access_points_s3_statement, + statements_kms=access_points_kms_statement, + sid=IAM_S3_ACCESS_POINTS_STATEMENT_SID, ) - else: - updated_policy = self.generate_empty_policy() - return updated_policy + policy_statements = existing_bucket_s3_statements + existing_bucket_s3_access_point_statements + + 
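# [Illustrative aside, not part of the patch] Assuming the old inline policies held a
# single bucket ARN and its KMS key (hypothetical values below), the back-filled
# policy_statements list built above would look roughly like:
#   [
#     {'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S30', 'Effect': 'Allow',
#      'Action': ['s3:List*', 's3:Describe*', 's3:GetObject'],
#      'Resource': ['arn:aws:s3:::example-bucket', 'arn:aws:s3:::example-bucket/*']},
#     {'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS0', 'Effect': 'Allow',
#      'Action': ['kms:*'],
#      'Resource': ['arn:aws:kms:eu-west-1:111122223333:key/example-key-id']},
#   ]
# split_policy_with_resources_in_statements() appends a numeric index to each base Sid
# (S30, S31, ..., KMS0, KMS1, ...) and only emits additional chunks when the resource
# list would push a single statement past the IAM policy size limit.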
log.debug(f'Created policy statements with length: {len(policy_statements)}') + return policy_statements + + def _split_and_generate_statement_chunks(self, statements_s3, statements_kms, sid): + """ + Helper method to aggregate S3 and KMS statements for Bucket and Accesspoint shares and split into chunks + """ + aggregate_statements = [] + if len(statements_s3) > 0: + statement_resources = [ + resource + for statement in statements_s3 + for resource in S3SharePolicyService._convert_to_array(str, statement.get('Resource')) + ] + aggregate_statements.extend( + split_policy_with_resources_in_statements( + base_sid=f'{sid}S3', + effect='Allow', + actions=['s3:List*', 's3:Describe*', 's3:GetObject'], + resources=statement_resources, + ) + ) + if len(statements_kms) > 0: + statement_resources = [ + resource + for statement in statements_kms + for resource in S3SharePolicyService._convert_to_array(str, statement.get('Resource')) + ] + aggregate_statements.extend( + split_policy_with_resources_in_statements( + base_sid=f'{sid}KMS', effect='Allow', actions=['kms:*'], resources=statement_resources + ) + ) + return aggregate_statements + + def _generate_statement_from_inline_resources(self, bucket_s3_resources, bucket_kms_resources, base_sid): + bucket_s3_statement = [] + bucket_kms_statement = [] + if len(bucket_s3_resources) > 0: + bucket_s3_statement.append( + { + 'Sid': f'{base_sid}S3', + 'Effect': 'Allow', + 'Action': S3_ALLOWED_ACTIONS, + 'Resource': bucket_s3_resources, + } + ) + if len(bucket_kms_resources) > 0: + bucket_kms_statement.append( + {'Sid': f'{base_sid}KMS', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': bucket_kms_resources} + ) + log.info(f'Generated statement from resources: S3: {bucket_s3_statement}, KMS: {bucket_kms_statement}') + return bucket_s3_statement, bucket_kms_statement def _get_policy_resources_from_inline_policy(self, policy_name): # This function can only be used for backwards compatibility where policies had statement[0] for s3 @@ -224,36 +654,44 @@ def _get_policy_resources_from_inline_policy(self, policy_name): log.error(f'Failed to retrieve the existing policy {policy_name}: {e} ') return [], [] - def _update_policy_resources_from_inline_policy(self, policy, statement_sid, existing_s3, existing_kms): - # This function can only be used for backwards compatibility where policies had statement[0] for s3 - # and statement[1] for KMS permissions - if len(existing_s3) > 0: - additional_policy = { - 'Sid': f'{statement_sid}S3', - 'Effect': 'Allow', - 'Action': ['s3:List*', 's3:Describe*', 's3:GetObject'], - 'Resource': existing_s3, - } - policy['Statement'].append(additional_policy) - if len(existing_kms) > 0: - additional_policy = { - 'Sid': f'{statement_sid}KMS', - 'Effect': 'Allow', - 'Action': ['kms:*'], - 'Resource': existing_kms, - } - policy['Statement'].append(additional_policy) - return policy - def _delete_old_inline_policies(self): for policy_name in [OLD_IAM_S3BUCKET_ROLE_POLICY, OLD_IAM_ACCESS_POINT_ROLE_POLICY]: try: existing_policy = IAM.get_role_policy(self.account, self.region, self.role_name, policy_name) if existing_policy is not None: - log.info(f'Deleting inline policy {policy_name}...') + log.info(f'Deleting inline policy: {policy_name}') IAM.delete_role_policy(self.account, self.region, self.role_name, policy_name) else: pass except Exception as e: log.error(f'Failed to retrieve the existing policy {policy_name}: {e} ') return True + + def process_backwards_compatibility_for_target_iam_roles(self): + """ + Backwards compatibility + we 
check if a managed share policy exists. If False, the role was introduced to data.all before this update + we create the policy from the inline statements and attach it to the role + """ + log.info('Checking if inline policies are present') + old_managed_policy_name = self.generate_old_policy_name() + old_managed_policy_exists = self.check_if_policy_exists(policy_name=old_managed_policy_name) + share_managed_policies_exist = True if self.get_managed_policies() else False + # If old managed policy doesn't exist and also new managed policies do not exist. + # Then there might be inline policies, convert them to managed indexed policies + if not old_managed_policy_exists and not share_managed_policies_exist: + self.create_managed_policy_from_inline_and_delete_inline() + managed_policies_list = self.get_managed_policies() + self.attach_policies(managed_policies_list) + # End of backwards compatibility + + """ + Backwards compatibility + After 2.6, the IAM managed policies are created with indexes on them. This was made to solve this issue decribed here - https://github.com/data-dot-all/dataall/issues/884 + If an old managed policy exists then + """ + log.info(f'Old managed policy with name {old_managed_policy_name} exists: {old_managed_policy_exists}') + if old_managed_policy_exists: + self.create_managed_indexed_policy_from_managed_policy_delete_old_policy() + managed_policies_list = self.get_managed_policies() + self.attach_policies(managed_policies_list) diff --git a/backend/dataall/modules/s3_datasets_shares/services/s3_share_validator.py b/backend/dataall/modules/s3_datasets_shares/services/s3_share_validator.py index 657afb4ec..644591fec 100644 --- a/backend/dataall/modules/s3_datasets_shares/services/s3_share_validator.py +++ b/backend/dataall/modules/s3_datasets_shares/services/s3_share_validator.py @@ -131,21 +131,35 @@ def _validate_iam_role_and_policy( region=environment.region, resource_prefix=environment.resourcePrefix, ) - for Policy in [ + for policy_manager in [ Policy for Policy in share_policy_manager.initializedPolicies if Policy.policy_type == 'SharePolicy' ]: - # Backwards compatibility + # Backwards compatibility - 1 # we check if a managed share policy exists. If False, the role was introduced to data.all before this update # We create the policy from the inline statements # In this case it could also happen that the role is the Admin of the environment - if not Policy.check_if_policy_exists(): - Policy.create_managed_policy_from_inline_and_delete_inline() + old_managed_policy_name = policy_manager.generate_old_policy_name() + old_managed_policy_exists = policy_manager.check_if_policy_exists(policy_name=old_managed_policy_name) + share_managed_policies_exist = True if policy_manager.get_managed_policies() else False + # If old managed policy doesn't exist and also new managed policies do not exist. 
+ # Then there might be inline policies, convert them to managed indexed policies + if not old_managed_policy_exists and not share_managed_policies_exist: + policy_manager.create_managed_policy_from_inline_and_delete_inline() # End of backwards compatibility - attached = Policy.check_if_policy_attached() - if not attached and not managed and not attachMissingPolicies: + # Backwards compatibility - 2 + # Check if an already existing managed policy is present in old format + # If yes, convert it to the indexed format + old_managed_policy_name = policy_manager.generate_old_policy_name() + if policy_manager.check_if_policy_exists(old_managed_policy_name): + policy_manager.create_managed_indexed_policy_from_managed_policy_delete_old_policy() + # End of backwards compatibility + + unattached = policy_manager.get_policies_unattached_to_role() + if unattached and not managed and not attachMissingPolicies: raise Exception( - f'Required customer managed policy {Policy.generate_policy_name()} is not attached to role {principal_role_name}' + f'Required customer managed policies {policy_manager.get_policies_unattached_to_role()} are not attached to role {principal_role_name}' ) - elif not attached: - Policy.attach_policy() + elif unattached: + managed_policy_list = policy_manager.get_policies_unattached_to_role() + policy_manager.attach_policies(managed_policy_list) diff --git a/backend/dataall/modules/s3_datasets_shares/services/share_managers/__init__.py b/backend/dataall/modules/s3_datasets_shares/services/share_managers/__init__.py index df0af76bf..99b109480 100644 --- a/backend/dataall/modules/s3_datasets_shares/services/share_managers/__init__.py +++ b/backend/dataall/modules/s3_datasets_shares/services/share_managers/__init__.py @@ -1,3 +1,3 @@ -from .s3_access_point_share_manager import S3AccessPointShareManager from .lf_share_manager import LFShareManager +from .s3_access_point_share_manager import S3AccessPointShareManager from .s3_bucket_share_manager import S3BucketShareManager diff --git a/backend/dataall/modules/s3_datasets_shares/services/share_managers/s3_access_point_share_manager.py b/backend/dataall/modules/s3_datasets_shares/services/share_managers/s3_access_point_share_manager.py index 0c82bbda8..791960ff6 100644 --- a/backend/dataall/modules/s3_datasets_shares/services/share_managers/s3_access_point_share_manager.py +++ b/backend/dataall/modules/s3_datasets_shares/services/share_managers/s3_access_point_share_manager.py @@ -1,8 +1,11 @@ import logging import json +import time from itertools import count +from typing import List +from warnings import warn - +from dataall.base.db.exceptions import AWSServiceQuotaExceeded from dataall.core.environment.services.environment_service import EnvironmentService from dataall.base.db import utils from dataall.base.aws.sts import SessionHelper @@ -32,8 +35,8 @@ from dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service import ( S3SharePolicyService, IAM_S3_ACCESS_POINTS_STATEMENT_SID, - EMPTY_STATEMENT_SID, ) +from dataall.modules.shares_base.services.share_notification_service import ShareNotificationService from dataall.modules.shares_base.services.shares_enums import PrincipalType from dataall.modules.s3_datasets.db.dataset_models import DatasetStorageLocation, S3Dataset from dataall.modules.shares_base.services.sharing_service import ShareData @@ -165,26 +168,45 @@ def check_target_role_access_policy(self) -> None: kms_client = KmsClient(self.dataset_account_id, self.source_environment.region) kms_key_id = 
kms_client.get_key_id(key_alias) share_policy_service = S3SharePolicyService( - environmentUri=self.target_environment.environmentUri, + role_name=self.target_requester_IAMRoleName, account=self.target_environment.AwsAccountId, region=self.target_environment.region, - role_name=self.target_requester_IAMRoleName, + environmentUri=self.target_environment.environmentUri, resource_prefix=self.target_environment.resourcePrefix, ) - share_resource_policy_name = share_policy_service.generate_policy_name() + share_policy_service.initialize_statements() - if not share_policy_service.check_if_policy_exists(): - logger.info(f'IAM Policy {share_resource_policy_name} does not exist') - self.folder_errors.append(ShareErrorFormatter.dne_error_msg('IAM Policy', share_resource_policy_name)) - return + share_resource_policy_name = share_policy_service.generate_indexed_policy_name(index=0) + is_managed_policies_exists = True if share_policy_service.get_managed_policies() else False - if not share_policy_service.check_if_policy_attached(): - logger.info( - f'IAM Policy {share_resource_policy_name} exists but is not attached to role {self.share.principalRoleName}' + # Checking if managed policies without indexes are present. This is used for backward compatibility + if not is_managed_policies_exists: + warn( + "Convert all your share's requestor policies to managed policies with indexes.", + DeprecationWarning, + stacklevel=2, ) - self.folder_errors.append( - ShareErrorFormatter.dne_error_msg('IAM Policy attached', share_resource_policy_name) + old_managed_policy_name = share_policy_service.generate_old_policy_name() + old_policy_exist = share_policy_service.check_if_policy_exists(policy_name=old_managed_policy_name) + if not old_policy_exist: + logger.info( + f'No managed policy exists for the role: {self.target_requester_IAMRoleName}, Reapply share to create indexed managed policies.' + ) + self.folder_errors.append(ShareErrorFormatter.dne_error_msg('IAM Policy', share_resource_policy_name)) + return + else: + logger.info( + f'Old managed policy exists for the role: {self.target_requester_IAMRoleName}. Reapply share to create indexed managed policies.' + ) + self.folder_errors.append(ShareErrorFormatter.dne_error_msg('IAM Policy', share_resource_policy_name)) + return + + unattached_policies: List[str] = share_policy_service.get_policies_unattached_to_role() + if len(unattached_policies) > 0: + logger.info( + f'IAM Policies {unattached_policies} exists but are not attached to role {self.share.principalRoleName}' ) + self.folder_errors.append(ShareErrorFormatter.dne_error_msg('IAM Policy attached', unattached_policies)) return s3_target_resources = [ @@ -194,101 +216,99 @@ def check_target_role_access_policy(self) -> None: f'arn:aws:s3:{self.dataset_region}:{self.dataset_account_id}:accesspoint/{self.access_point_name}/*', ] - version_id, policy_document = IAM.get_managed_policy_default_version( - self.target_environment.AwsAccountId, self.target_environment.region, share_resource_policy_name - ) - logger.info(f'Policy... 
{policy_document}') - - s3_statement_index = S3SharePolicyService._get_statement_by_sid( - policy_document, f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3' - ) - - if s3_statement_index is None: - logger.info(f'IAM Policy Statement {IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3 does not exist') + if not S3SharePolicyService.check_if_sid_exists( + f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', share_policy_service.total_s3_access_point_stmts + ): + logger.info( + f'IAM Policy Statement with Sid: {IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3 - where can be 0,1,2.. - does not exist' + ) self.folder_errors.append( ShareErrorFormatter.missing_permission_error_msg( self.target_requester_IAMRoleName, - 'IAM Policy Statement', - f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', + 'IAM Policy Statement Sid', + f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', 'S3 Bucket', f'{self.bucket_name}', ) ) - - elif not share_policy_service.check_resource_in_policy_statement( + elif not share_policy_service.check_resource_in_policy_statements( target_resources=s3_target_resources, - existing_policy_statement=policy_document['Statement'][s3_statement_index], + existing_policy_statements=share_policy_service.total_s3_access_point_stmts, ): logger.info( - f'IAM Policy Statement {IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3 does not contain resources {s3_target_resources}' + f'IAM Policy Statement with Sid {IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3 - where can be 0,1,2.. - does not contain resources {s3_target_resources}' ) self.folder_errors.append( ShareErrorFormatter.missing_permission_error_msg( self.target_requester_IAMRoleName, - 'IAM Policy Resource', - f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', + 'IAM Policy Resource(s)', + f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', 'S3 Bucket', f'{self.bucket_name}', ) ) else: - policy_check, missing_permissions, extra_permissions = ( - share_policy_service.check_s3_actions_in_policy_statement( - existing_policy_statement=policy_document['Statement'][s3_statement_index] - ) + policy_sid_actions_map = share_policy_service.check_s3_actions_in_policy_statements( + existing_policy_statements=share_policy_service.total_s3_access_point_stmts ) - if not policy_check: - logger.info(f'IAM Policy Statement {IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3 has invalid actions') - if missing_permissions: - self.folder_errors.append( - ShareErrorFormatter.missing_permission_error_msg( - self.target_requester_IAMRoleName, - 'IAM Policy Action', - missing_permissions, - 'S3 Bucket', - f'{self.bucket_name}', + for sid in policy_sid_actions_map: + policy_check = policy_sid_actions_map[sid].get('policy_check') + missing_permissions = policy_sid_actions_map[sid].get('missing_permissions') + extra_permissions = policy_sid_actions_map[sid].get('extra_permissions') + # Check if policy violations are present + if policy_check: + logger.info(f'IAM Policy Statement {sid} has invalid actions') + if missing_permissions: + self.folder_errors.append( + ShareErrorFormatter.missing_permission_error_msg( + self.target_requester_IAMRoleName, + 'IAM Policy Action', + missing_permissions, + 'S3 Bucket', + f'{self.bucket_name}', + ) ) - ) - if extra_permissions: - self.folder_errors.append( - ShareErrorFormatter.not_allowed_permission_error_msg( - self.target_requester_IAMRoleName, - 'IAM Policy Action', - extra_permissions, - 'S3 Bucket', - f'{self.bucket_name}', + if extra_permissions: + self.folder_errors.append( + ShareErrorFormatter.not_allowed_permission_error_msg( + self.target_requester_IAMRoleName, + 'IAM Policy Action', + extra_permissions, + 'S3 
Bucket', + f'{self.bucket_name}', + ) ) - ) if kms_key_id: - kms_statement_index = S3SharePolicyService._get_statement_by_sid( - policy_document, f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS' - ) kms_target_resources = [f'arn:aws:kms:{self.dataset_region}:{self.dataset_account_id}:key/{kms_key_id}'] - if not kms_statement_index: - logger.info(f'IAM Policy Statement {IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS does not exist') + + if not S3SharePolicyService.check_if_sid_exists( + f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', share_policy_service.total_s3_access_point_kms_stmts + ): + logger.info( + f'IAM Policy Statement with Sid: {IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS - where can be 0,1,2.. - does not exist' + ) self.folder_errors.append( ShareErrorFormatter.missing_permission_error_msg( self.target_requester_IAMRoleName, 'IAM Policy Statement', - f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', + f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', 'KMS Key', f'{kms_key_id}', ) ) - - elif not share_policy_service.check_resource_in_policy_statement( + elif not share_policy_service.check_resource_in_policy_statements( target_resources=kms_target_resources, - existing_policy_statement=policy_document['Statement'][kms_statement_index], + existing_policy_statements=share_policy_service.total_s3_access_point_kms_stmts, ): logger.info( - f'IAM Policy Statement {IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS does not contain resources {kms_target_resources}' + f'IAM Policy Statement {IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS - where can be 0,1,2.. - does not contain resources {kms_target_resources}' ) self.folder_errors.append( ShareErrorFormatter.missing_permission_error_msg( self.target_requester_IAMRoleName, 'IAM Policy Resource', - f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', + f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', 'KMS Key', f'{kms_key_id}', ) @@ -297,40 +317,22 @@ def check_target_role_access_policy(self) -> None: def grant_target_role_access_policy(self): """ Updates requester IAM role policy to include requested S3 bucket and access point - :return: + :returns: None or raises exception if something fails """ logger.info(f'Grant target role {self.target_requester_IAMRoleName} access policy') share_policy_service = S3SharePolicyService( - environmentUri=self.target_environment.environmentUri, + role_name=self.target_requester_IAMRoleName, account=self.target_environment.AwsAccountId, region=self.target_environment.region, - role_name=self.target_requester_IAMRoleName, + environmentUri=self.target_environment.environmentUri, resource_prefix=self.target_environment.resourcePrefix, ) + # Process all backwards compatibility tasks and convert to indexed policies + share_policy_service.process_backwards_compatibility_for_target_iam_roles() - # Backwards compatibility - # we check if a managed share policy exists. 
If False, the role was introduced to data.all before this update - # We create the policy from the inline statements and attach it to the role - if not share_policy_service.check_if_policy_exists(): - share_policy_service.create_managed_policy_from_inline_and_delete_inline() - share_policy_service.attach_policy() - # End of backwards compatibility - - if not share_policy_service.check_if_policy_attached(): - if self.share.principalType == PrincipalType.Group.value: - share_policy_service.attach_policy() - else: - consumption_role = EnvironmentService.get_consumption_role( - session=self.session, uri=self.share.principalId - ) - if consumption_role.dataallManaged: - share_policy_service.attach_policy() - - share_resource_policy_name = share_policy_service.generate_policy_name() - version_id, policy_document = IAM.get_managed_policy_default_version( - self.target_account_id, self.target_environment.region, share_resource_policy_name - ) + # Parses all policy documents and extracts s3 and kms statements + share_policy_service.initialize_statements() key_alias = f'alias/{self.dataset.KmsAlias}' kms_client = KmsClient(self.dataset_account_id, self.source_environment.region) @@ -342,32 +344,64 @@ def grant_target_role_access_policy(self): f'arn:aws:s3:{self.dataset_region}:{self.dataset_account_id}:accesspoint/{self.access_point_name}', f'arn:aws:s3:{self.dataset_region}:{self.dataset_account_id}:accesspoint/{self.access_point_name}/*', ] + kms_target_resources = [] + if kms_key_id: + kms_target_resources = [f'arn:aws:kms:{self.dataset_region}:{self.dataset_account_id}:key/{kms_key_id}'] - share_policy_service.add_missing_resources_to_policy_statement( - resource_type='s3', + s3_statements = share_policy_service.total_s3_access_point_stmts + s3_statement_chunks = share_policy_service.add_resources_and_generate_split_statements( + statements=s3_statements, target_resources=s3_target_resources, - statement_sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', - policy_document=policy_document, + sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', + resource_type='s3', ) - - share_policy_service.remove_empty_statement(policy_doc=policy_document, statement_sid=EMPTY_STATEMENT_SID) - - if kms_key_id: - kms_target_resources = [f'arn:aws:kms:{self.dataset_region}:{self.dataset_account_id}:key/{kms_key_id}'] - share_policy_service.add_missing_resources_to_policy_statement( - resource_type='kms', - target_resources=kms_target_resources, - statement_sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', - policy_document=policy_document, + logger.info(f'Number of S3 statements created after splitting: {len(s3_statement_chunks)}') + logger.debug(f'S3 statements after adding resources and splitting: {s3_statement_chunks}') + + s3_kms_statements = share_policy_service.total_s3_access_point_kms_stmts + s3_kms_statement_chunks = share_policy_service.add_resources_and_generate_split_statements( + statements=s3_kms_statements, + target_resources=kms_target_resources, + sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', + resource_type='kms', + ) + logger.info(f'Number of S3 KMS statements created after splitting: {len(s3_kms_statement_chunks)}') + logger.debug(f'S3 KMS statements after adding resources and splitting: {s3_kms_statement_chunks}') + + try: + share_policy_service.merge_statements_and_update_policies( + target_sid=IAM_S3_ACCESS_POINTS_STATEMENT_SID, + target_s3_statements=s3_statement_chunks, + target_s3_kms_statements=s3_kms_statement_chunks, ) + except AWSServiceQuotaExceeded as e: + error_message = e.message + try: + 
ShareNotificationService( + session=None, dataset=self.dataset, share=self.share + ).notify_managed_policy_limit_exceeded_action(email_id=self.share.owner) + except Exception as e: + logger.error( + f'Error sending email for notifying that managed policy limit exceeded on role due to: {e}' + ) + finally: + raise Exception(error_message) - IAM.update_managed_policy_default_version( - self.target_account_id, - self.target_environment.region, - share_resource_policy_name, - version_id, - json.dumps(policy_document), - ) + is_unattached_policies = share_policy_service.get_policies_unattached_to_role() + if is_unattached_policies: + logger.info( + f'Found some policies are not attached to the target IAM role: {self.target_requester_IAMRoleName}. Attaching policies now' + ) + if self.share.principalType == PrincipalType.Group.value: + share_managed_policies = share_policy_service.get_managed_policies() + share_policy_service.attach_policies(share_managed_policies) + else: + consumption_role = EnvironmentService.get_consumption_role( + session=self.session, uri=self.share.principalId + ) + if consumption_role.dataallManaged: + share_managed_policies = share_policy_service.get_managed_policies() + share_policy_service.attach_policies(share_managed_policies) def check_access_point_and_policy(self) -> None: """ @@ -486,7 +520,6 @@ def manage_access_point_and_policy(self): self.s3_prefix, perms_to_actions(self.share.permissions, SidType.BucketPolicy), ) - s3_client.attach_access_point_policy( access_point_name=self.access_point_name, policy=json.dumps(access_point_policy) ) @@ -641,34 +674,17 @@ def revoke_target_role_access_policy(self): logger.info('Deleting target role IAM statements...') share_policy_service = S3SharePolicyService( - environmentUri=self.target_environment.environmentUri, + role_name=self.target_requester_IAMRoleName, account=self.target_environment.AwsAccountId, region=self.target_environment.region, - role_name=self.target_requester_IAMRoleName, + environmentUri=self.target_environment.environmentUri, resource_prefix=self.target_environment.resourcePrefix, ) + # Process all backwards compatibility tasks and convert to indexed policies + share_policy_service.process_backwards_compatibility_for_target_iam_roles() - role_arn = IAM.get_role_arn_by_name( - self.target_account_id, self.target_environment.region, self.target_requester_IAMRoleName - ) - - # Backwards compatibility - # we check if a managed share policy exists. 
If False, the role was introduced to data.all before this update - # We create the policy from the inline statements and attach it to the role - if not share_policy_service.check_if_policy_exists() and role_arn: - share_policy_service.create_managed_policy_from_inline_and_delete_inline() - share_policy_service.attach_policy() - # End of backwards compatibility - - share_resource_policy_name = share_policy_service.generate_policy_name() - - version_id, policy_document = IAM.get_managed_policy_default_version( - self.target_account_id, self.target_environment.region, share_resource_policy_name - ) - - if not policy_document: - logger.info(f'Policy {share_resource_policy_name} is not found') - return + # Parses all policy documents and extracts s3 and kms statements + share_policy_service.initialize_statements() key_alias = f'alias/{self.dataset.KmsAlias}' kms_client = KmsClient(self.dataset_account_id, self.source_environment.region) @@ -681,24 +697,40 @@ def revoke_target_role_access_policy(self): f'arn:aws:s3:{self.dataset_region}:{self.dataset_account_id}:accesspoint/{self.access_point_name}/*', ] - share_policy_service.remove_resource_from_statement( - target_resources=s3_target_resources, - statement_sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', - policy_document=policy_document, - ) + kms_target_resources = [] if kms_key_id: kms_target_resources = [f'arn:aws:kms:{self.dataset_region}:{self.dataset_account_id}:key/{kms_key_id}'] - share_policy_service.remove_resource_from_statement( - target_resources=kms_target_resources, - statement_sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', - policy_document=policy_document, - ) - IAM.update_managed_policy_default_version( - self.target_account_id, - self.target_environment.region, - share_resource_policy_name, - version_id, - json.dumps(policy_document), + + managed_policy_exists = True if share_policy_service.get_managed_policies() else False + + if not managed_policy_exists: + logger.info(f'Managed policies for share with uri: {self.share.shareUri} are not found') + return + + s3_statements = share_policy_service.total_s3_access_point_stmts + s3_statement_chunks = share_policy_service.remove_resources_and_generate_split_statements( + statements=s3_statements, + target_resources=s3_target_resources, + sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', + resource_type='s3', + ) + logger.info(f'Number of S3 statements created after splitting: {len(s3_statement_chunks)}') + logger.debug(f'S3 statements after adding resources and splitting: {s3_statement_chunks}') + + s3_kms_statements = share_policy_service.total_s3_access_point_kms_stmts + s3_kms_statement_chunks = share_policy_service.remove_resources_and_generate_split_statements( + statements=s3_kms_statements, + target_resources=kms_target_resources, + sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', + resource_type='kms', + ) + logger.info(f'Number of S3 KMS statements created after splitting: {len(s3_kms_statement_chunks)}') + logger.debug(f'S3 KMS statements after adding resources and splitting: {s3_kms_statement_chunks}') + + share_policy_service.merge_statements_and_update_policies( + target_sid=IAM_S3_ACCESS_POINTS_STATEMENT_SID, + target_s3_statements=s3_statement_chunks, + target_s3_kms_statements=s3_kms_statement_chunks, ) def delete_dataset_bucket_key_policy( diff --git a/backend/dataall/modules/s3_datasets_shares/services/share_managers/s3_bucket_share_manager.py b/backend/dataall/modules/s3_datasets_shares/services/share_managers/s3_bucket_share_manager.py index ca9de13d3..aac6fea78 
100644 --- a/backend/dataall/modules/s3_datasets_shares/services/share_managers/s3_bucket_share_manager.py +++ b/backend/dataall/modules/s3_datasets_shares/services/share_managers/s3_bucket_share_manager.py @@ -1,9 +1,11 @@ import json import logging from itertools import count - +from typing import List +from warnings import warn from dataall.base.aws.iam import IAM from dataall.base.aws.sts import SessionHelper +from dataall.base.db.exceptions import AWSServiceQuotaExceeded from dataall.core.environment.db.environment_models import Environment from dataall.core.environment.services.environment_service import EnvironmentService from dataall.modules.s3_datasets.db.dataset_models import DatasetBucket @@ -16,7 +18,6 @@ from dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service import ( S3SharePolicyService, IAM_S3_BUCKETS_STATEMENT_SID, - EMPTY_STATEMENT_SID, ) from dataall.modules.s3_datasets_shares.services.share_managers.s3_utils import ( generate_policy_statement, @@ -29,6 +30,7 @@ from dataall.modules.shares_base.db.share_object_repositories import ShareObjectRepository from dataall.modules.shares_base.services.share_exceptions import PrincipalRoleNotFound from dataall.modules.shares_base.services.share_manager_utils import ShareErrorFormatter +from dataall.modules.shares_base.services.share_notification_service import ShareNotificationService from dataall.modules.shares_base.services.shares_enums import PrincipalType from dataall.modules.shares_base.services.sharing_service import ShareData @@ -77,122 +79,144 @@ def check_s3_iam_access(self) -> None: kms_key_id = kms_client.get_key_id(key_alias) share_policy_service = S3SharePolicyService( - environmentUri=self.target_environment.environmentUri, + role_name=self.target_requester_IAMRoleName, account=self.target_environment.AwsAccountId, region=self.target_environment.region, - role_name=self.target_requester_IAMRoleName, + environmentUri=self.target_environment.environmentUri, resource_prefix=self.target_environment.resourcePrefix, ) - share_resource_policy_name = share_policy_service.generate_policy_name() - if not share_policy_service.check_if_policy_exists(): - logger.info(f'IAM Policy {share_resource_policy_name} does not exist') - self.bucket_errors.append(ShareErrorFormatter.dne_error_msg('IAM Policy', share_resource_policy_name)) - return + # Parses all policy documents and extracts s3 and kms statements + share_policy_service.initialize_statements() - if not share_policy_service.check_if_policy_attached(): - logger.info( - f'IAM Policy {share_resource_policy_name} exists but is not attached to role {self.share.principalRoleName}' + share_resource_policy_name = share_policy_service.generate_indexed_policy_name(index=0) + is_managed_policies_exists = True if share_policy_service.get_managed_policies() else False + + if not is_managed_policies_exists: + warn( + "Convert all your share's requestor policies to managed policies with indexes. Deprecation >= ?? ", + DeprecationWarning, + stacklevel=2, ) - self.bucket_errors.append( - ShareErrorFormatter.dne_error_msg('IAM Policy attached', share_resource_policy_name) + old_managed_policy_name = share_policy_service.generate_old_policy_name() + old_policy_exist = share_policy_service.check_if_policy_exists(policy_name=old_managed_policy_name) + if not old_policy_exist: + logger.info( + f'No managed policy exists for the role: {self.target_requester_IAMRoleName}. Reapply share to create indexed managed policies.' 
+ ) + self.bucket_errors.append(ShareErrorFormatter.dne_error_msg('IAM Policy', share_resource_policy_name)) + return + else: + logger.info( + f'Old managed policy exists for the role: {self.target_requester_IAMRoleName}. Reapply share to create indexed managed policies.' + ) + self.bucket_errors.append(ShareErrorFormatter.dne_error_msg('IAM Policy', share_resource_policy_name)) + return + + unattached_policies: List[str] = share_policy_service.get_policies_unattached_to_role() + if len(unattached_policies) > 0: + logger.info( + f'IAM Policies {unattached_policies} exists but are not attached to role {self.share.principalRoleName}' ) + self.bucket_errors.append(ShareErrorFormatter.dne_error_msg('IAM Policy attached', unattached_policies)) return s3_target_resources = [f'arn:aws:s3:::{self.bucket_name}', f'arn:aws:s3:::{self.bucket_name}/*'] - version_id, policy_document = IAM.get_managed_policy_default_version( - self.target_environment.AwsAccountId, self.target_environment.region, share_resource_policy_name - ) - s3_statement_index = S3SharePolicyService._get_statement_by_sid( - policy_document, f'{IAM_S3_BUCKETS_STATEMENT_SID}S3' - ) - - if s3_statement_index is None: - logger.info(f'IAM Policy Statement {IAM_S3_BUCKETS_STATEMENT_SID}S3 does not exist') + if not S3SharePolicyService.check_if_sid_exists( + f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', share_policy_service.total_s3_stmts + ): + logger.info( + f'IAM Policy Statement with Sid: {IAM_S3_BUCKETS_STATEMENT_SID}S3 - where can be 0,1,2.. - does not exist' + ) self.bucket_errors.append( ShareErrorFormatter.missing_permission_error_msg( self.target_requester_IAMRoleName, - 'IAM Policy Statement', - f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + 'IAM Policy Statement Sid', + f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', 'S3 Bucket', f'{self.bucket_name}', ) ) - elif not share_policy_service.check_resource_in_policy_statement( + elif not share_policy_service.check_resource_in_policy_statements( target_resources=s3_target_resources, - existing_policy_statement=policy_document['Statement'][s3_statement_index], + existing_policy_statements=share_policy_service.total_s3_stmts, ): logger.info( - f'IAM Policy Statement {IAM_S3_BUCKETS_STATEMENT_SID}S3 does not contain resources {s3_target_resources}' + f'IAM Policy Statement with Sid {IAM_S3_BUCKETS_STATEMENT_SID}S3 - where can be 0,1,2.. 
- does not contain resources {s3_target_resources}' ) self.bucket_errors.append( ShareErrorFormatter.missing_permission_error_msg( self.target_requester_IAMRoleName, - 'IAM Policy Resource', - f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + 'IAM Policy Resource(s)', + f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', 'S3 Bucket', f'{self.bucket_name}', ) ) else: - policy_check, missing_permissions, extra_permissions = ( - share_policy_service.check_s3_actions_in_policy_statement( - existing_policy_statement=policy_document['Statement'][s3_statement_index] - ) + policy_sid_actions_map = share_policy_service.check_s3_actions_in_policy_statements( + existing_policy_statements=share_policy_service.total_s3_stmts ) - if not policy_check: - logger.info(f'IAM Policy Statement {IAM_S3_BUCKETS_STATEMENT_SID}S3 has invalid actions') - if missing_permissions: - self.bucket_errors.append( - ShareErrorFormatter.missing_permission_error_msg( - self.target_requester_IAMRoleName, - 'IAM Policy Action', - missing_permissions, - 'S3 Bucket', - f'{self.bucket_name}', + + for sid in policy_sid_actions_map: + policy_check = policy_sid_actions_map[sid].get('policy_check') + missing_permissions = policy_sid_actions_map[sid].get('missing_permissions') + extra_permissions = policy_sid_actions_map[sid].get('extra_permissions') + # Check if policy violations are present + if policy_check: + logger.info(f'IAM Policy Statement {sid} has invalid actions') + if missing_permissions: + self.bucket_errors.append( + ShareErrorFormatter.missing_permission_error_msg( + self.target_requester_IAMRoleName, + 'IAM Policy Action', + missing_permissions, + 'S3 Bucket', + f'{self.bucket_name}', + ) ) - ) - if extra_permissions: - self.bucket_errors.append( - ShareErrorFormatter.not_allowed_permission_error_msg( - self.target_requester_IAMRoleName, - 'IAM Policy Action', - extra_permissions, - 'S3 Bucket', - f'{self.bucket_name}', + if extra_permissions: + self.bucket_errors.append( + ShareErrorFormatter.not_allowed_permission_error_msg( + self.target_requester_IAMRoleName, + 'IAM Policy Action', + extra_permissions, + 'S3 Bucket', + f'{self.bucket_name}', + ) ) - ) if kms_key_id: - kms_statement_index = S3SharePolicyService._get_statement_by_sid( - policy_document, f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS' - ) kms_target_resources = [f'arn:aws:kms:{self.bucket_region}:{self.source_account_id}:key/{kms_key_id}'] - if kms_statement_index is None: - logger.info(f'IAM Policy Statement {IAM_S3_BUCKETS_STATEMENT_SID}KMS does not exist') + + if not S3SharePolicyService.check_if_sid_exists( + f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', share_policy_service.total_s3_kms_stmts + ): + logger.info( + f'IAM Policy Statement with Sid: {IAM_S3_BUCKETS_STATEMENT_SID}KMS - where can be 0,1,2.. 
- does not exist' + ) self.bucket_errors.append( ShareErrorFormatter.missing_permission_error_msg( self.target_requester_IAMRoleName, 'IAM Policy Statement', - f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS-', 'KMS Key', f'{kms_key_id}', ) ) - - elif not share_policy_service.check_resource_in_policy_statement( + elif not share_policy_service.check_resource_in_policy_statements( target_resources=kms_target_resources, - existing_policy_statement=policy_document['Statement'][kms_statement_index], + existing_policy_statements=share_policy_service.total_s3_kms_stmts, ): logger.info( - f'IAM Policy Statement {IAM_S3_BUCKETS_STATEMENT_SID}KMS does not contain resources {kms_target_resources}' + f'IAM Policy Statement {IAM_S3_BUCKETS_STATEMENT_SID}KMS - where can be 0,1,2.. - does not contain resources {kms_target_resources}' ) self.bucket_errors.append( ShareErrorFormatter.missing_permission_error_msg( self.target_requester_IAMRoleName, 'IAM Policy Resource', - f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS-', 'KMS Key', f'{kms_key_id}', ) @@ -207,37 +231,17 @@ def grant_s3_iam_access(self): logger.info(f'Grant target role {self.target_requester_IAMRoleName} access policy') share_policy_service = S3SharePolicyService( - environmentUri=self.target_environment.environmentUri, + role_name=self.target_requester_IAMRoleName, account=self.target_environment.AwsAccountId, region=self.target_environment.region, - role_name=self.target_requester_IAMRoleName, + environmentUri=self.target_environment.environmentUri, resource_prefix=self.target_environment.resourcePrefix, ) + # Process all backwards compatibility tasks and convert to indexed policies + share_policy_service.process_backwards_compatibility_for_target_iam_roles() - # Backwards compatibility - # we check if a managed share policy exists. 
If False, the role was introduced to data.all before this update - # We create the policy from the inline statements and attach it to the role - if not share_policy_service.check_if_policy_exists(): - share_policy_service.create_managed_policy_from_inline_and_delete_inline() - share_policy_service.attach_policy() - # End of backwards compatibility - - if not share_policy_service.check_if_policy_attached(): - if self.share.principalType == PrincipalType.Group.value: - share_policy_service.attach_policy() - else: - consumption_role = EnvironmentService.get_consumption_role( - session=self.session, uri=self.share.principalId - ) - if consumption_role.dataallManaged: - share_policy_service.attach_policy() - - share_resource_policy_name = share_policy_service.generate_policy_name() - - logger.info(f'Share policy name is {share_resource_policy_name}') - version_id, policy_document = IAM.get_managed_policy_default_version( - self.target_account_id, self.target_environment.region, share_resource_policy_name - ) + # Parses all policy documents and extracts s3 and kms statements + share_policy_service.initialize_statements() key_alias = f'alias/{self.target_bucket.KmsAlias}' kms_client = KmsClient(self.source_account_id, self.source_environment.region) @@ -245,31 +249,64 @@ def grant_s3_iam_access(self): s3_target_resources = [f'arn:aws:s3:::{self.bucket_name}', f'arn:aws:s3:::{self.bucket_name}/*'] - share_policy_service.add_missing_resources_to_policy_statement( - resource_type='s3', + kms_target_resources = [] + if kms_key_id: + kms_target_resources = [f'arn:aws:kms:{self.bucket_region}:{self.source_account_id}:key/{kms_key_id}'] + + s3_statements = share_policy_service.total_s3_stmts + s3_statement_chunks = share_policy_service.add_resources_and_generate_split_statements( + statements=s3_statements, target_resources=s3_target_resources, - statement_sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', - policy_document=policy_document, + sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + resource_type='s3', ) + logger.info(f'Number of S3 statements created after splitting: {len(s3_statement_chunks)}') + logger.debug(f'S3 statements after adding resources and splitting: {s3_statement_chunks}') + + s3_kms_statements = share_policy_service.total_s3_kms_stmts + s3_kms_statement_chunks = share_policy_service.add_resources_and_generate_split_statements( + statements=s3_kms_statements, + target_resources=kms_target_resources, + sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + resource_type='kms', + ) + logger.info(f'Number of S3 KMS statements created after splitting: {len(s3_kms_statement_chunks)}') + logger.debug(f'S3 KMS statements after adding resources and splitting: {s3_kms_statement_chunks}') - share_policy_service.remove_empty_statement(policy_doc=policy_document, statement_sid=EMPTY_STATEMENT_SID) - - if kms_key_id: - kms_target_resources = [f'arn:aws:kms:{self.bucket_region}:{self.source_account_id}:key/{kms_key_id}'] - share_policy_service.add_missing_resources_to_policy_statement( - resource_type='kms', - target_resources=kms_target_resources, - statement_sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', - policy_document=policy_document, + try: + share_policy_service.merge_statements_and_update_policies( + target_sid=IAM_S3_BUCKETS_STATEMENT_SID, + target_s3_statements=s3_statement_chunks, + target_s3_kms_statements=s3_kms_statement_chunks, ) + except AWSServiceQuotaExceeded as e: + error_message = e.message + try: + ShareNotificationService( + session=None, dataset=self.dataset, share=self.share + 
).notify_managed_policy_limit_exceeded_action(email_id=self.share.owner) + except Exception as e: + logger.error( + f'Error sending email for notifying that managed policy limit exceeded on role due to: {e}' + ) + finally: + raise Exception(error_message) - IAM.update_managed_policy_default_version( - self.target_account_id, - self.target_environment.region, - share_resource_policy_name, - version_id, - json.dumps(policy_document), - ) + is_unattached_policies = share_policy_service.get_policies_unattached_to_role() + if is_unattached_policies: + logger.info( + f'Found some policies are not attached to the target IAM role: {self.target_requester_IAMRoleName}. Attaching policies now' + ) + if self.share.principalType == PrincipalType.Group.value: + share_managed_policies = share_policy_service.get_managed_policies() + share_policy_service.attach_policies(share_managed_policies) + else: + consumption_role = EnvironmentService.get_consumption_role( + session=self.session, uri=self.share.principalId + ) + if consumption_role.dataallManaged: + share_managed_policies = share_policy_service.get_managed_policies() + share_policy_service.attach_policies(share_managed_policies) def get_bucket_policy_or_default(self): """ @@ -511,18 +548,11 @@ def delete_target_role_access_policy( environmentUri=target_environment.environmentUri, resource_prefix=target_environment.resourcePrefix, ) - # Backwards compatibility - # we check if a managed share policy exists. If False, the role was introduced to data.all before this update - # We create the policy from the inline statements and attach it to the role - if not share_policy_service.check_if_policy_exists(): - share_policy_service.create_managed_policy_from_inline_and_delete_inline() - share_policy_service.attach_policy() - # End of backwards compatibility - - share_resource_policy_name = share_policy_service.generate_policy_name() - version_id, policy_document = IAM.get_managed_policy_default_version( - self.target_account_id, self.target_environment.region, share_resource_policy_name - ) + # Process all backwards compatibility tasks and convert to indexed policies + share_policy_service.process_backwards_compatibility_for_target_iam_roles() + + # Parses all policy documents and extracts s3 and kms statements + share_policy_service.initialize_statements() key_alias = f'alias/{target_bucket.KmsAlias}' kms_client = KmsClient(target_bucket.AwsAccountId, target_bucket.region) @@ -532,25 +562,35 @@ def delete_target_role_access_policy( f'arn:aws:s3:::{target_bucket.S3BucketName}', f'arn:aws:s3:::{target_bucket.S3BucketName}/*', ] - share_policy_service.remove_resource_from_statement( - target_resources=s3_target_resources, - statement_sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', - policy_document=policy_document, - ) + + kms_target_resources = [] if kms_key_id: kms_target_resources = [f'arn:aws:kms:{target_bucket.region}:{target_bucket.AwsAccountId}:key/{kms_key_id}'] - share_policy_service.remove_resource_from_statement( - target_resources=kms_target_resources, - statement_sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', - policy_document=policy_document, - ) - IAM.update_managed_policy_default_version( - self.target_account_id, - self.target_environment.region, - share_resource_policy_name, - version_id, - json.dumps(policy_document), + s3_statements = share_policy_service.total_s3_stmts + s3_statement_chunks = share_policy_service.remove_resources_and_generate_split_statements( + statements=s3_statements, + target_resources=s3_target_resources, + 
sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + resource_type='s3', + ) + logger.info(f'Number of S3 statements created after splitting: {len(s3_statement_chunks)}') + logger.debug(f'S3 statements after adding resources and splitting: {s3_statement_chunks}') + + s3_kms_statements = share_policy_service.total_s3_kms_stmts + s3_kms_statement_chunks = share_policy_service.remove_resources_and_generate_split_statements( + statements=s3_kms_statements, + target_resources=kms_target_resources, + sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + resource_type='kms', + ) + logger.info(f'Number of S3 KMS statements created after splitting: {len(s3_kms_statement_chunks)}') + logger.debug(f'S3 KMS statements after adding resources and splitting: {s3_kms_statement_chunks}') + + share_policy_service.merge_statements_and_update_policies( + target_sid=IAM_S3_BUCKETS_STATEMENT_SID, + target_s3_statements=s3_statement_chunks, + target_s3_kms_statements=s3_kms_statement_chunks, ) def delete_target_role_bucket_key_policy( diff --git a/backend/dataall/modules/shares_base/services/share_notification_service.py b/backend/dataall/modules/shares_base/services/share_notification_service.py index 8102f3370..7b4c7e379 100644 --- a/backend/dataall/modules/shares_base/services/share_notification_service.py +++ b/backend/dataall/modules/shares_base/services/share_notification_service.py @@ -109,6 +109,40 @@ def notify_persistent_email_reminder(self, email_id: str): ) return notifications + def notify_managed_policy_limit_exceeded_action(self, email_id: str): + share_link_text = '' + if os.environ.get('frontend_domain_url'): + share_link_text = ( + f'

Please visit data.all share link ' + f'to view more details.' + ) + + msg_intro = f"""Dear User,
+ + We are contacting you because a share requested by {email_id} failed: no additional managed policy could be attached to your IAM role {self.share.principalRoleName}. + Please check the service quota for the number of managed policies that can be attached to a role in your AWS account and increase the limit. + For reference, please see this link - https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-quotas-entities.
+ Alternatively, please remove any unused managed policies from that role.
+ + + Note - Existing shares are not affected, but any newly added share items or new shares on the requestor role {self.share.principalRoleName} will fail until you increase the IAM quota limit or detach another managed policy from that role. + """ + + msg_end = """Your prompt attention in this matter is greatly appreciated. +

Best regards, +
The Data.all Team + """ + + subject = 'URGENT: Data.all | Action Required due to failing share' + email_notification_msg = msg_intro + share_link_text + msg_end + + self._create_and_send_email_notifications( + subject=subject, + msg=email_notification_msg, + recipient_groups_list=[self.share.groupUri], + ) + def notify_share_object_approval(self, email_id: str): share_link_text = '' if os.environ.get('frontend_domain_url'): diff --git a/backend/docker/dev/Dockerfile b/backend/docker/dev/Dockerfile index 6c7d3719a..d243db74e 100644 --- a/backend/docker/dev/Dockerfile +++ b/backend/docker/dev/Dockerfile @@ -27,7 +27,6 @@ RUN chown -R ${CONTAINER_USER}:root /tmp USER ${CONTAINER_USER} - ## Add source WORKDIR /build diff --git a/deploy/stacks/container.py b/deploy/stacks/container.py index a65a609e6..a2a042302 100644 --- a/deploy/stacks/container.py +++ b/deploy/stacks/container.py @@ -261,7 +261,7 @@ def add_share_management_task(self): f'ShareManagementTaskContainer{self._envname}', container_name='container', image=ecs.ContainerImage.from_ecr_repository(repository=self._ecr_repository, tag=self._cdkproxy_image_tag), - environment=self._create_env(), + environment=self.env_vars, command=['python3.9', '-m', 'dataall.modules.shares_base.tasks.share_manager_task'], logging=ecs.LogDriver.aws_logs( stream_prefix='task', @@ -292,7 +292,7 @@ def add_share_verifier_task(self): command=['python3.9', '-m', 'dataall.modules.shares_base.tasks.share_verifier_task'], container_id='container', ecr_repository=self._ecr_repository, - environment=self._create_env(), + environment=self.env_vars, image_tag=self._cdkproxy_image_tag, log_group=self.create_log_group(self._envname, self._resource_prefix, log_group_name='share-verifier'), schedule_expression=Schedule.expression('rate(7 days)'), @@ -321,7 +321,7 @@ def add_share_reapplier_task(self): f'ShareReapplierTaskContainer{self._envname}', container_name='container', image=ecs.ContainerImage.from_ecr_repository(repository=self._ecr_repository, tag=self._cdkproxy_image_tag), - environment=self._create_env(), + environment=self.env_vars, command=['python3.9', '-m', 'dataall.modules.shares_base.tasks.share_reapplier_task'], logging=ecs.LogDriver.aws_logs( stream_prefix='task', diff --git a/docker-compose.yaml b/docker-compose.yaml index 2e9597772..497c3989a 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -7,7 +7,7 @@ services: dockerfile: docker/dev/Dockerfile args: CONTAINER_UID: ${UID} - entrypoint: /bin/bash -c 'aws configure set region "eu-west-1" &&. ~/.nvm/nvm.sh && uvicorn cdkproxymain:app --host 0.0.0.0 --port 2805 --reload' + entrypoint: /bin/bash -c 'aws configure set region "eu-west-1" && . 
~/.nvm/nvm.sh && uvicorn cdkproxymain:app --host 0.0.0.0 --port 2805 --reload' expose: - 2805 ports: diff --git a/frontend/src/modules/Catalog/components/RequestAccessModal.js b/frontend/src/modules/Catalog/components/RequestAccessModal.js index 1d3184a8a..316df29a2 100644 --- a/frontend/src/modules/Catalog/components/RequestAccessModal.js +++ b/frontend/src/modules/Catalog/components/RequestAccessModal.js @@ -49,7 +49,7 @@ export const RequestAccessModal = (props) => { const [loadingPolicies, setLoadingPolicies] = useState(false); const [roleOptions, setRoleOptions] = useState([]); const [isSharePolicyAttached, setIsSharePolicyAttached] = useState(true); - const [policyName, setPolicyName] = useState(''); + const [unAttachedPolicyNames, setUnAttachedPolicyNames] = useState(''); const [step, setStep] = useState(0); const [share, setShare] = useState(false); @@ -175,15 +175,21 @@ export const RequestAccessModal = (props) => { }) ); if (!response.errors) { - var isSharePolicyAttached = - response.data.getConsumptionRolePolicies.find( - (policy) => policy.policy_type === 'SharePolicy' - ).attached; - setIsSharePolicyAttached(isSharePolicyAttached); - var policyName = response.data.getConsumptionRolePolicies.find( - (policy) => policy.policy_type === 'SharePolicy' - ).policy_name; - setPolicyName(policyName); + let isSharePoliciesAttached = response.data.getConsumptionRolePolicies + .filter((policy) => policy.policy_type === 'SharePolicy') + .map((policy) => policy.attached); + const isAllPoliciesAttached = isSharePoliciesAttached.every( + (value) => value === true + ); + setIsSharePolicyAttached(isAllPoliciesAttached); + let policyNameList = response.data.getConsumptionRolePolicies + .filter((policy) => { + return ( + policy.policy_type === 'SharePolicy' && policy.attached === false + ); + }) + .map((policy) => policy.policy_name); + setUnAttachedPolicyNames(policyNameList); } else { dispatch({ type: SET_ERROR, error: response.errors[0].message }); } @@ -614,7 +620,7 @@ export const RequestAccessModal = (props) => { ); } else { setFieldValue('consumptionRole', ''); - setPolicyName(''); + setUnAttachedPolicyNames(''); } }} renderInput={(params) => ( @@ -746,7 +752,8 @@ export const RequestAccessModal = (props) => { Selected consumption role is managed by customer, but the share policy{' '} - {policyName} is not attached. + {unAttachedPolicyNames} is + not attached.
Please attach it or let Data.all attach it for you. diff --git a/frontend/src/modules/Environments/components/EnvironmentTeams.js b/frontend/src/modules/Environments/components/EnvironmentTeams.js index 995be5786..f2f6bacad 100644 --- a/frontend/src/modules/Environments/components/EnvironmentTeams.js +++ b/frontend/src/modules/Environments/components/EnvironmentTeams.js @@ -44,7 +44,11 @@ import { } from 'design'; import { SET_ERROR, useDispatch } from 'globalErrors'; import { isFeatureEnabled } from 'utils'; -import { useClient, useFetchGroups } from 'services'; +import { + getConsumptionRolePolicies, + useClient, + useFetchGroups +} from 'services'; import { generateEnvironmentAccessToken, getEnvironmentAssumeRoleUrl, @@ -288,6 +292,91 @@ TeamRow.propTypes = { isTeamEditModalOpenId: PropTypes.string }; +export const IAMRolePolicyDataGridCell = ({ environmentUri, IAMRoleName }) => { + const [isLoading, setLoading] = useState(true); + const [managedPolicyDetails, setManagedPolicyDetails] = useState(null); + const dispatch = useDispatch(); + const { enqueueSnackbar } = useSnackbar(); + const client = useClient(); + + useEffect(() => { + if (client) { + getRolePolicies().catch((e) => + dispatch({ type: SET_ERROR, error: e.message }) + ); + } + }, [client, dispatch, enqueueSnackbar]); + + const getRolePolicies = async () => { + setLoading(true); + try { + const response = await client.query( + getConsumptionRolePolicies({ + environmentUri: environmentUri, + IAMRoleName: IAMRoleName + }) + ); + if (!response.errors) { + setManagedPolicyDetails(response.data.getConsumptionRolePolicies); + } else { + dispatch({ type: SET_ERROR, error: response.errors[0].message }); + } + } catch (e) { + dispatch({ type: SET_ERROR, error: e.message }); + } finally { + setLoading(false); + } + }; + + return ( + + {isLoading ? 
( + + ) : ( + + + { + await navigator.clipboard.writeText( + managedPolicyDetails.map((policy) => policy.policy_name) + ); + enqueueSnackbar('Policy Name is copied to clipboard', { + anchorOrigin: { + horizontal: 'right', + vertical: 'top' + }, + variant: 'success' + }); + }} + > + + + + )} + + ); +}; + +IAMRolePolicyDataGridCell.propTypes = { + environmentUri: PropTypes.any, + IAMRoleName: PropTypes.any +}; + export const EnvironmentTeams = ({ environment }) => { const client = useClient(); const dispatch = useDispatch(); @@ -746,45 +835,10 @@ export const EnvironmentTeams = ({ environment }) => { headerName: 'IAM Policies', flex: 0.5, renderCell: (params: GridRenderCellParams) => ( - - - { - await navigator.clipboard.writeText( - params.row.managedPolicies.map( - (policy) => policy.policy_name - ) - ); - enqueueSnackbar( - 'Policy Name is copied to clipboard', - { - anchorOrigin: { - horizontal: 'right', - vertical: 'top' - }, - variant: 'success' - } - ); - }} - > - - - + ) }, { diff --git a/frontend/src/modules/Environments/services/listAllEnvironmentConsumptionRoles.js b/frontend/src/modules/Environments/services/listAllEnvironmentConsumptionRoles.js index 04cd522ea..010bebdfb 100644 --- a/frontend/src/modules/Environments/services/listAllEnvironmentConsumptionRoles.js +++ b/frontend/src/modules/Environments/services/listAllEnvironmentConsumptionRoles.js @@ -29,11 +29,6 @@ export const listAllEnvironmentConsumptionRoles = ({ groupUri IAMRoleArn dataallManaged - managedPolicies { - policy_type - policy_name - attached - } } } } diff --git a/tests/modules/s3_datasets_shares/tasks/test_s3_access_point_share_manager.py b/tests/modules/s3_datasets_shares/tasks/test_s3_access_point_share_manager.py index 822743722..18ec6bfa6 100644 --- a/tests/modules/s3_datasets_shares/tasks/test_s3_access_point_share_manager.py +++ b/tests/modules/s3_datasets_shares/tasks/test_s3_access_point_share_manager.py @@ -241,7 +241,7 @@ def _create_target_dataset_access_control_policy(bucket_name, access_point_name) 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S31', 'Effect': 'Allow', 'Action': S3_ALLOWED_ACTIONS, 'Resource': [ @@ -252,7 +252,7 @@ def _create_target_dataset_access_control_policy(bucket_name, access_point_name) ], }, { - 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [f'arn:aws:kms:eu-west-1:{SOURCE_ENV_ACCOUNT}:key/some-key-2112'], @@ -335,17 +335,31 @@ def test_grant_target_role_access_policy_test_empty_policy( 'dataall.base.aws.iam.IAM.get_managed_policy_default_version', return_value=('v1', initial_policy_document) ) + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + iam_update_role_policy_mock = mocker.patch( 'dataall.base.aws.iam.IAM.update_managed_policy_default_version', return_value=None, ) mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - 
return_value=True, + return_value=False, ) mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, + ) + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], ) access_point_name = share_manager.build_access_point_name(share1) @@ -354,7 +368,7 @@ def test_grant_target_role_access_policy_test_empty_policy( 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S31', 'Effect': 'Allow', 'Action': S3_ALLOWED_ACTIONS, 'Resource': [ @@ -365,7 +379,7 @@ def test_grant_target_role_access_policy_test_empty_policy( ], }, { - 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [f'arn:aws:kms:{dataset1.region}:{dataset1.AwsAccountId}:key/kms-key'], @@ -380,12 +394,12 @@ def test_grant_target_role_access_policy_test_empty_policy( share_manager.grant_target_role_access_policy() expected_policy_name = S3SharePolicyService( - environmentUri=target_environment.environmentUri, role_name=share1.principalRoleName, account=target_environment.AwsAccountId, region=target_environment.region, + environmentUri=target_environment.environmentUri, resource_prefix=target_environment.resourcePrefix, - ).generate_policy_name() + ).generate_indexed_policy_name(index=0) # Then iam_update_role_policy_mock.assert_called_with( target_environment.AwsAccountId, @@ -405,13 +419,30 @@ def test_grant_target_role_access_policy_existing_policy_bucket_not_included( ) mocker.patch('dataall.base.aws.iam.IAM.get_managed_policy_default_version', return_value=('v1', iam_policy)) + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + return_value=False, ) mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + return_value=False, + ) + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, + ) + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], ) iam_update_role_policy_mock = mocker.patch( @@ -427,26 +458,27 @@ def test_grant_target_role_access_policy_existing_policy_bucket_not_included( # Then iam_update_role_policy_mock.assert_called() + policy_object = json.loads(iam_update_role_policy_mock.call_args.args[4]) - # Iam function is called with str from object so we transform 
back to object - policy_object = iam_policy s3_index = S3SharePolicyService._get_statement_by_sid( - policy=policy_object, sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3' + policy=policy_object, sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S31' ) kms_index = S3SharePolicyService._get_statement_by_sid( - policy=policy_object, sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS' + policy=policy_object, sid=f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS1' ) # Assert that bucket_name is inside the resource array of policy object assert location1.S3BucketName in ','.join(policy_object['Statement'][s3_index]['Resource']) assert ( f'arn:aws:kms:{dataset1.region}:{dataset1.AwsAccountId}:key/kms-key' - in iam_policy['Statement'][kms_index]['Resource'] - and 'kms:*' in iam_policy['Statement'][kms_index]['Action'] + in policy_object['Statement'][kms_index]['Resource'] + and 'kms:*' in policy_object['Statement'][kms_index]['Action'] ) # Assert that statements for S3 bucket sharing are unaffected - s3_index = S3SharePolicyService._get_statement_by_sid(policy=policy_object, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3') + s3_index = S3SharePolicyService._get_statement_by_sid( + policy=policy_object, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S31' + ) def test_grant_target_role_access_policy_existing_policy_bucket_included(mocker, share_manager): @@ -461,10 +493,29 @@ def test_grant_target_role_access_policy_existing_policy_bucket_included(mocker, 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', return_value=True, ) + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0', 'policy-2'] + ) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0', 'policy-2']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', + return_value=False, + ) + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, + ) + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], ) iam_update_role_policy_mock = mocker.patch( @@ -883,7 +934,7 @@ def test_delete_target_role_access_policy_no_remaining_statement( 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S31', 'Effect': 'Allow', 'Action': ['s3:*'], 'Resource': [ @@ -894,7 +945,7 @@ def test_delete_target_role_access_policy_no_remaining_statement( ], }, { - 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [f'arn:aws:kms:{dataset1.region}:{dataset1.AwsAccountId}:key/kms-key'], @@ -904,7 +955,7 @@ def test_delete_target_role_access_policy_no_remaining_statement( expected_remaining_target_role_policy = { 'Version': 
'2012-10-17', - 'Statement': [{'Sid': EMPTY_STATEMENT_SID, 'Effect': 'Allow', 'Action': 'none:null', 'Resource': '*'}], + 'Statement': [{'Sid': EMPTY_STATEMENT_SID, 'Effect': 'Allow', 'Action': ['none:null'], 'Resource': ['*']}], } # Given @@ -913,14 +964,29 @@ def test_delete_target_role_access_policy_no_remaining_statement( ) mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + return_value=False, + ) + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, ) + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], + ) + + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0', 'policy-2'] ) + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0', 'policy-2']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + iam_update_role_policy_mock = mocker.patch( 'dataall.base.aws.iam.IAM.update_managed_policy_default_version', return_value=None, @@ -938,12 +1004,12 @@ def test_delete_target_role_access_policy_no_remaining_statement( share_manager.revoke_target_role_access_policy() expected_policy_name = S3SharePolicyService( - environmentUri=target_environment.environmentUri, role_name=share1.principalRoleName, account=target_environment.AwsAccountId, region=target_environment.region, + environmentUri=target_environment.environmentUri, resource_prefix=target_environment.resourcePrefix, - ).generate_policy_name() + ).generate_indexed_policy_name(index=0) iam_update_role_policy_mock.assert_called_with( target_environment.AwsAccountId, @@ -968,7 +1034,7 @@ def test_delete_target_role_access_policy_with_remaining_statement( 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S31', 'Effect': 'Allow', 'Action': ['s3:*'], 'Resource': [ @@ -980,7 +1046,7 @@ def test_delete_target_role_access_policy_with_remaining_statement( ], }, { - 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [ @@ -995,13 +1061,13 @@ def test_delete_target_role_access_policy_with_remaining_statement( 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}S31', 'Effect': 'Allow', - 'Action': ['s3:*'], + 'Action': ['s3:List*', 's3:Describe*', 's3:GetObject'], 'Resource': ['arn:aws:s3:::UNRELATED_BUCKET_ARN'], }, { - 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_ACCESS_POINTS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [f'arn:aws:kms:us-east-1:121231131212:key/some-key-2112'], @@ -1016,14 +1082,28 @@ def test_delete_target_role_access_policy_with_remaining_statement( mocker.patch( 
'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + return_value=False, ) mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, + ) + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], ) + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + iam_update_role_policy_mock = mocker.patch( 'dataall.base.aws.iam.IAM.update_managed_policy_default_version', return_value=None, @@ -1042,12 +1122,12 @@ def test_delete_target_role_access_policy_with_remaining_statement( # Then expected_policy_name = S3SharePolicyService( - environmentUri=target_environment.environmentUri, role_name=share1.principalRoleName, account=target_environment.AwsAccountId, region=target_environment.region, + environmentUri=target_environment.environmentUri, resource_prefix=target_environment.resourcePrefix, - ).generate_policy_name() + ).generate_indexed_policy_name(index=0) iam_update_role_policy_mock.assert_called_with( target_environment.AwsAccountId, @@ -1211,10 +1291,9 @@ def test_check_target_role_access_policy(mocker, share_manager): 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', return_value=True, ) - mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], ) iam_get_policy_mock = mocker.patch( @@ -1225,6 +1304,10 @@ def test_check_target_role_access_policy(mocker, share_manager): ), ) + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + mocker.patch( 'dataall.base.aws.iam.IAM.get_role_arn_by_name', side_effect=lambda account_id, region, role_name: f'arn:aws:iam::{account_id}:role/{role_name}', @@ -1247,12 +1330,15 @@ def test_check_target_role_access_policy_wrong_permissions(mocker, share_manager 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', return_value=True, ) - mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], ) + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', 
return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + bad_policy = _create_target_dataset_access_control_policy( share_manager.bucket_name, share_manager.access_point_name ) @@ -1294,12 +1380,15 @@ def test_check_target_role_access_policy_existing_policy_bucket_and_key_not_incl 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', return_value=True, ) - mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], ) + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + # Gets policy with other S3 and KMS iam_get_policy_mock = mocker.patch( 'dataall.base.aws.iam.IAM.get_managed_policy_default_version', @@ -1333,10 +1422,14 @@ def test_check_target_role_access_policy_test_no_policy(mocker, share_manager): ) mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], ) + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=[]) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=[]) + kms_client = mock_kms_client(mocker) kms_client().get_key_id.return_value = 'kms-key' @@ -1349,13 +1442,21 @@ def test_check_target_role_access_policy_test_no_policy(mocker, share_manager): def test_check_target_role_access_policy_test_policy_not_attached(mocker, share_manager): # Given mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=['policy-0'], ) - # Policy is not attached - mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=False, + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + # Gets policy with other S3 and KMS + iam_get_policy_mock = mocker.patch( + 'dataall.base.aws.iam.IAM.get_managed_policy_default_version', + return_value=( + 'v1', + _create_target_dataset_access_control_policy(share_manager.bucket_name, share_manager.access_point_name), + ), ) kms_client = mock_kms_client(mocker) diff --git a/tests/modules/s3_datasets_shares/tasks/test_s3_bucket_share_manager.py b/tests/modules/s3_datasets_shares/tasks/test_s3_bucket_share_manager.py index a8d175e56..b0db7668f 100644 --- a/tests/modules/s3_datasets_shares/tasks/test_s3_bucket_share_manager.py +++ b/tests/modules/s3_datasets_shares/tasks/test_s3_bucket_share_manager.py @@ -416,10 +416,27 @@ def 
test_grant_role_bucket_policy_with_another_read_only_role( def test_grant_s3_iam_access_with_no_policy(mocker, dataset2, share2_manager): # Given - # The IAM Policy for sharing for the IAM role does not exist (check_if_policy_exists returns False) + # The IAM Policy for sharing on the IAM role does not exist (check_if_policy_exists returns False) # Backwards compatibility: check that the create_managed_policy_from_inline_and_delete_inline is called # Check if the get and update_role_policy func are called and policy statements are added + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, + ) + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=['policy-0'], + ) + mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', return_value=False, @@ -435,14 +452,19 @@ def test_grant_s3_iam_access_with_no_policy(mocker, dataset2, share2_manager): 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.create_managed_policy_from_inline_and_delete_inline', return_value='arn:iam::someArn', ) + + # Return [] when first called, indicating that managed indexed policies don't exist, Once share_policy_service_mock_1.called is called then return some indexed managed policy + mocker.patch( + 'dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', + side_effect=lambda account_id, region, policy_filter_pattern: [] + if not share_policy_service_mock_1.called + else ['policy-0'], + ) + share_policy_service_mock_2 = mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policy', + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policies', return_value=True, ) - share_policy_service_mock_3 = mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=False, - ) iam_update_role_policy_mock_1 = mocker.patch( 'dataall.base.aws.iam.IAM.get_managed_policy_default_version', return_value=('v1', empty_policy_document) ) @@ -454,14 +476,12 @@ def test_grant_s3_iam_access_with_no_policy(mocker, dataset2, share2_manager): # Assert IAM and service calls called share_policy_service_mock_1.assert_called_once() share_policy_service_mock_2.assert_called() - share_policy_service_mock_3.assert_called_once() iam_update_role_policy_mock_1.assert_called_once() iam_update_role_policy_mock_2.assert_called_once() + iam_policy = json.loads(iam_update_role_policy_mock_2.call_args.args[4]) - iam_policy = empty_policy_document - - s3_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3') - kms_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS') + s3_index = 
S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S31') + kms_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1') # Assert if the IAM role policy with S3 and KMS permissions was created assert len(iam_policy['Statement']) == 2 @@ -482,18 +502,39 @@ def test_grant_s3_iam_access_with_no_policy(mocker, dataset2, share2_manager): def test_grant_s3_iam_access_with_empty_policy(mocker, dataset2, share2_manager): # Given - # The IAM Policy for sharing for the IAM role exists (check_if_policy_exists returns True) + # The IAM Policy for sharing for the IAM role exists (check_if_policy_exists returns False but check_if_managed_policies_exists returns True, indicating that IAM indexed IAM policies exist) # And the IAM Policy is empty (get_managed_policy_default_version returns initial_policy_document) # Check if the get and update_role_policy func are called and policy statements are added + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, ) mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=['policy-0'], + ) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policies', return_value=True, ) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', + return_value=False, + ) + kms_client = mock_kms_client(mocker) kms_client().get_key_id.return_value = 'kms-key' @@ -513,11 +554,10 @@ def test_grant_s3_iam_access_with_empty_policy(mocker, dataset2, share2_manager) # Assert IAM called iam_update_role_policy_mock_1.assert_called_once() iam_update_role_policy_mock_2.assert_called_once() + iam_policy = json.loads(iam_update_role_policy_mock_2.call_args.args[4]) - iam_policy = initial_policy_document - - s3_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3') - kms_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS') + s3_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S31') + kms_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1') # Assert if the IAM role policy with S3 and KMS permissions was created assert len(iam_policy['Statement']) == 2 @@ -538,7 +578,7 @@ def test_grant_s3_iam_access_with_empty_policy(mocker, dataset2, 
share2_manager) def test_grant_s3_iam_access_with_policy_and_target_resources_not_present(mocker, dataset2, share2_manager): # Given - # The IAM Policy for sharing for the IAM role exists (check_if_policy_exists returns True) + # The IAM Policy for sharing for the IAM role exists (check_if_policy_exists returns False but check_if_managed_policies_exists returns True, indicating that IAM indexed IAM policies exist) # And the IAM Policy is NOT empty (get_managed_policy_default_version returns policy) # Check if the get and update_role_policy func are called and policy statements are added to the existing ones @@ -546,13 +586,13 @@ def test_grant_s3_iam_access_with_policy_and_target_resources_not_present(mocker 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S31', 'Effect': 'Allow', 'Action': ['s3:*'], 'Resource': [f'arn:aws:s3:::S3Bucket', f'arn:aws:s3:::S3Bucket/*'], }, { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [f'arn:aws:kms:us-east-1:12121121121:key/some-kms-key'], @@ -560,16 +600,36 @@ def test_grant_s3_iam_access_with_policy_and_target_resources_not_present(mocker ], } + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, + ) mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + return_value=False, ) mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=['policy-0'], + ) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policies', return_value=True, ) - s3_index = S3SharePolicyService._get_statement_by_sid(policy=policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3') - kms_index = S3SharePolicyService._get_statement_by_sid(policy=policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS') + + s3_index = S3SharePolicyService._get_statement_by_sid(policy=policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S31') + kms_index = S3SharePolicyService._get_statement_by_sid(policy=policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1') assert len(policy['Statement']) == 2 assert len(policy['Statement'][s3_index]['Resource']) == 2 @@ -590,7 +650,7 @@ def test_grant_s3_iam_access_with_policy_and_target_resources_not_present(mocker iam_update_role_policy_mock_1.assert_called_once() iam_update_role_policy_mock_2.assert_called_once() - iam_policy = policy + iam_policy = json.loads(iam_update_role_policy_mock_2.call_args.args[4]) # Assert that new resources were appended assert len(policy['Statement']) == 2 @@ -605,7 +665,7 @@ def 
test_grant_s3_iam_access_with_policy_and_target_resources_not_present(mocker def test_grant_s3_iam_access_with_complete_policy_present(mocker, dataset2, share2_manager): # Given - # The IAM Policy for sharing for the IAM role exists (check_if_policy_exists returns True) + # The IAM Policy for sharing for the IAM role exists (check_if_policy_exists returns False but get_managed_policies returns ['policy-0'], indicating that indexed IAM policies exist) # And the IAM Policy is NOT empty and already contains all target resources (get_managed_policy_default_version returns policy) # Check if policy created after calling function and the existing Policy are the same @@ -613,9 +673,9 @@ def test_grant_s3_iam_access_with_complete_policy_present(mocker, dataset2, shar 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S31', 'Effect': 'Allow', - 'Action': ['s3:*'], + 'Action': S3_ALLOWED_ACTIONS, 'Resource': [ f'arn:aws:s3:::{dataset2.S3BucketName}', f'arn:aws:s3:::{dataset2.S3BucketName}/*', @@ -624,7 +684,7 @@ def test_grant_s3_iam_access_with_complete_policy_present(mocker, dataset2, shar ], }, { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [f'arn:aws:kms:{dataset2.region}:{dataset2.AwsAccountId}:key/kms-key'], @@ -632,14 +692,34 @@ def test_grant_s3_iam_access_with_complete_policy_present(mocker, dataset2, shar ], } + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, + ) mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + return_value=False, ) mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=['policy-0'], + ) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policies', return_value=True, ) + kms_client = mock_kms_client(mocker) kms_client().get_key_id.return_value = 'kms-key' @@ -655,10 +735,14 @@ def test_grant_s3_iam_access_with_complete_policy_present(mocker, dataset2, shar iam_update_role_policy_mock_1.assert_called_once() iam_update_role_policy_mock_2.assert_called_once() - created_iam_policy = policy + created_iam_policy = json.loads(iam_update_role_policy_mock_2.call_args.args[4]) - s3_index = S3SharePolicyService._get_statement_by_sid(policy=policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3') - kms_index = S3SharePolicyService._get_statement_by_sid(policy=policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS') + s3_index = S3SharePolicyService._get_statement_by_sid( + policy=created_iam_policy, 
sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S31' + ) + kms_index = S3SharePolicyService._get_statement_by_sid( + policy=created_iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1' + ) assert len(created_iam_policy['Statement']) == 2 assert ( @@ -922,11 +1006,31 @@ def test_delete_target_role_access_no_policy_no_other_resources_shared( 'Statement': [{'Sid': EMPTY_STATEMENT_SID, 'Effect': 'Allow', 'Action': 'none:null', 'Resource': '*'}], } + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, + ) + mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', return_value=False, ) + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=['policy-0'], + ) + kms_client = mock_kms_client(mocker) kms_client().get_key_id.return_value = 'kms-key' @@ -934,8 +1038,17 @@ def test_delete_target_role_access_no_policy_no_other_resources_shared( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.create_managed_policy_from_inline_and_delete_inline', return_value='arn:iam::someArn', ) + + # Return [] when first called, indicating that managed indexed policies don't exist, Once share_policy_service_mock_1.called is called then return some indexed managed policy + mocker.patch( + 'dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', + side_effect=lambda account_id, region, policy_filter_pattern: [] + if not share_policy_service_mock_1.called + else ['policy-0'], + ) + share_policy_service_mock_2 = mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policy', + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policies', return_value=True, ) @@ -957,16 +1070,10 @@ def test_delete_target_role_access_no_policy_no_other_resources_shared( iam_update_role_policy_mock_2.assert_called_once() # Get the updated IAM policy and compare it with the existing one - updated_iam_policy = policy_document - s3_index = S3SharePolicyService._get_statement_by_sid( - policy=updated_iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3' - ) - kms_index = S3SharePolicyService._get_statement_by_sid( - policy=updated_iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS' - ) + updated_iam_policy = json.loads(iam_update_role_policy_mock_2.call_args.args[4]) assert len(updated_iam_policy['Statement']) == 1 - assert '*' == updated_iam_policy['Statement'][0]['Resource'] + assert ['*'] == updated_iam_policy['Statement'][0]['Resource'] def test_delete_target_role_access_policy_no_resource_of_datasets_s3_bucket( @@ -980,13 +1087,13 @@ def test_delete_target_role_access_policy_no_resource_of_datasets_s3_bucket( 'Version': '2012-10-17', 'Statement': [ { - 'Sid': 
f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S31', 'Effect': 'Allow', 'Action': ['s3:*'], 'Resource': [f'arn:aws:s3:::someOtherBucket', f'arn:aws:s3:::someOtherBucket/*'], }, { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [f'arn:aws:kms:us-east-1:121231131212:key/some-key-2112'], @@ -994,13 +1101,29 @@ def test_delete_target_role_access_policy_no_resource_of_datasets_s3_bucket( ], } + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, ) + mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=['policy-0'], + ) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', + return_value=False, ) kms_client = mock_kms_client(mocker) @@ -1021,9 +1144,10 @@ def test_delete_target_role_access_policy_no_resource_of_datasets_s3_bucket( iam_update_role_policy_mock_2.assert_called_once() # Get the updated IAM policy and compare it with the existing one - updated_iam_policy = iam_policy - s3_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3') - kms_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS') + updated_iam_policy = json.loads(iam_update_role_policy_mock_2.call_args.args[4]) + + s3_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S31') + kms_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1') assert len(updated_iam_policy['Statement']) == 2 assert 'arn:aws:s3:::someOtherBucket,arn:aws:s3:::someOtherBucket/*' == ','.join( @@ -1045,7 +1169,7 @@ def test_delete_target_role_access_policy_with_multiple_s3_buckets_in_policy( 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S31', 'Effect': 'Allow', 'Action': ['s3:*'], 'Resource': [ @@ -1056,7 +1180,7 @@ def test_delete_target_role_access_policy_with_multiple_s3_buckets_in_policy( ], }, { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [ @@ -1067,13 +1191,29 @@ def test_delete_target_role_access_policy_with_multiple_s3_buckets_in_policy( ], } + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + 
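# IAM calls for indexed share policies are mocked below: listing and attached-policy lookups return ['policy-0'], and the create/delete helpers are stubbed to succeed so only the policy-update logic is exercised. +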
mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, + ) + mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + return_value=False, ) + mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=['policy-0'], ) iam_update_role_policy_mock_1 = mocker.patch( @@ -1093,10 +1233,10 @@ def test_delete_target_role_access_policy_with_multiple_s3_buckets_in_policy( iam_update_role_policy_mock_1.assert_called_once() iam_update_role_policy_mock_2.assert_called_once() - updated_iam_policy = iam_policy + updated_iam_policy = json.loads(iam_update_role_policy_mock_2.call_args.args[4]) - s3_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S3') - kms_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS') + s3_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}S31') + kms_index = S3SharePolicyService._get_statement_by_sid(policy=iam_policy, sid=f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1') assert f'arn:aws:s3:::{dataset2.S3BucketName}' not in updated_iam_policy['Statement'][s3_index]['Resource'] assert f'arn:aws:s3:::{dataset2.S3BucketName}/*' not in updated_iam_policy['Statement'][s3_index]['Resource'] @@ -1124,7 +1264,7 @@ def test_delete_target_role_access_policy_with_one_s3_bucket_and_one_kms_resourc 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S31', 'Effect': 'Allow', 'Action': ['s3:*'], 'Resource': [ @@ -1133,7 +1273,7 @@ def test_delete_target_role_access_policy_with_one_s3_bucket_and_one_kms_resourc ], }, { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [f'arn:aws:kms:{dataset2.region}:{dataset2.AwsAccountId}:key/kms-key'], @@ -1141,13 +1281,29 @@ def test_delete_target_role_access_policy_with_one_s3_bucket_and_one_kms_resourc ], } + mocker.patch('dataall.base.aws.iam.IAM.create_managed_policy', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_non_default_versions', return_value=True) + + mocker.patch('dataall.base.aws.iam.IAM.delete_managed_policy_by_name', return_value=True) + + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService._get_managed_policy_quota', + return_value=10, + ) 
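+ # The managed-policy quota is stubbed to 10 (leaving headroom), and check_if_policy_exists is patched to False below: the legacy non-indexed share policy is assumed absent, so the indexed managed-policy path is exercised.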
+ mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + return_value=False, ) + mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=['policy-0'], ) kms_client = mock_kms_client(mocker) @@ -1384,27 +1540,34 @@ def test_check_s3_iam_access(mocker, dataset2, share2_manager): 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S31', 'Effect': 'Allow', 'Action': ['s3:List*', 's3:Describe*', 's3:GetObject'], 'Resource': [f'arn:aws:s3:::{dataset2.S3BucketName}', f'arn:aws:s3:::{dataset2.S3BucketName}/*'], }, { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [f'arn:aws:kms:{dataset2.region}:{dataset2.AwsAccountId}:key/kms-key'], }, ], } + mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], ) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', return_value=True, ) + # Gets policy with S3 and KMS iam_update_role_policy_mock_1 = mocker.patch( 'dataall.base.aws.iam.IAM.get_managed_policy_default_version', return_value=('v1', policy) @@ -1427,27 +1590,33 @@ def test_check_s3_iam_access_wrong_actions(mocker, dataset2, share2_manager): 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S3', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S31', 'Effect': 'Allow', 'Action': ['s3:*'], 'Resource': [f'arn:aws:s3:::{dataset2.S3BucketName}', f'arn:aws:s3:::{dataset2.S3BucketName}/*'], }, { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': [f'arn:aws:kms:{dataset2.region}:{dataset2.AwsAccountId}:key/kms-key'], }, ], } + mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', - return_value=True, + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], ) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', + 
'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', return_value=True, ) + # Gets policy with S3 and KMS iam_update_role_policy_mock_1 = mocker.patch( 'dataall.base.aws.iam.IAM.get_managed_policy_default_version', return_value=('v1', policy) @@ -1473,8 +1642,16 @@ def test_check_s3_iam_access_no_policy(mocker, dataset2, share2_manager): # There is not existing IAM policy in the requesters account for the dataset's S3bucket # Check if the update_role_policy func is called and policy statements are added - # When policy does not exist - iam_update_role_policy_mock_1 = mocker.patch( + mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=[], + ) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=[]) + + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=[]) + + mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', return_value=False, ) @@ -1484,7 +1661,6 @@ def test_check_s3_iam_access_no_policy(mocker, dataset2, share2_manager): # When share2_manager.check_s3_iam_access() # Then - iam_update_role_policy_mock_1.assert_called_once() assert (len(share2_manager.bucket_errors)) == 1 assert 'IAM Policy Target Resource' in share2_manager.bucket_errors[0] @@ -1494,15 +1670,39 @@ def test_check_s3_iam_access_policy_not_attached(mocker, dataset2, share2_manage # There is not existing IAM policy in the requesters account for the dataset's S3bucket # Check if the update_role_policy func is called and policy statements are added + policy = { + 'Version': '2012-10-17', + 'Statement': [ + { + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S31', + 'Effect': 'Allow', + 'Action': ['s3:*'], + 'Resource': [f'arn:aws:s3:::{dataset2.S3BucketName}', f'arn:aws:s3:::{dataset2.S3BucketName}/*'], + }, + { + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1', + 'Effect': 'Allow', + 'Action': ['kms:*'], + 'Resource': [f'arn:aws:kms:{dataset2.region}:{dataset2.AwsAccountId}:key/kms-key'], + }, + ], + } + + mocker.patch('dataall.base.aws.iam.IAM.get_managed_policy_default_version', return_value=('v1', policy)) + # When policy does not exist - iam_update_role_policy_mock_1 = mocker.patch( + mocker.patch( 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists', return_value=True, ) - mocker.patch( - 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached', - return_value=False, + + iam_update_role_policy_mock_1 = mocker.patch( + 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role', + return_value=['policy-0'], ) + mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0']) + + mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=[]) kms_client = mock_kms_client(mocker) kms_client().get_key_id.return_value = 'kms-key' @@ -1523,7 +1723,7 @@ def test_check_s3_iam_access_missing_policy_statement(mocker, dataset2, share2_m 'Version': '2012-10-17', 'Statement': [ { - 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS', + 'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1', 'Effect': 'Allow', 'Action': ['kms:*'], 'Resource': 
@@ -1534,10 +1734,15 @@ def test_check_s3_iam_access_missing_policy_statement(mocker, dataset2, share2_m
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
         return_value=True,
     )
+
     mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
-        return_value=True,
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role',
+        return_value=[],
     )
+
+    mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0'])
+
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
     # Gets policy with other S3 and KMS
     iam_update_role_policy_mock_1 = mocker.patch(
         'dataall.base.aws.iam.IAM.get_managed_policy_default_version', return_value=('v1', policy)
@@ -1550,7 +1755,8 @@ def test_check_s3_iam_access_missing_policy_statement(mocker, dataset2, share2_m
     # Then
     iam_update_role_policy_mock_1.assert_called_once()
     assert (
-        f'missing IAM Policy Statement permissions: {IAM_S3_BUCKETS_STATEMENT_SID}S3' in share2_manager.bucket_errors[0]
+        f'missing IAM Policy Statement Sid permissions: {IAM_S3_BUCKETS_STATEMENT_SID}S3'
+        in share2_manager.bucket_errors[0]
     )
     assert (
         f'missing IAM Policy Resource permissions: {IAM_S3_BUCKETS_STATEMENT_SID}KMS' in share2_manager.bucket_errors[1]
@@ -1565,27 +1771,33 @@ def test_check_s3_iam_access_missing_target_resource(mocker, dataset2, share2_ma
         'Version': '2012-10-17',
         'Statement': [
             {
-                'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S3',
+                'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}S31',
                 'Effect': 'Allow',
                 'Action': ['s3:*'],
                 'Resource': [f'arn:aws:s3:::S3Bucket', f'arn:aws:s3:::S3Bucket/*'],
             },
             {
-                'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS',
+                'Sid': f'{IAM_S3_BUCKETS_STATEMENT_SID}KMS1',
                 'Effect': 'Allow',
                 'Action': ['kms:*'],
                 'Resource': [f'arn:aws:kms:us-east-1:12121121121:key/some-kms-key'],
             },
         ],
     }
+
     mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
-        return_value=True,
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role',
+        return_value=[],
     )
+
+    mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=['policy-0'])
+
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
     mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
         return_value=True,
     )
+
     # Gets policy with other S3 and KMS
     iam_update_role_policy_mock_1 = mocker.patch(
         'dataall.base.aws.iam.IAM.get_managed_policy_default_version', return_value=('v1', policy)
@@ -1599,7 +1811,8 @@ def test_check_s3_iam_access_missing_target_resource(mocker, dataset2, share2_ma
     iam_update_role_policy_mock_1.assert_called_once()
     assert (len(share2_manager.bucket_errors)) == 2
     assert (
-        f'missing IAM Policy Resource permissions: {IAM_S3_BUCKETS_STATEMENT_SID}S3' in share2_manager.bucket_errors[0]
+        f'missing IAM Policy Resource(s) permissions: {IAM_S3_BUCKETS_STATEMENT_SID}S3'
+        in share2_manager.bucket_errors[0]
     )
     assert (
         f'missing IAM Policy Resource permissions: {IAM_S3_BUCKETS_STATEMENT_SID}KMS' in share2_manager.bucket_errors[1]
diff --git a/tests/modules/s3_datasets_shares/test_share.py b/tests/modules/s3_datasets_shares/test_share.py
index 1fbd3770c..1a1892333 100644
--- a/tests/modules/s3_datasets_shares/test_share.py
+++ b/tests/modules/s3_datasets_shares/test_share.py
@@ -8,6 +8,7 @@
 from assertpy import assert_that

 from dataall.base.utils.expiration_util import ExpirationUtils
+from dataall.base.utils.naming_convention import NamingConventionPattern, NamingConventionService
 from dataall.core.environment.db.environment_models import Environment, EnvironmentGroup, ConsumptionRole
 from dataall.core.organizations.db.organization_models import Organization
 from dataall.modules.shares_base.services.share_object_service import ShareObjectService
@@ -1272,15 +1273,18 @@ def test_create_share_object_as_requester(mocker, client, user2, group2, env2gro
         'dataall.base.aws.iam.IAM.get_role_arn_by_name',
         return_value='role_arn',
     )
-    # When a user that belongs to environment and group creates request
+
+    old_policy_exists = False
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
-        return_value=True,
+        return_value=old_policy_exists,
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
         return_value=True,
     )
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
+
     create_share_object_response = create_share_object(
         mocker=mocker,
         client=client,
@@ -1306,14 +1310,18 @@ def test_create_share_object_as_approver_and_requester(mocker, client, user, gro
         'dataall.base.aws.iam.IAM.get_role_arn_by_name',
         return_value='role_arn',
     )
+    old_policy_exists = False
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
-        return_value=True,
+        return_value=old_policy_exists,
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
         return_value=True,
     )
+
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
+
     create_share_object_response = create_share_object(
         mocker=mocker,
         client=client,
@@ -1339,14 +1347,18 @@ def test_create_share_object_invalid_account(mocker, client, user, group2, env2g
         'dataall.base.aws.iam.IAM.get_role_arn_by_name',
         return_value='role_arn',
     )
+    old_policy_exists = False
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
-        return_value=True,
+        return_value=old_policy_exists,
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
         return_value=True,
     )
+
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
+
     create_share_object_response = create_share_object(
         mocker=mocker,
         client=client,
@@ -1375,12 +1387,18 @@ def test_create_share_object_with_item_authorized(
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
+        return_value=False,
+    )
+    mocker.patch(
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
         return_value=True,
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
         return_value=True,
     )
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
+
     create_share_object_response = create_share_object(
         mocker=mocker,
         client=client,
@@ -1424,16 +1442,18 @@ def test_create_share_object_share_policy_not_attached_attachMissingPolicies_ena
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
-        return_value=True,
+        return_value=False,
     )
     mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
-        return_value=False,
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role',
+        return_value='policy-0',
     )
     attach_mocker = mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policy',
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policies',
         return_value=True,
     )
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
+
     create_share_object_response = create_share_object(
         mocker=mocker,
         client=client,
@@ -1465,16 +1485,18 @@ def test_create_share_object_share_policy_not_attached_attachMissingPolicies_dis
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
-        return_value=True,
+        return_value=False,
     )
     mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
-        return_value=False,
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role',
+        return_value='policy-0',
    )
     attach_mocker = mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policy',
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policies',
         return_value=True,
     )
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
+
     create_share_object_response = create_share_object(
         mocker=mocker,
         client=client,
@@ -1506,12 +1528,14 @@ def test_create_share_object_share_policy_not_attached_attachMissingPolicies_dis
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
-        return_value=True,
+        return_value=False,
     )
     mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
-        return_value=False,
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role',
+        return_value='policy-0',
     )
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
+
     consumption_role = MagicMock(spec_set=ConsumptionRole)
     consumption_role.IAMRoleName = 'randomName'
     consumption_role.IAMRoleArn = 'randomArn'
@@ -1533,8 +1557,8 @@ def test_create_share_object_share_policy_not_attached_attachMissingPolicies_dis
         principalType=PrincipalType.ConsumptionRole.value,
     )
     # Then share object is not created and an error appears
-    assert 'Required customer managed policy' in create_share_object_response.errors[0].message
-    assert 'is not attached to role randomName' in create_share_object_response.errors[0].message
+    assert 'Required customer managed policies' in create_share_object_response.errors[0].message
+    assert 'are not attached to role randomName' in create_share_object_response.errors[0].message


 def test_create_share_object_with_share_expiration_added(
@@ -1546,12 +1570,18 @@ def test_create_share_object_with_share_expiration_added(
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
-        return_value=True,
+        return_value=False,
     )
     mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role',
+        return_value='policy-0',
+    )
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
+    mocker.patch(
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policies',
         return_value=True,
     )
+
     create_share_object_response = create_share_object(
         mocker=mocker,
         client=client,
@@ -1586,12 +1616,18 @@ def test_create_share_object_with_non_expiring_share(
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
-        return_value=True,
+        return_value=False,
     )
     mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role',
+        return_value='policy-0',
+    )
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
+    mocker.patch(
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policies',
         return_value=True,
     )
+
     create_share_object_response = create_share_object(
         mocker=mocker,
         client=client,
@@ -1620,12 +1656,18 @@ def test_create_share_object_with_share_expiration_incorrect_share_expiration(
     )
     mocker.patch(
         'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_exists',
-        return_value=True,
+        return_value=False,
    )
     mocker.patch(
-        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.check_if_policy_attached',
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.get_policies_unattached_to_role',
+        return_value='policy-0',
+    )
+    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=['policy-0'])
+    mocker.patch(
+        'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService.attach_policies',
         return_value=True,
     )
+
     create_share_object_response = create_share_object(
         mocker=mocker,
         client=client,
diff --git a/tests/permissions.py b/tests/permissions.py
index b5746c5ca..4a3212eb5 100644
--- a/tests/permissions.py
+++ b/tests/permissions.py
@@ -209,9 +209,6 @@ def __post_init__(self):
     field_id('Category', 'terms'): TestData(
         resource_ignore=IgnoreReason.INTRAMODULE, tenant_ignore=IgnoreReason.NOTREQUIRED
     ),
-    field_id('ConsumptionRole', 'managedPolicies'): TestData(
-        resource_perm=GET_ENVIRONMENT, tenant_ignore=IgnoreReason.NOTREQUIRED
-    ),
     field_id('Dashboard', 'environment'): TestData(
         resource_perm=GET_ENVIRONMENT, tenant_ignore=IgnoreReason.NOTREQUIRED
     ),
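Note (not part of the patch): the test hunks above repeat the same pytest-mock wiring for the new multi-policy checks. The sketch below condenses that wiring for reviewers; the helper name `_patch_share_policy_mocks` and its default argument values are hypothetical, while the patched dotted paths are the ones actually used in the tests.

# Illustrative sketch only, not part of the patch. Assumes the pytest-mock
# 'mocker' fixture; the helper name and defaults are made up for the example.
SERVICE = 'dataall.modules.s3_datasets_shares.services.s3_share_managed_policy_service.S3SharePolicyService'


def _patch_share_policy_mocks(mocker, policy_exists=True, unattached=(), attached=(), matching=()):
    # Whether the share managed policy (or policies) already exist in the account
    mocker.patch(f'{SERVICE}.check_if_policy_exists', return_value=policy_exists)
    # Share policies that are not yet attached to the requester IAM role
    mocker.patch(f'{SERVICE}.get_policies_unattached_to_role', return_value=list(unattached))
    # Managed policies currently attached to the role (IAM helper added in this PR)
    mocker.patch('dataall.base.aws.iam.IAM.get_attached_managed_policies_to_role', return_value=list(attached))
    # Account policies whose names match the share-policy naming pattern (IAM helper added in this PR)
    mocker.patch('dataall.base.aws.iam.IAM.list_policy_names_by_policy_pattern', return_value=list(matching))

For example, the happy-path access check above corresponds roughly to _patch_share_policy_mocks(mocker, policy_exists=True, unattached=[], attached=['policy-0'], matching=['policy-0']).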