diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index a06b7d5f59..0fc357507e 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -185,6 +185,7 @@ jobs: "flex", "ip-access-list", "networkcontainer-controller", + "networkpeering-controller", ] steps: - name: Get repo files from cache diff --git a/.github/workflows/validate-manifests.yml b/.github/workflows/validate-manifests.yml index 1cacbb6496..56e179b49a 100644 --- a/.github/workflows/validate-manifests.yml +++ b/.github/workflows/validate-manifests.yml @@ -16,4 +16,4 @@ jobs: with: enable-cache: 'true' - name: Run testing - run: devbox run -- 'make validate-manifests' + run: devbox run -- 'git restore . && make validate-manifests' diff --git a/.mockery.yaml b/.mockery.yaml index 9e1d37f88f..3835bb0cd1 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -17,3 +17,4 @@ packages: github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/maintenancewindow: github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/encryptionatrest: github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer: + github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkpeering: diff --git a/PROJECT b/PROJECT index 0e847c6f1a..f428398354 100644 --- a/PROJECT +++ b/PROJECT @@ -135,4 +135,12 @@ resources: kind: AtlasNetworkContainer path: github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1 version: v1 +- api: + crdVersion: v1 + namespaced: true + domain: mongodb.com + group: atlas + kind: AtlasNetworkPeering + path: github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1 + version: v1 version: "3" diff --git a/api/v1/atlasnetworkpeering_types.go b/api/v1/atlasnetworkpeering_types.go index 1829befba8..2270c5e2c7 100644 --- a/api/v1/atlasnetworkpeering_types.go +++ b/api/v1/atlasnetworkpeering_types.go @@ -16,14 +16,76 @@ limitations under the License. 
package v1 +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status" +) + +func init() { + SchemeBuilder.Register(&AtlasNetworkPeering{}, &AtlasNetworkPeeringList{}) +} + +// AtlasNetworkPeering is the Schema for the AtlasNetworkPeering API +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status` +// +kubebuilder:printcolumn:name="Provider",type=string,JSONPath=`.spec.provider` +// +kubebuilder:printcolumn:name="Id",type=string,JSONPath=`.status.id` +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.status` +// +kubebuilder:subresource:status +// +groupName:=atlas.mongodb.com +// +kubebuilder:resource:categories=atlas,shortName=anp +type AtlasNetworkPeering struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AtlasNetworkPeeringSpec `json:"spec,omitempty"` + Status status.AtlasNetworkPeeringStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AtlasNetworkPeeringList contains a list of AtlasNetworkPeering +type AtlasNetworkPeeringList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AtlasNetworkPeering `json:"items"` +} + +// +kubebuilder:validation:XValidation:rule="(has(self.externalProjectRef) && !has(self.projectRef)) || (!has(self.externalProjectRef) && has(self.projectRef))",message="must define only one project reference through externalProjectRef or projectRef" +// +kubebuilder:validation:XValidation:rule="(has(self.externalProjectRef) && has(self.connectionSecret)) || !has(self.externalProjectRef)",message="must define a local connection secret when referencing an external project" +// 
+kubebuilder:validation:XValidation:rule="(has(self.containerRef.name) && !has(self.containerRef.id)) || (!has(self.containerRef.name) && has(self.containerRef.id))",message="must either have a container Atlas id or Kubernetes name, but not both (or neither)" + +// AtlasNetworkPeeringSpec defines the desired state of AtlasNetworkPeering +type AtlasNetworkPeeringSpec struct { + ProjectDualReference `json:",inline"` + + ContainerRef ContainerDualReference `json:"containerRef"` + + AtlasNetworkPeeringConfig `json:",inline"` +} + +// ContainerDualReference refers to an Network Container either by Kubernetes name or Atlas ID +type ContainerDualReference struct { + // Name of the container Kubernetes resource, must be present in the same namespace + // Use either name or ID, not both. + // +optional + Name string `json:"name,omitempty"` + + // ID is the Atlas identifier of the Network Container Atlas resource this Peering Connection relies on + // Use either name or ID, not both. + // +optional + ID string `json:"id,omitempty"` +} + +// AtlasNetworkPeeringConfig defines the Atlas specifics of the desired state of Peering Connections type AtlasNetworkPeeringConfig struct { // Name of the cloud service provider for which you want to create the network peering service. // +kubebuilder:validation:Enum=AWS;GCP;AZURE // +kubebuilder:validation:Required Provider string `json:"provider"` - // ID of the network peer container. If not set, operator will create a new container with ContainerRegion and AtlasCIDRBlock input. - // +optional - ContainerID string `json:"containerId"` // AWSConfiguration is the specific AWS settings for network peering // +kubebuilder:validation:Optional @@ -36,40 +98,66 @@ type AtlasNetworkPeeringConfig struct { GCPConfiguration *GCPNetworkPeeringConfiguration `json:"gcpConfiguration,omitempty"` } -type AtlasProviderContainerConfig struct { - // ContainerRegion is the provider region name of Atlas network peer container. 
If not set, AccepterRegionName is used. - // +optional - ContainerRegion string `json:"containerRegion"` - // Atlas CIDR. It needs to be set if ContainerID is not set. - // +optional - AtlasCIDRBlock string `json:"atlasCidrBlock"` -} - +// AWSNetworkPeeringConfiguration defines the Atlas desired state for AWS type AWSNetworkPeeringConfiguration struct { - // AccepterRegionName is the provider region name of user's vpc. + // AccepterRegionName is the provider region name of user's vpc in AWS native region format + // +kubebuilder:validation:Required AccepterRegionName string `json:"accepterRegionName"` // AccountID of the user's vpc. + // +kubebuilder:validation:Required AWSAccountID string `json:"awsAccountId,omitempty"` // User VPC CIDR. + // +kubebuilder:validation:Required RouteTableCIDRBlock string `json:"routeTableCidrBlock,omitempty"` // AWS VPC ID. + // +kubebuilder:validation:Required VpcID string `json:"vpcId,omitempty"` } +// AzureNetworkPeeringConfiguration defines the Atlas desired state for Azure type AzureNetworkPeeringConfiguration struct { //AzureDirectoryID is the unique identifier for an Azure AD directory. + // +kubebuilder:validation:Required AzureDirectoryID string `json:"azureDirectoryId,omitempty"` // AzureSubscriptionID is the unique identifier of the Azure subscription in which the VNet resides. + // +kubebuilder:validation:Required AzureSubscriptionID string `json:"azureSubscriptionId,omitempty"` //ResourceGroupName is the name of your Azure resource group. + // +kubebuilder:validation:Required ResourceGroupName string `json:"resourceGroupName,omitempty"` // VNetName is name of your Azure VNet. Its applicable only for Azure. - VNetName string `json:"vnetName,omitempty"` + // +kubebuilder:validation:Required + VNetName string `json:"vNetName,omitempty"` } +// GCPNetworkPeeringConfiguration defines the Atlas desired state for Google type GCPNetworkPeeringConfiguration struct { // User GCP Project ID. Its applicable only for GCP. 
+ // +kubebuilder:validation:Required GCPProjectID string `json:"gcpProjectId,omitempty"` // GCP Network Peer Name. Its applicable only for GCP. + // +kubebuilder:validation:Required NetworkName string `json:"networkName,omitempty"` } + +func (np *AtlasNetworkPeering) GetStatus() api.Status { + return np.Status +} + +func (np *AtlasNetworkPeering) Credentials() *api.LocalObjectReference { + return np.Spec.ConnectionSecret +} + +func (np *AtlasNetworkPeering) ProjectDualRef() *ProjectDualReference { + return &np.Spec.ProjectDualReference +} + +func (np *AtlasNetworkPeering) UpdateStatus(conditions []api.Condition, options ...api.Option) { + np.Status.Conditions = conditions + np.Status.ObservedGeneration = np.ObjectMeta.Generation + + for _, o := range options { + v := o.(status.AtlasNetworkPeeringStatusOption) + v(&np.Status) + } +} diff --git a/api/v1/atlasnetworkpeering_types_test.go b/api/v1/atlasnetworkpeering_types_test.go new file mode 100644 index 0000000000..49499c43c1 --- /dev/null +++ b/api/v1/atlasnetworkpeering_types_test.go @@ -0,0 +1,77 @@ +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/cel" +) + +func TestPeeringCELChecks(t *testing.T) { + for _, tc := range []struct { + title string + obj *AtlasNetworkPeering + expectedErrors []string + }{ + { + title: "Missing container ref in peering fails", + obj: &AtlasNetworkPeering{ + Spec: AtlasNetworkPeeringSpec{}, + }, + expectedErrors: []string{"spec: Invalid value: \"object\": must either have a container Atlas id or Kubernetes name, but not both (or neither)"}, + }, + + { + title: "Named container ref works", + obj: &AtlasNetworkPeering{ + Spec: AtlasNetworkPeeringSpec{ + ContainerRef: ContainerDualReference{ + Name: "Some-name", + }, + }, + }, + }, + + { + title: "Container 
id ref works", + obj: &AtlasNetworkPeering{ + Spec: AtlasNetworkPeeringSpec{ + ContainerRef: ContainerDualReference{ + ID: "some-id", + }, + }, + }, + }, + + { + title: "Both container id and name ref fails", + obj: &AtlasNetworkPeering{ + Spec: AtlasNetworkPeeringSpec{ + ContainerRef: ContainerDualReference{ + Name: "Some-name", + ID: "some-id", + }, + }, + }, + expectedErrors: []string{"spec: Invalid value: \"object\": must either have a container Atlas id or Kubernetes name, but not both (or neither)"}, + }, + } { + t.Run(tc.title, func(t *testing.T) { + // inject a project to avoid other CEL validations being hit + tc.obj.Spec.ProjectRef = &common.ResourceRefNamespaced{Name: "some-project"} + unstructuredObject, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.obj) + require.NoError(t, err) + + crdPath := "../../config/crd/bases/atlas.mongodb.com_atlasnetworkpeerings.yaml" + validator, err := cel.VersionValidatorFromFile(t, crdPath, "v1") + assert.NoError(t, err) + errs := validator(unstructuredObject, nil) + + require.Equal(t, tc.expectedErrors, cel.ErrorListAsStrings(errs)) + }) + } +} diff --git a/api/v1/project_reference_cel_test.go b/api/v1/project_reference_cel_test.go index 1b366593ab..f34e0e6ed5 100644 --- a/api/v1/project_reference_cel_test.go +++ b/api/v1/project_reference_cel_test.go @@ -55,6 +55,14 @@ var dualRefCRDs = []struct { }, filename: "atlas.mongodb.com_atlasnetworkcontainers.yaml", }, + { + obj: &AtlasNetworkPeering{ + Spec: AtlasNetworkPeeringSpec{ // Avoid triggering peering specific validations + ContainerRef: ContainerDualReference{Name: "fake-ref"}, + }, + }, + filename: "atlas.mongodb.com_atlasnetworkpeerings.yaml", + }, } var testCases = []struct { @@ -165,7 +173,14 @@ func TestProjectDualReferenceCELValidations(t *testing.T) { assert.NoError(t, err) errs := validator(unstructuredObject, unstructuredOldObject) - require.Equal(t, tc.expectedErrors, cel.ErrorListAsStrings(errs)) + for i, err := range errs { + 
fmt.Printf("%s error %d: %v\n", title, i, err) + } + + require.Equal(t, len(tc.expectedErrors), len(errs)) + for i, err := range errs { + assert.Equal(t, tc.expectedErrors[i], err.Error()) + } }) } } diff --git a/api/v1/status/atlasnetworkpeering.go b/api/v1/status/atlasnetworkpeering.go index 63114d67d2..e514fd3a09 100644 --- a/api/v1/status/atlasnetworkpeering.go +++ b/api/v1/status/atlasnetworkpeering.go @@ -1,13 +1,39 @@ package status -// AWSContainerStatus contains AWS only related status information -type AWSContainerStatus struct { +import "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + +// AtlasNetworkPeeringStatus is a status for the AtlasNetworkPeering Custom resource. +// Not the one included in the AtlasProject +type AtlasNetworkPeeringStatus struct { + api.Common `json:",inline"` + + // ID records the identifier of the peer created by Atlas + ID string `json:"id,omitempty"` + + // Status describes the last status seen for the network peering setup + Status string `json:"status,omitempty"` + + // AWSStatus contains AWS only related status information + AWSStatus *AWSPeeringStatus `json:"awsStatus,omitempty"` + + // AzureStatus contains Azure only related status information + AzureStatus *AzurePeeringStatus `json:"azureStatus,omitempty"` + + // GCPStatus contains GCP only related status information + GCPStatus *GCPPeeringStatus `json:"gcpStatus,omitempty"` +} + +// AWSPeeringStatus contains AWS only related status for network peering & container +type AWSPeeringStatus struct { // VpcID is AWS VPC id on the Atlas side VpcID string `json:"vpcId,omitempty"` + + // ConnectionID is the AWS VPC peering connection ID + ConnectionID string `json:"connectionId,omitempty"` } -// AzureContainerStatus contains Azure only related status information -type AzureContainerStatus struct { +// AzurePeeringStatus contains Azure only related status information +type AzurePeeringStatus struct { // AzureSubscriptionID is Azure Subscription id on the Atlas side 
AzureSubscriptionID string `json:"azureSubscriptionIDpcId,omitempty"` @@ -15,11 +41,15 @@ type AzureContainerStatus struct { VnetName string `json:"vNetName,omitempty"` } -// GCPContainerStatus contains GCP only related status information -type GCPContainerStatus struct { +// GCPPeeringStatus contains GCP only related status information +type GCPPeeringStatus struct { // GCPProjectID is GCP project on the Atlas side GCPProjectID string `json:"gcpProjectID,omitempty"` // NetworkName is GCP network on the Atlas side NetworkName string `json:"networkName,omitempty"` } + +// +kubebuilder:object:generate=false + +type AtlasNetworkPeeringStatusOption func(s *AtlasNetworkPeeringStatus) diff --git a/api/v1/status/zz_generated.deepcopy.go b/api/v1/status/zz_generated.deepcopy.go index 70389d39a2..dfa79b2633 100644 --- a/api/v1/status/zz_generated.deepcopy.go +++ b/api/v1/status/zz_generated.deepcopy.go @@ -19,16 +19,16 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSContainerStatus) DeepCopyInto(out *AWSContainerStatus) { +func (in *AWSPeeringStatus) DeepCopyInto(out *AWSPeeringStatus) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSContainerStatus. -func (in *AWSContainerStatus) DeepCopy() *AWSContainerStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPeeringStatus. +func (in *AWSPeeringStatus) DeepCopy() *AWSPeeringStatus { if in == nil { return nil } - out := new(AWSContainerStatus) + out := new(AWSPeeringStatus) in.DeepCopyInto(out) return out } @@ -228,6 +228,37 @@ func (in *AtlasNetworkPeer) DeepCopy() *AtlasNetworkPeer { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AtlasNetworkPeeringStatus) DeepCopyInto(out *AtlasNetworkPeeringStatus) { + *out = *in + in.Common.DeepCopyInto(&out.Common) + if in.AWSStatus != nil { + in, out := &in.AWSStatus, &out.AWSStatus + *out = new(AWSPeeringStatus) + **out = **in + } + if in.AzureStatus != nil { + in, out := &in.AzureStatus, &out.AzureStatus + *out = new(AzurePeeringStatus) + **out = **in + } + if in.GCPStatus != nil { + in, out := &in.GCPStatus, &out.GCPStatus + *out = new(GCPPeeringStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasNetworkPeeringStatus. +func (in *AtlasNetworkPeeringStatus) DeepCopy() *AtlasNetworkPeeringStatus { + if in == nil { + return nil + } + out := new(AtlasNetworkPeeringStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AtlasPrivateEndpointStatus) DeepCopyInto(out *AtlasPrivateEndpointStatus) { *out = *in @@ -387,16 +418,16 @@ func (in *AtlasStreamInstanceStatus) DeepCopy() *AtlasStreamInstanceStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureContainerStatus) DeepCopyInto(out *AzureContainerStatus) { +func (in *AzurePeeringStatus) DeepCopyInto(out *AzurePeeringStatus) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureContainerStatus. -func (in *AzureContainerStatus) DeepCopy() *AzureContainerStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePeeringStatus. 
+func (in *AzurePeeringStatus) DeepCopy() *AzurePeeringStatus { if in == nil { return nil } - out := new(AzureContainerStatus) + out := new(AzurePeeringStatus) in.DeepCopyInto(out) return out } @@ -635,46 +666,46 @@ func (in *FeatureUsage) DeepCopy() *FeatureUsage { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPContainerStatus) DeepCopyInto(out *GCPContainerStatus) { +func (in *GCPEndpoint) DeepCopyInto(out *GCPEndpoint) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPContainerStatus. -func (in *GCPContainerStatus) DeepCopy() *GCPContainerStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPEndpoint. +func (in *GCPEndpoint) DeepCopy() *GCPEndpoint { if in == nil { return nil } - out := new(GCPContainerStatus) + out := new(GCPEndpoint) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPEndpoint) DeepCopyInto(out *GCPEndpoint) { +func (in *GCPForwardingRule) DeepCopyInto(out *GCPForwardingRule) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPEndpoint. -func (in *GCPEndpoint) DeepCopy() *GCPEndpoint { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPForwardingRule. +func (in *GCPForwardingRule) DeepCopy() *GCPForwardingRule { if in == nil { return nil } - out := new(GCPEndpoint) + out := new(GCPForwardingRule) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GCPForwardingRule) DeepCopyInto(out *GCPForwardingRule) { +func (in *GCPPeeringStatus) DeepCopyInto(out *GCPPeeringStatus) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPForwardingRule. -func (in *GCPForwardingRule) DeepCopy() *GCPForwardingRule { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPeeringStatus. +func (in *GCPPeeringStatus) DeepCopy() *GCPPeeringStatus { if in == nil { return nil } - out := new(GCPForwardingRule) + out := new(GCPPeeringStatus) in.DeepCopyInto(out) return out } diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 44cbc2da99..1eb7108046 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1197,6 +1197,33 @@ func (in *AtlasNetworkContainerSpec) DeepCopy() *AtlasNetworkContainerSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtlasNetworkPeering) DeepCopyInto(out *AtlasNetworkPeering) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasNetworkPeering. +func (in *AtlasNetworkPeering) DeepCopy() *AtlasNetworkPeering { + if in == nil { + return nil + } + out := new(AtlasNetworkPeering) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AtlasNetworkPeering) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AtlasNetworkPeeringConfig) DeepCopyInto(out *AtlasNetworkPeeringConfig) { *out = *in @@ -1227,6 +1254,56 @@ func (in *AtlasNetworkPeeringConfig) DeepCopy() *AtlasNetworkPeeringConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtlasNetworkPeeringList) DeepCopyInto(out *AtlasNetworkPeeringList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AtlasNetworkPeering, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasNetworkPeeringList. +func (in *AtlasNetworkPeeringList) DeepCopy() *AtlasNetworkPeeringList { + if in == nil { + return nil + } + out := new(AtlasNetworkPeeringList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AtlasNetworkPeeringList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtlasNetworkPeeringSpec) DeepCopyInto(out *AtlasNetworkPeeringSpec) { + *out = *in + in.ProjectDualReference.DeepCopyInto(&out.ProjectDualReference) + out.ContainerRef = in.ContainerRef + in.AtlasNetworkPeeringConfig.DeepCopyInto(&out.AtlasNetworkPeeringConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasNetworkPeeringSpec. 
+func (in *AtlasNetworkPeeringSpec) DeepCopy() *AtlasNetworkPeeringSpec { + if in == nil { + return nil + } + out := new(AtlasNetworkPeeringSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AtlasOnDemandPolicy) DeepCopyInto(out *AtlasOnDemandPolicy) { *out = *in @@ -1492,21 +1569,6 @@ func (in *AtlasProjectSpec) DeepCopy() *AtlasProjectSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AtlasProviderContainerConfig) DeepCopyInto(out *AtlasProviderContainerConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasProviderContainerConfig. -func (in *AtlasProviderContainerConfig) DeepCopy() *AtlasProviderContainerConfig { - if in == nil { - return nil - } - out := new(AtlasProviderContainerConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AtlasSearchIndexAnalyzer) DeepCopyInto(out *AtlasSearchIndexAnalyzer) { *out = *in @@ -2210,6 +2272,21 @@ func (in *ConnectionStrings) DeepCopy() *ConnectionStrings { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerDualReference) DeepCopyInto(out *ContainerDualReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDualReference. +func (in *ContainerDualReference) DeepCopy() *ContainerDualReference { + if in == nil { + return nil + } + out := new(ContainerDualReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CopySetting) DeepCopyInto(out *CopySetting) { *out = *in diff --git a/config/crd/bases/atlas.mongodb.com_atlasnetworkpeerings.yaml b/config/crd/bases/atlas.mongodb.com_atlasnetworkpeerings.yaml new file mode 100644 index 0000000000..5175f42454 --- /dev/null +++ b/config/crd/bases/atlas.mongodb.com_atlasnetworkpeerings.yaml @@ -0,0 +1,286 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: atlasnetworkpeerings.atlas.mongodb.com +spec: + group: atlas.mongodb.com + names: + categories: + - atlas + kind: AtlasNetworkPeering + listKind: AtlasNetworkPeeringList + plural: atlasnetworkpeerings + shortNames: + - anp + singular: atlasnetworkpeering + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.provider + name: Provider + type: string + - jsonPath: .status.id + name: Id + type: string + - jsonPath: .status.status + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: AtlasNetworkPeering is the Schema for the AtlasNetworkPeering + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AtlasNetworkPeeringSpec defines the desired state of AtlasNetworkPeering + properties: + awsConfiguration: + description: AWSConfiguration is the specific AWS settings for network + peering + properties: + accepterRegionName: + description: AccepterRegionName is the provider region name of + user's vpc in AWS native region format + type: string + awsAccountId: + description: AccountID of the user's vpc. + type: string + routeTableCidrBlock: + description: User VPC CIDR. + type: string + vpcId: + description: AWS VPC ID. + type: string + required: + - accepterRegionName + - awsAccountId + - routeTableCidrBlock + - vpcId + type: object + azureConfiguration: + description: AzureConfiguration is the specific Azure settings for + network peering + properties: + azureDirectoryId: + description: AzureDirectoryID is the unique identifier for an + Azure AD directory. + type: string + azureSubscriptionId: + description: AzureSubscriptionID is the unique identifier of the + Azure subscription in which the VNet resides. + type: string + resourceGroupName: + description: ResourceGroupName is the name of your Azure resource + group. + type: string + vNetName: + description: VNetName is name of your Azure VNet. Its applicable + only for Azure. 
+ type: string + required: + - azureDirectoryId + - azureSubscriptionId + - resourceGroupName + - vNetName + type: object + connectionSecret: + description: Name of the secret containing Atlas API private and public + keys + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + required: + - name + type: object + containerRef: + description: ContainerDualReference refers to an Network Container + either by Kubernetes name or Atlas ID + properties: + id: + description: |- + ID is the Atlas identifier of the Network Container Atlas resource this Peering Connection relies on + Use either name or ID, not both. + type: string + name: + description: |- + Name of the container Kubernetes resource, must be present in the same namespace + Use either name or ID, not both. + type: string + type: object + externalProjectRef: + description: |- + "externalProjectRef" holds the parent Atlas project ID. + Mutually exclusive with the "projectRef" field + properties: + id: + description: ID is the Atlas project ID + type: string + required: + - id + type: object + gcpConfiguration: + description: GCPConfiguration is the specific Google Cloud settings + for network peering + properties: + gcpProjectId: + description: User GCP Project ID. Its applicable only for GCP. + type: string + networkName: + description: GCP Network Peer Name. Its applicable only for GCP. + type: string + required: + - gcpProjectId + - networkName + type: object + projectRef: + description: |- + "projectRef" is a reference to the parent AtlasProject resource. 
+ Mutually exclusive with the "externalProjectRef" field + properties: + name: + description: Name is the name of the Kubernetes Resource + type: string + namespace: + description: Namespace is the namespace of the Kubernetes Resource + type: string + required: + - name + type: object + provider: + description: Name of the cloud service provider for which you want + to create the network peering service. + enum: + - AWS + - GCP + - AZURE + type: string + required: + - containerRef + - provider + type: object + x-kubernetes-validations: + - message: must define only one project reference through externalProjectRef + or projectRef + rule: (has(self.externalProjectRef) && !has(self.projectRef)) || (!has(self.externalProjectRef) + && has(self.projectRef)) + - message: must define a local connection secret when referencing an external + project + rule: (has(self.externalProjectRef) && has(self.connectionSecret)) || + !has(self.externalProjectRef) + - message: must either have a container Atlas id or Kubernetes name, but + not both (or neither) + rule: (has(self.containerRef.name) && !has(self.containerRef.id)) || + (!has(self.containerRef.name) && has(self.containerRef.id)) + status: + description: |- + AtlasNetworkPeeringStatus is a status for the AtlasNetworkPeering Custom resource. 
+ Not the one included in the AtlasProject + properties: + awsStatus: + description: AWSStatus contains AWS only related status information + properties: + connectionId: + description: ConnectionID is the AWS VPC peering connection ID + type: string + vpcId: + description: VpcID is AWS VPC id on the Atlas side + type: string + type: object + azureStatus: + description: AzureStatus contains Azure only related status information + properties: + azureSubscriptionIDpcId: + description: AzureSubscriptionID is Azure Subscription id on the + Atlas side + type: string + vNetName: + description: VnetName is Azure network on the Atlas side + type: string + type: object + conditions: + description: Conditions is the list of statuses showing the current + state of the Atlas Custom Resource + items: + description: Condition describes the state of an Atlas Custom Resource + at a certain point. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of Atlas Custom Resource condition. 
+ type: string + required: + - status + - type + type: object + type: array + gcpStatus: + description: GCPStatus contains GCP only related status information + properties: + gcpProjectID: + description: GCPProjectID is GCP project on the Atlas side + type: string + networkName: + description: NetworkName is GCP network on the Atlas side + type: string + type: object + id: + description: ID recrods the identified of the peer created by Atlas + type: string + observedGeneration: + description: |- + ObservedGeneration indicates the generation of the resource specification that the Atlas Operator is aware of. + The Atlas Operator updates this field to the 'metadata.generation' as soon as it starts reconciliation of the resource. + format: int64 + type: integer + status: + description: Status describes the last status seen for the network + peering setup + type: string + required: + - conditions + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index ca157a1f8a..f9c3174932 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -18,5 +18,6 @@ resources: - bases/atlas.mongodb.com_atlascustomroles.yaml - bases/atlas.mongodb.com_atlasipaccesslists.yaml - bases/atlas.mongodb.com_atlasnetworkcontainers.yaml + - bases/atlas.mongodb.com_atlasnetworkpeerings.yaml configurations: - kustomizeconfig.yaml diff --git a/config/rbac/atlasnetworkpeering_editor_role.yaml b/config/rbac/atlasnetworkpeering_editor_role.yaml new file mode 100644 index 0000000000..9b058882d5 --- /dev/null +++ b/config/rbac/atlasnetworkpeering_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit atlasnetworkpeerings. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: atlasnetworkpeering-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: ako-scaffolding + app.kubernetes.io/part-of: ako-scaffolding + app.kubernetes.io/managed-by: kustomize + name: atlasnetworkpeering-editor-role +rules: +- apiGroups: + - atlas.mongodb.com + resources: + - atlasnetworkpeerings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - atlas.mongodb.com + resources: + - atlasnetworkpeerings/status + verbs: + - get diff --git a/config/rbac/atlasnetworkpeering_viewer_role.yaml b/config/rbac/atlasnetworkpeering_viewer_role.yaml new file mode 100644 index 0000000000..0bacac7296 --- /dev/null +++ b/config/rbac/atlasnetworkpeering_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view atlasnetworkpeerings. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: atlasnetworkpeering-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: ako-scaffolding + app.kubernetes.io/part-of: ako-scaffolding + app.kubernetes.io/managed-by: kustomize + name: atlasnetworkpeering-viewer-role +rules: +- apiGroups: + - atlas.mongodb.com + resources: + - atlasnetworkpeerings + verbs: + - get + - list + - watch +- apiGroups: + - atlas.mongodb.com + resources: + - atlasnetworkpeerings/status + verbs: + - get diff --git a/config/rbac/clusterwide/role.yaml b/config/rbac/clusterwide/role.yaml index ddbdf1007f..40ebcf4087 100644 --- a/config/rbac/clusterwide/role.yaml +++ b/config/rbac/clusterwide/role.yaml @@ -36,6 +36,7 @@ rules: - atlasfederatedauths - atlasipaccesslists - atlasnetworkcontainers + - atlasnetworkpeerings - atlasprivateendpoints - atlasprojects - atlassearchindexconfigs @@ -63,6 +64,7 @@ rules: - 
atlasfederatedauths/status - atlasipaccesslists/status - atlasnetworkcontainers/status + - atlasnetworkpeerings/status - atlasprivateendpoints/status - atlasprojects/status - atlassearchindexconfigs/status @@ -78,5 +80,6 @@ rules: resources: - atlasipaccesslists/finalizers - atlasnetworkcontainers/finalizers + - atlasnetworkpeerings/finalizers verbs: - update diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index dd79bd08fc..49ea518436 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -50,3 +50,7 @@ resources: - atlasproject_viewer_role.yaml - atlasdeployment_editor_role.yaml - atlasdeployment_viewer_role.yaml +- atlasnetworkpeering_editor_role.yaml +- atlasnetworkpeering_viewer_role.yaml + + diff --git a/config/samples/atlas_v1_atlasnetworkpeering.yaml b/config/samples/atlas_v1_atlasnetworkpeering.yaml new file mode 100644 index 0000000000..00b6e26e9c --- /dev/null +++ b/config/samples/atlas_v1_atlasnetworkpeering.yaml @@ -0,0 +1,17 @@ +apiVersion: atlas.mongodb.com/v1 +kind: AtlasNetworkPeering +metadata: + name: atlasnetworkpeering-sample +spec: + projectRef: + name: atlas-project + namespace: namespace + provider: AWS + containerId: "623412394512350" + awsConfiguration: + accepterRegionName: US_EAST_1 # TODO: double check sample values + awsAccountId: "23214235145" + routeTableCidrBlock: "10.11.0.0/16" + vpcId: "vpcid-7123782134" + containerRegion: US_EAST_1 + atlasCidrBlock: "10.12.0.0/16" diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index d7cdad3549..8c532fc5a4 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -10,4 +10,5 @@ resources: - atlas_v1_atlasteam.yaml - atlas_v1_atlasipaccesslist.yaml - atlas_v1_atlasnetworkcontainer.yaml + - atlas_v1_atlasnetworkpeering.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/devbox.lock b/devbox.lock index d637013da4..54812b5166 100644 --- a/devbox.lock +++ 
b/devbox.lock @@ -50,114 +50,114 @@ } }, "awscli2@latest": { - "last_modified": "2025-02-04T04:50:32Z", - "resolved": "github:NixOS/nixpkgs/95ea544c84ebed84a31896b0ecea2570e5e0e236#awscli2", + "last_modified": "2025-02-07T11:26:36Z", + "resolved": "github:NixOS/nixpkgs/d98abf5cf5914e5e4e9d57205e3af55ca90ffc1d#awscli2", "source": "devbox-search", - "version": "2.23.5", + "version": "2.23.11", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/ha82a948pgcxz28f8pkzppabbbdvcy4n-awscli2-2.23.5", + "path": "/nix/store/xgc375fqnx7i91psclrxmazgxb328n0v-awscli2-2.23.11", "default": true }, { "name": "dist", - "path": "/nix/store/bdlg3x5ar6vzvv19n7xd4mibj4f9ifhh-awscli2-2.23.5-dist" + "path": "/nix/store/qzbmzsdc3rf35m8qwmi5gh9gw5f30haa-awscli2-2.23.11-dist" } ], - "store_path": "/nix/store/ha82a948pgcxz28f8pkzppabbbdvcy4n-awscli2-2.23.5" + "store_path": "/nix/store/xgc375fqnx7i91psclrxmazgxb328n0v-awscli2-2.23.11" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/wry0dz12qw5qd10cbdyhasc2mda2vpj2-awscli2-2.23.5", + "path": "/nix/store/lvzxygzya67f3xnnkb99qqjhvhizqmn8-awscli2-2.23.11", "default": true }, { "name": "dist", - "path": "/nix/store/x1p39yaxxp6xfpmx0vzn3v5kn1hapgjc-awscli2-2.23.5-dist" + "path": "/nix/store/rp6zsiw7ajssnnxm8gb5vrqz89k9wy96-awscli2-2.23.11-dist" } ], - "store_path": "/nix/store/wry0dz12qw5qd10cbdyhasc2mda2vpj2-awscli2-2.23.5" + "store_path": "/nix/store/lvzxygzya67f3xnnkb99qqjhvhizqmn8-awscli2-2.23.11" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/9jif79b32ykpydflmdmqpc77pyylvcb2-awscli2-2.23.5", + "path": "/nix/store/5mg2kvwga04jghvngskhmwmwrk1f8i5z-awscli2-2.23.11", "default": true }, { "name": "dist", - "path": "/nix/store/n89ai9vnmmphfrisbb0albivixiv8rhz-awscli2-2.23.5-dist" + "path": "/nix/store/n3na3vqiavqcd7r6g4g1vnhq5hic7xiq-awscli2-2.23.11-dist" } ], - "store_path": "/nix/store/9jif79b32ykpydflmdmqpc77pyylvcb2-awscli2-2.23.5" + "store_path": 
"/nix/store/5mg2kvwga04jghvngskhmwmwrk1f8i5z-awscli2-2.23.11" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/7ygka040zcgz8ylr1g37h2hygbw0jzj6-awscli2-2.23.5", + "path": "/nix/store/29xal5qblw03j0kfs7jxsh3zxw35626d-awscli2-2.23.11", "default": true }, { "name": "dist", - "path": "/nix/store/5zq2rnqfcs02zj9cc32cx7lkk97jcpkr-awscli2-2.23.5-dist" + "path": "/nix/store/3a2a50la3rzsxkl14hia26zrsal4hq6m-awscli2-2.23.11-dist" } ], - "store_path": "/nix/store/7ygka040zcgz8ylr1g37h2hygbw0jzj6-awscli2-2.23.5" + "store_path": "/nix/store/29xal5qblw03j0kfs7jxsh3zxw35626d-awscli2-2.23.11" } } }, "cosign@latest": { - "last_modified": "2025-01-19T08:16:51Z", - "resolved": "github:NixOS/nixpkgs/50165c4f7eb48ce82bd063e1fb8047a0f515f8ce#cosign", + "last_modified": "2025-02-05T14:25:15Z", + "resolved": "github:NixOS/nixpkgs/ccfae3057498f5a740be4c5a13aa800813a13084#cosign", "source": "devbox-search", - "version": "2.4.1", + "version": "2.4.2", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/z2ks68i8nbzp0v4bv6rgfl497m3wqg7m-cosign-2.4.1", + "path": "/nix/store/d9slnanpm7f6lqp1dgvn2a2rkcwkiw65-cosign-2.4.2", "default": true } ], - "store_path": "/nix/store/z2ks68i8nbzp0v4bv6rgfl497m3wqg7m-cosign-2.4.1" + "store_path": "/nix/store/d9slnanpm7f6lqp1dgvn2a2rkcwkiw65-cosign-2.4.2" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/bg5vb890i6dwcdlr89ihqr1wzyassw1i-cosign-2.4.1", + "path": "/nix/store/iq7a8a4lbhn9ckww6kdg8ddrq9m2ig8k-cosign-2.4.2", "default": true } ], - "store_path": "/nix/store/bg5vb890i6dwcdlr89ihqr1wzyassw1i-cosign-2.4.1" + "store_path": "/nix/store/iq7a8a4lbhn9ckww6kdg8ddrq9m2ig8k-cosign-2.4.2" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/hnqflpvi8z5g2c5jlmds1czxz0qwk6nv-cosign-2.4.1", + "path": "/nix/store/ca4ww2jlgig9fmqyvkvqg6pn54yjr7hm-cosign-2.4.2", "default": true } ], - "store_path": "/nix/store/hnqflpvi8z5g2c5jlmds1czxz0qwk6nv-cosign-2.4.1" + 
"store_path": "/nix/store/ca4ww2jlgig9fmqyvkvqg6pn54yjr7hm-cosign-2.4.2" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/wxhiz0ljc4l62mghyriiib6sk7k1j47x-cosign-2.4.1", + "path": "/nix/store/2i3ikkaxxax6sh1ni5kx7a3rr5i428qv-cosign-2.4.2", "default": true } ], - "store_path": "/nix/store/wxhiz0ljc4l62mghyriiib6sk7k1j47x-cosign-2.4.1" + "store_path": "/nix/store/2i3ikkaxxax6sh1ni5kx7a3rr5i428qv-cosign-2.4.2" } } }, @@ -201,11 +201,11 @@ "outputs": [ { "name": "out", - "path": "/nix/store/h7y41n9csx34yc3qir8lfqyvh0kza12y-docker-sbom-0.6.1", + "path": "/nix/store/0h8v1zzh289kjjmfpg9bnrna5s626dic-docker-sbom-0.6.1", "default": true } ], - "store_path": "/nix/store/h7y41n9csx34yc3qir8lfqyvh0kza12y-docker-sbom-0.6.1" + "store_path": "/nix/store/0h8v1zzh289kjjmfpg9bnrna5s626dic-docker-sbom-0.6.1" } } }, @@ -229,11 +229,11 @@ "outputs": [ { "name": "out", - "path": "/nix/store/ksmj926px4bm84klmvmq10zi5dhdv57i-docker-27.5.1", + "path": "/nix/store/mkbcmf8kfkkh977x7lj369j7c9lsfgvv-docker-27.5.1", "default": true } ], - "store_path": "/nix/store/ksmj926px4bm84klmvmq10zi5dhdv57i-docker-27.5.1" + "store_path": "/nix/store/mkbcmf8kfkkh977x7lj369j7c9lsfgvv-docker-27.5.1" }, "x86_64-darwin": { "outputs": [ @@ -258,8 +258,8 @@ } }, "gettext@latest": { - "last_modified": "2025-01-29T07:48:22Z", - "resolved": "github:NixOS/nixpkgs/9a5db3142ce450045840cc8d832b13b8a2018e0c#gettext", + "last_modified": "2025-02-03T03:32:17Z", + "resolved": "github:NixOS/nixpkgs/2d31b9476b7c6f5b029e595586b0b112a7ad130b#gettext", "source": "devbox-search", "version": "0.22.5", "systems": { @@ -290,24 +290,24 @@ "outputs": [ { "name": "out", - "path": "/nix/store/giprmfkym292i7qh6nd0bpa1ljznjw49-gettext-0.22.5", + "path": "/nix/store/wqjbyz853fjqcg7g2pfza152iavwydcv-gettext-0.22.5", "default": true }, { "name": "man", - "path": "/nix/store/1caj1wim9slzbhw74py9wrms11f6ca8a-gettext-0.22.5-man", + "path": "/nix/store/alwam3idy04bcvs3jljrim5g30rm2xq1-gettext-0.22.5-man", "default": true 
}, { "name": "doc", - "path": "/nix/store/1pzjmzvw5a7ygk3f8d95qpzk0fm1ry7s-gettext-0.22.5-doc" + "path": "/nix/store/7xfkwaikv50dfk3vkril6mah34b8w3cr-gettext-0.22.5-doc" }, { "name": "info", - "path": "/nix/store/mcggyfxk8b7jand1kbpxxcfciyvi1w5q-gettext-0.22.5-info" + "path": "/nix/store/5dgz5jzjwvnikvbkj3d4j9prybk9z359-gettext-0.22.5-info" } ], - "store_path": "/nix/store/giprmfkym292i7qh6nd0bpa1ljznjw49-gettext-0.22.5" + "store_path": "/nix/store/wqjbyz853fjqcg7g2pfza152iavwydcv-gettext-0.22.5" }, "x86_64-darwin": { "outputs": [ @@ -321,13 +321,13 @@ "path": "/nix/store/x46zbnd6fyx6ipcmsghaap4ns9cbs9lg-gettext-0.22.5-man", "default": true }, - { - "name": "doc", - "path": "/nix/store/sbw7hvw86hdbd5hqxqx8433b6yra4wzz-gettext-0.22.5-doc" - }, { "name": "info", "path": "/nix/store/v9z8hmpndzpxhw8701xn6ndl7z1m9bjq-gettext-0.22.5-info" + }, + { + "name": "doc", + "path": "/nix/store/sbw7hvw86hdbd5hqxqx8433b6yra4wzz-gettext-0.22.5-doc" } ], "store_path": "/nix/store/mb0lxwk2l23nxa4f2x5d6s6hblf2gl1d-gettext-0.22.5" @@ -336,24 +336,24 @@ "outputs": [ { "name": "out", - "path": "/nix/store/m4454h58k2jyrpz9r96zby1aid2fv9qd-gettext-0.22.5", + "path": "/nix/store/r48y0ydp5f92if0ribjln4hcg2jkqif5-gettext-0.22.5", "default": true }, { "name": "man", - "path": "/nix/store/pdb6kr6mw6qvwhw6ih9da51jjpnmz87w-gettext-0.22.5-man", + "path": "/nix/store/0qi0d1irsfzmb6nnwj5gv601viglypl0-gettext-0.22.5-man", "default": true }, { - "name": "doc", - "path": "/nix/store/060h4lpb7ny25mkpjcnwijmca4njcf5l-gettext-0.22.5-doc" + "name": "info", + "path": "/nix/store/qnvfdd17d4szk8skc5cwppzmni7g0bgv-gettext-0.22.5-info" }, { - "name": "info", - "path": "/nix/store/c937qmkc0qd9l1xin1is1g9rxb2xxwzh-gettext-0.22.5-info" + "name": "doc", + "path": "/nix/store/88cnigg32g6x4wk5qxrwx62dxi50gdbr-gettext-0.22.5-doc" } ], - "store_path": "/nix/store/m4454h58k2jyrpz9r96zby1aid2fv9qd-gettext-0.22.5" + "store_path": "/nix/store/r48y0ydp5f92if0ribjln4hcg2jkqif5-gettext-0.22.5" } } }, @@ -377,11 +377,11 @@ 
"outputs": [ { "name": "out", - "path": "/nix/store/0ig5kns58h27y377xnxhimgcjl4wc4cb-ginkgo-2.22.2", + "path": "/nix/store/dr6iqm8qk3a04s0i4s07lclxnycnjws5-ginkgo-2.22.2", "default": true } ], - "store_path": "/nix/store/0ig5kns58h27y377xnxhimgcjl4wc4cb-ginkgo-2.22.2" + "store_path": "/nix/store/dr6iqm8qk3a04s0i4s07lclxnycnjws5-ginkgo-2.22.2" }, "x86_64-darwin": { "outputs": [ @@ -406,74 +406,74 @@ } }, "git@latest": { - "last_modified": "2025-01-19T08:16:51Z", - "resolved": "github:NixOS/nixpkgs/50165c4f7eb48ce82bd063e1fb8047a0f515f8ce#git", + "last_modified": "2025-02-07T11:26:36Z", + "resolved": "github:NixOS/nixpkgs/d98abf5cf5914e5e4e9d57205e3af55ca90ffc1d#git", "source": "devbox-search", - "version": "2.47.1", + "version": "2.47.2", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/6jr29hgw3sa439df6bd4mh2c6rmpf68j-git-2.47.1", + "path": "/nix/store/9z3jhc0rlj3zaw8nd1zka9vli6w0q11g-git-2.47.2", "default": true }, { "name": "doc", - "path": "/nix/store/j3w81894k2014nsl8c9k4fkb5zrjdjjn-git-2.47.1-doc" + "path": "/nix/store/rh151iwgy4h8yv8kxd5facw57cyj0bav-git-2.47.2-doc" } ], - "store_path": "/nix/store/6jr29hgw3sa439df6bd4mh2c6rmpf68j-git-2.47.1" + "store_path": "/nix/store/9z3jhc0rlj3zaw8nd1zka9vli6w0q11g-git-2.47.2" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/l05864n3qnk0gp3cy8mijkk78p84xvdj-git-2.47.1", + "path": "/nix/store/gx5y37qcfqdvn0h6swjd04dmqjjh3nk7-git-2.47.2", "default": true }, { "name": "debug", - "path": "/nix/store/s5gjjyjdmcw49z8ynqgc4r8r8fxxa71i-git-2.47.1-debug" + "path": "/nix/store/8vfpmf3vjgzl2psip76p0f9h11sb6y3p-git-2.47.2-debug" }, { "name": "doc", - "path": "/nix/store/cxjbaw9xrzxx9m9dz9syydnjagwxvy5h-git-2.47.1-doc" + "path": "/nix/store/c25mq3q83dvw3k5pb0qr5333g3cycylq-git-2.47.2-doc" } ], - "store_path": "/nix/store/l05864n3qnk0gp3cy8mijkk78p84xvdj-git-2.47.1" + "store_path": "/nix/store/gx5y37qcfqdvn0h6swjd04dmqjjh3nk7-git-2.47.2" }, "x86_64-darwin": { "outputs": [ { 
"name": "out", - "path": "/nix/store/y9ilj7w794c9il2s04xx5ikg9kf7zmm4-git-2.47.1", + "path": "/nix/store/39xx5gx3hxigs1b5ldw5i2jr84vsn3rf-git-2.47.2", "default": true }, { "name": "doc", - "path": "/nix/store/0pr5g6bf0vcmmcrb1hdi4s4m5dqzczpc-git-2.47.1-doc" + "path": "/nix/store/xmh2djjrnbpiqqgpblrcbavnqh0nv4km-git-2.47.2-doc" } ], - "store_path": "/nix/store/y9ilj7w794c9il2s04xx5ikg9kf7zmm4-git-2.47.1" + "store_path": "/nix/store/39xx5gx3hxigs1b5ldw5i2jr84vsn3rf-git-2.47.2" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/7mln3k7mmvsnpgp47zr63w9d60pqba9n-git-2.47.1", + "path": "/nix/store/33g65w5cc9n8fr0hxj84282xmv4l7hyl-git-2.47.2", "default": true }, { - "name": "debug", - "path": "/nix/store/0yk2g1fyf6wnari6qjy0x5ljkg5lirci-git-2.47.1-debug" + "name": "doc", + "path": "/nix/store/lb4nipdhlwrxdavz7gdkcik6lkz3cbdm-git-2.47.2-doc" }, { - "name": "doc", - "path": "/nix/store/4i5f6hdjmjn70m4qfvg8ymg9abhszlrp-git-2.47.1-doc" + "name": "debug", + "path": "/nix/store/jyz4nvcd3bci4vg2sfsmvrq0fp9mzr5a-git-2.47.2-debug" } ], - "store_path": "/nix/store/7mln3k7mmvsnpgp47zr63w9d60pqba9n-git-2.47.1" + "store_path": "/nix/store/33g65w5cc9n8fr0hxj84282xmv4l7hyl-git-2.47.2" } } }, @@ -593,11 +593,11 @@ "outputs": [ { "name": "out", - "path": "/nix/store/jyvj1br09bjn49vjjammicv5g6ypanyr-gotests-1.6.0", + "path": "/nix/store/ji0aqa0xfjfwk801xkm8h8w5c8g1jajg-gotests-1.6.0", "default": true } ], - "store_path": "/nix/store/jyvj1br09bjn49vjjammicv5g6ypanyr-gotests-1.6.0" + "store_path": "/nix/store/ji0aqa0xfjfwk801xkm8h8w5c8g1jajg-gotests-1.6.0" }, "x86_64-darwin": { "outputs": [ @@ -670,8 +670,8 @@ } }, "govulncheck@latest": { - "last_modified": "2025-01-19T08:16:51Z", - "resolved": "github:NixOS/nixpkgs/50165c4f7eb48ce82bd063e1fb8047a0f515f8ce#govulncheck", + "last_modified": "2025-02-07T20:06:47Z", + "resolved": "github:NixOS/nixpkgs/e8d0b02af0958823c955aaab3c82b03f54411d91#govulncheck", "source": "devbox-search", "version": "1.1.4", "systems": { @@ 
-679,31 +679,31 @@ "outputs": [ { "name": "out", - "path": "/nix/store/66m3ic0cdzdmki3hxyzmnq00v6xasvhs-govulncheck-1.1.4", + "path": "/nix/store/04cngpd5js88f7rn067gnx8k82fx3j8x-govulncheck-1.1.4", "default": true } ], - "store_path": "/nix/store/66m3ic0cdzdmki3hxyzmnq00v6xasvhs-govulncheck-1.1.4" + "store_path": "/nix/store/04cngpd5js88f7rn067gnx8k82fx3j8x-govulncheck-1.1.4" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/nkjsd5pvr50ks81p1qbyx3cz1sq5df8m-govulncheck-1.1.4", + "path": "/nix/store/gmc4xxs0rg0iypn8sjmcs4g5h4w1pyjz-govulncheck-1.1.4", "default": true } ], - "store_path": "/nix/store/nkjsd5pvr50ks81p1qbyx3cz1sq5df8m-govulncheck-1.1.4" + "store_path": "/nix/store/gmc4xxs0rg0iypn8sjmcs4g5h4w1pyjz-govulncheck-1.1.4" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/0rq1913grr029p5g023l8i47fnsf6vmx-govulncheck-1.1.4", + "path": "/nix/store/jjwy6ggwnl96gq60jimmazp4pvgdy88a-govulncheck-1.1.4", "default": true } ], - "store_path": "/nix/store/0rq1913grr029p5g023l8i47fnsf6vmx-govulncheck-1.1.4" + "store_path": "/nix/store/jjwy6ggwnl96gq60jimmazp4pvgdy88a-govulncheck-1.1.4" }, "x86_64-linux": { "outputs": [ @@ -718,8 +718,8 @@ } }, "jq@latest": { - "last_modified": "2025-02-04T14:17:43Z", - "resolved": "github:NixOS/nixpkgs/00769b0532199db4e1bda59865f00f3a86232c75#jq", + "last_modified": "2025-02-09T21:53:45Z", + "resolved": "github:NixOS/nixpkgs/b2243f41e860ac85c0b446eadc6930359b294e79#jq", "source": "devbox-search", "version": "1.7.1", "systems": { @@ -727,55 +727,55 @@ "outputs": [ { "name": "bin", - "path": "/nix/store/ri8k487849v9b290iz384pi1lgw7n585-jq-1.7.1-bin", + "path": "/nix/store/lmlmb3a5kzza0si8xfghr7x17vg8bzxb-jq-1.7.1-bin", "default": true }, { "name": "man", - "path": "/nix/store/l9nf6sjs9hyvjfkmm9njw00mmvh88f6n-jq-1.7.1-man", + "path": "/nix/store/afygplc4dm1ry7ww3702wafvy8bs9sxc-jq-1.7.1-man", "default": true }, - { - "name": "dev", - "path": 
"/nix/store/n7880d10c16s05bzg4xh4ak9j7fz9440-jq-1.7.1-dev" - }, { "name": "doc", - "path": "/nix/store/axim640z0z67x48qr8nq79qph5j3vj05-jq-1.7.1-doc" + "path": "/nix/store/dw2717n906kj3xp090fg9cqvcm747jic-jq-1.7.1-doc" }, { "name": "out", - "path": "/nix/store/sj86jzxi9p5pipih6fx7sw9pw6j54qbl-jq-1.7.1" + "path": "/nix/store/jm1bv0cha32k9967sv0z40kqgn5slz4i-jq-1.7.1" + }, + { + "name": "dev", + "path": "/nix/store/89q76wghz9xj2myglfwpv0n6yc19c8i6-jq-1.7.1-dev" } ], - "store_path": "/nix/store/ri8k487849v9b290iz384pi1lgw7n585-jq-1.7.1-bin" + "store_path": "/nix/store/lmlmb3a5kzza0si8xfghr7x17vg8bzxb-jq-1.7.1-bin" }, "aarch64-linux": { "outputs": [ { "name": "bin", - "path": "/nix/store/z5vhfsyh4yh7f018q036q260h363a56w-jq-1.7.1-bin", + "path": "/nix/store/vwpm32inl7g5w4p1hqkhjlj1wv0ic67y-jq-1.7.1-bin", "default": true }, { "name": "man", - "path": "/nix/store/hvb2dfhc1k02abmai7n3bxi2w7i85k3q-jq-1.7.1-man", + "path": "/nix/store/m546xg6bwy8i785z5a7nx1ca3k9isd93-jq-1.7.1-man", "default": true }, - { - "name": "out", - "path": "/nix/store/qky6im9rkx32bkngy05lgf2j2mhc9sa1-jq-1.7.1" - }, { "name": "dev", - "path": "/nix/store/6xqw0a045m2s6kg7bdx8f0y88x55iw18-jq-1.7.1-dev" + "path": "/nix/store/fn8f3hh3k8z3xxkl6al6qx78577w4hdx-jq-1.7.1-dev" }, { "name": "doc", - "path": "/nix/store/7120g78wnwvhsazmd65g3v6hkn7fijj6-jq-1.7.1-doc" + "path": "/nix/store/jzcllz8ry9hnlpq954ik9fi215iir24l-jq-1.7.1-doc" + }, + { + "name": "out", + "path": "/nix/store/msmdzk125aal9mj8d0gd7r4sygh0mg4m-jq-1.7.1" } ], - "store_path": "/nix/store/z5vhfsyh4yh7f018q036q260h363a56w-jq-1.7.1-bin" + "store_path": "/nix/store/vwpm32inl7g5w4p1hqkhjlj1wv0ic67y-jq-1.7.1-bin" }, "x86_64-darwin": { "outputs": [ @@ -808,28 +808,28 @@ "outputs": [ { "name": "bin", - "path": "/nix/store/69msxmwsxfbxx8mzigzrfppgz6qk1sx8-jq-1.7.1-bin", + "path": "/nix/store/n4xfh00cw7vnwnrlx9asp545z82pazgc-jq-1.7.1-bin", "default": true }, { "name": "man", - "path": "/nix/store/0j2mhyyc4rp63wlqip4n9j356nlcqznv-jq-1.7.1-man", + "path": 
"/nix/store/fj5sc6yb8v39min7kavc0i24nwmlsyki-jq-1.7.1-man", "default": true }, { "name": "dev", - "path": "/nix/store/135pr45fck6iqk1mym8rlb9hz0vxn2cj-jq-1.7.1-dev" + "path": "/nix/store/lcjisy815hp4agl57xqh2w6mic1v8jlf-jq-1.7.1-dev" }, { "name": "doc", - "path": "/nix/store/bnzrjq3bpagqhd55mz09i8lx6l2qzwq4-jq-1.7.1-doc" + "path": "/nix/store/41ir7g8plmi9257c4g8ag94jl9vhkp4l-jq-1.7.1-doc" }, { "name": "out", - "path": "/nix/store/rxy1z6zcwgrj59gn91r561s3dj0xgjqg-jq-1.7.1" + "path": "/nix/store/4b9rswbcgqiqidglpz6nrwlsfkhi7v22-jq-1.7.1" } ], - "store_path": "/nix/store/69msxmwsxfbxx8mzigzrfppgz6qk1sx8-jq-1.7.1-bin" + "store_path": "/nix/store/n4xfh00cw7vnwnrlx9asp545z82pazgc-jq-1.7.1-bin" } } }, @@ -948,20 +948,20 @@ "outputs": [ { "name": "out", - "path": "/nix/store/v312kkwzk8mibc9z67n53hab6mcc7cn5-kubectl-1.32.1", + "path": "/nix/store/zph6v36yrnyixil25mnysr82i933kx6i-kubectl-1.32.1", "default": true }, { "name": "man", - "path": "/nix/store/jb9zc5f93z67xaldz5zmmnv8d5pzc94r-kubectl-1.32.1-man", + "path": "/nix/store/q843mipr11308vvvp7465yc7y4k5mg06-kubectl-1.32.1-man", "default": true }, { "name": "convert", - "path": "/nix/store/1lkaw0m4238bichjjl5jkkpqpvxahp43-kubectl-1.32.1-convert" + "path": "/nix/store/y3l02a82n3zzzdipccb1xpdw3yz68cb0-kubectl-1.32.1-convert" } ], - "store_path": "/nix/store/v312kkwzk8mibc9z67n53hab6mcc7cn5-kubectl-1.32.1" + "store_path": "/nix/store/zph6v36yrnyixil25mnysr82i933kx6i-kubectl-1.32.1" } } }, @@ -1182,8 +1182,8 @@ } }, "shellcheck@latest": { - "last_modified": "2025-01-19T08:16:51Z", - "resolved": "github:NixOS/nixpkgs/50165c4f7eb48ce82bd063e1fb8047a0f515f8ce#shellcheck", + "last_modified": "2025-02-07T11:26:36Z", + "resolved": "github:NixOS/nixpkgs/d98abf5cf5914e5e4e9d57205e3af55ca90ffc1d#shellcheck", "source": "devbox-search", "version": "0.10.0", "systems": { @@ -1191,25 +1191,25 @@ "outputs": [ { "name": "bin", - "path": "/nix/store/3dxs66z95mafvn1dsqhg4f0zlz8b4cqv-shellcheck-0.10.0-bin", + "path": 
"/nix/store/iggvrsj00j8py8q7i81cp3d1lvw1l3zw-shellcheck-0.10.0-bin", "default": true }, { "name": "man", - "path": "/nix/store/jvv73bnrkjyq2c615i0ck9svl6pf0k2g-shellcheck-0.10.0-man", + "path": "/nix/store/pb8v9fjqishjisml28lmby3iaadz45y8-shellcheck-0.10.0-man", "default": true }, { "name": "doc", - "path": "/nix/store/y1jpdlkaxxsgj6xikq1zlw81mdn3la43-shellcheck-0.10.0-doc", + "path": "/nix/store/1bfp33rkmfx7479aqh26f0glzc58gy5i-shellcheck-0.10.0-doc", "default": true }, { "name": "out", - "path": "/nix/store/9gd0zysiiq4m4mxxvnxr73ic2h99a7a1-shellcheck-0.10.0" + "path": "/nix/store/0x3i1pp5dy8wrviw8xwl5z99pxs5ijsz-shellcheck-0.10.0" } ], - "store_path": "/nix/store/3dxs66z95mafvn1dsqhg4f0zlz8b4cqv-shellcheck-0.10.0-bin" + "store_path": "/nix/store/iggvrsj00j8py8q7i81cp3d1lvw1l3zw-shellcheck-0.10.0-bin" }, "aarch64-linux": { "outputs": [ @@ -1239,25 +1239,25 @@ "outputs": [ { "name": "bin", - "path": "/nix/store/2wfg86v5m65843ac5ndxh49lkgz0ra62-shellcheck-0.10.0-bin", + "path": "/nix/store/mcxaqmamjv5zflgnaanm2ij8rv48saya-shellcheck-0.10.0-bin", "default": true }, { "name": "man", - "path": "/nix/store/j0ac5q3f8amcjlv7mpnarpc489kcmwch-shellcheck-0.10.0-man", + "path": "/nix/store/hfy0c1ih300h2f5piqj63yjnmf0hy1zh-shellcheck-0.10.0-man", "default": true }, { "name": "doc", - "path": "/nix/store/169a6625l5jwkkm4kmdsn9jwh7g6r5xi-shellcheck-0.10.0-doc", + "path": "/nix/store/5ph7g97j8sb9fw5a3sr9mvldfxzqlpcd-shellcheck-0.10.0-doc", "default": true }, { "name": "out", - "path": "/nix/store/qvwdimh65m17vim2pyasv35vnxa4xngp-shellcheck-0.10.0" + "path": "/nix/store/2nmcagz1w31xwifmyaydyphmd5m2pkpw-shellcheck-0.10.0" } ], - "store_path": "/nix/store/2wfg86v5m65843ac5ndxh49lkgz0ra62-shellcheck-0.10.0-bin" + "store_path": "/nix/store/mcxaqmamjv5zflgnaanm2ij8rv48saya-shellcheck-0.10.0-bin" }, "x86_64-linux": { "outputs": [ @@ -1315,21 +1315,21 @@ "outputs": [ { "name": "out", - "path": "/nix/store/4ww669j4yqlwxj4zmi80g852mgpjvn95-wget-1.25.0", + "path": 
"/nix/store/sx84x0r3aar3x6vl7zhl2k2n3fajbvg7-wget-1.25.0", "default": true } ], - "store_path": "/nix/store/4ww669j4yqlwxj4zmi80g852mgpjvn95-wget-1.25.0" + "store_path": "/nix/store/sx84x0r3aar3x6vl7zhl2k2n3fajbvg7-wget-1.25.0" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/v6rwczwhjw7za0lqw7rj0n8bfqdq4rpp-wget-1.25.0", + "path": "/nix/store/ards3290i5mb6ll7vybvgdmv21ay9fba-wget-1.25.0", "default": true } ], - "store_path": "/nix/store/v6rwczwhjw7za0lqw7rj0n8bfqdq4rpp-wget-1.25.0" + "store_path": "/nix/store/ards3290i5mb6ll7vybvgdmv21ay9fba-wget-1.25.0" } } }, @@ -1363,11 +1363,11 @@ "outputs": [ { "name": "out", - "path": "/nix/store/zzmwwabkgbqrqx1nld5fldwbp0izclqy-yq-go-4.45.1", + "path": "/nix/store/5v8g3w5ggravlps5qw49scidhk15dxgq-yq-go-4.45.1", "default": true } ], - "store_path": "/nix/store/zzmwwabkgbqrqx1nld5fldwbp0izclqy-yq-go-4.45.1" + "store_path": "/nix/store/5v8g3w5ggravlps5qw49scidhk15dxgq-yq-go-4.45.1" }, "x86_64-linux": { "outputs": [ diff --git a/helm-charts b/helm-charts index 747ea60a07..a09b6e4316 160000 --- a/helm-charts +++ b/helm-charts @@ -1 +1 @@ -Subproject commit 747ea60a07039dba3f637d436688d90be7277089 +Subproject commit a09b6e43166b72f8fcdf3f0c530ecfe730af9a4d diff --git a/internal/controller/atlasnetworkcontainer/state.go b/internal/controller/atlasnetworkcontainer/state.go index c9bd8cb3c7..d773432782 100644 --- a/internal/controller/atlasnetworkcontainer/state.go +++ b/internal/controller/atlasnetworkcontainer/state.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "reflect" ctrl "sigs.k8s.io/controller-runtime" @@ -16,6 +15,10 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" ) +const ( + typeName = "AtlasNetworkContainer" +) + type reconcileRequest struct { projectID string networkContainer *akov2.AtlasNetworkContainer @@ -23,7 +26,6 @@ type reconcileRequest struct { } func (r *AtlasNetworkContainerReconciler) handleCustomResource(ctx context.Context, 
networkContainer *akov2.AtlasNetworkContainer) (ctrl.Result, error) { - typeName := reflect.TypeOf(*networkContainer).Name() if customresource.ReconciliationShouldBeSkipped(networkContainer) { return r.Skip(ctx, typeName, networkContainer, networkContainer.Spec) } diff --git a/internal/controller/atlasnetworkpeering/atlasnetworkpeering_controller.go b/internal/controller/atlasnetworkpeering/atlasnetworkpeering_controller.go new file mode 100644 index 0000000000..504a65c520 --- /dev/null +++ b/internal/controller/atlasnetworkpeering/atlasnetworkpeering_controller.go @@ -0,0 +1,188 @@ +// Package atlasnetworkpeering holds the network peering controller +package atlasnetworkpeering + +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +import ( + "context" + "errors" + "reflect" + "time" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/reconciler" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/indexer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" +) + +// AtlasNetworkPeeringReconciler reconciles a AtlasNetworkPeering object +type AtlasNetworkPeeringReconciler struct { + reconciler.AtlasReconciler + AtlasProvider atlas.Provider + Scheme *runtime.Scheme + EventRecorder record.EventRecorder + GlobalPredicates []predicate.Predicate + ObjectDeletionProtection bool + independentSyncPeriod time.Duration +} + +func NewAtlasNetworkPeeringsReconciler( + c cluster.Cluster, + predicates []predicate.Predicate, + atlasProvider atlas.Provider, + deletionProtection bool, + logger *zap.Logger, + independentSyncPeriod time.Duration, +) *AtlasNetworkPeeringReconciler { + return &AtlasNetworkPeeringReconciler{ + AtlasReconciler: reconciler.AtlasReconciler{ + Client: c.GetClient(), + Log: logger.Named("controllers").Named("AtlasNetworkPeering").Sugar(), + }, + Scheme: 
c.GetScheme(), + EventRecorder: c.GetEventRecorderFor("AtlasPrivateEndpoint"), + AtlasProvider: atlasProvider, + GlobalPredicates: predicates, + ObjectDeletionProtection: deletionProtection, + independentSyncPeriod: independentSyncPeriod, + } +} + +//+kubebuilder:rbac:groups=atlas.mongodb.com,resources=atlasnetworkpeerings,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=atlas.mongodb.com,resources=atlasnetworkpeerings/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=atlas.mongodb.com,resources=atlasnetworkpeerings/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the AtlasNetworkPeering object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile +func (r *AtlasNetworkPeeringReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + r.Log.Infow("-> Starting AtlasNetworkPeering reconciliation") + + akoNetworkPeering := akov2.AtlasNetworkPeering{} + result := customresource.PrepareResource(ctx, r.Client, req, &akoNetworkPeering, r.Log) + if !result.IsOk() { + return result.ReconcileResult(), errors.New(result.GetMessage()) + } + return r.handleCustomResource(ctx, &akoNetworkPeering) +} + +// For prepares the controller for its target Custom Resource; Network Containers +func (r *AtlasNetworkPeeringReconciler) For() (client.Object, builder.Predicates) { + predicates := append([]predicate.Predicate{tracePredicate(r.Log)}, r.GlobalPredicates...) + return &akov2.AtlasNetworkPeering{}, builder.WithPredicates(predicates...) 
+} + +// tracePredicate skips the first update event, which comes from the cache without a status. +// This avoids an error loop: reconciling with an empty status could try to create the peering again, +// which would fail if it was already created. +func tracePredicate(logger *zap.SugaredLogger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + newObj, ok := e.ObjectNew.(*akov2.AtlasNetworkPeering) + if !ok || reflect.DeepEqual(newObj.Status, status.AtlasNetworkPeeringStatus{}) { + logger.Warnf("SKIP UPDATE with empty status: event %#+v", e) + return false + } + return true + }, + } +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AtlasNetworkPeeringReconciler) SetupWithManager(mgr ctrl.Manager, skipNameValidation bool) error { + return ctrl.NewControllerManagedBy(mgr). + For(r.For()). + Watches( + &akov2.AtlasProject{}, + handler.EnqueueRequestsFromMapFunc(r.networkPeeringForProjectMapFunc()), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Watches( + &corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(r.networkPeeringForCredentialMapFunc()), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + WithOptions(controller.TypedOptions[reconcile.Request]{SkipNameValidation: pointer.MakePtr(skipNameValidation)}). 
+ Complete(r) +} + +func (r *AtlasNetworkPeeringReconciler) networkPeeringForProjectMapFunc() handler.MapFunc { + return func(ctx context.Context, obj client.Object) []reconcile.Request { + atlasProject, ok := obj.(*akov2.AtlasProject) + if !ok { + r.Log.Warnf("watching Project but got %T", obj) + + return nil + } + + npList := &akov2.AtlasNetworkPeeringList{} + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector( + indexer.AtlasNetworkPeeringByProjectIndex, + client.ObjectKeyFromObject(atlasProject).String(), + ), + } + err := r.Client.List(ctx, npList, listOpts) + if err != nil { + r.Log.Errorf("failed to list AtlasNetworkPeering: %s", err) + + return []reconcile.Request{} + } + + requests := make([]reconcile.Request, 0, len(npList.Items)) + for _, item := range npList.Items { + requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: item.Name, Namespace: item.Namespace}}) + } + + return requests + } +} + +func (r *AtlasNetworkPeeringReconciler) networkPeeringForCredentialMapFunc() handler.MapFunc { + return indexer.CredentialsIndexMapperFunc( + indexer.AtlasNetworkPeeringCredentialsIndex, + func() *akov2.AtlasNetworkPeeringList { return &akov2.AtlasNetworkPeeringList{} }, + indexer.NetworkPeeringRequests, + r.Client, + r.Log, + ) +} diff --git a/internal/controller/atlasnetworkpeering/atlasnetworkpeering_controller_test.go b/internal/controller/atlasnetworkpeering/atlasnetworkpeering_controller_test.go new file mode 100644 index 0000000000..f7fa8e0d6d --- /dev/null +++ b/internal/controller/atlasnetworkpeering/atlasnetworkpeering_controller_test.go @@ -0,0 +1,111 @@ +package atlasnetworkpeering + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + 
"sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/reconciler" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" +) + +func TestReconcile(t *testing.T) { + ctx := context.Background() + + testScheme := runtime.NewScheme() + require.NoError(t, akov2.AddToScheme(testScheme)) + + tests := map[string]struct { + request reconcile.Request + expectedResult reconcile.Result + expectedLogs []string + }{ + "failed to prepare resource": { + request: reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "np0"}}, + expectedResult: reconcile.Result{}, + expectedLogs: []string{ + "-> Starting AtlasNetworkPeering reconciliation", + "Object default/np0 doesn't exist, was it deleted after reconcile request?", + }, + }, + "prepare resource for reconciliation": { + request: reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "np1"}}, + expectedResult: reconcile.Result{}, + expectedLogs: []string{ + "-> Starting AtlasNetworkPeering reconciliation", + "-> Skipping AtlasNetworkPeering reconciliation as annotation mongodb.com/atlas-reconciliation-policy=skip", + }, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + core, logs := observer.New(zap.DebugLevel) + fakeClient := fake.NewClientBuilder(). + WithScheme(testScheme). + WithObjects(testNetworkProvider()). 
+ Build() + r := &AtlasNetworkPeeringReconciler{ + AtlasReconciler: reconciler.AtlasReconciler{ + Client: fakeClient, + Log: zap.New(core).Sugar(), + }, + } + result, _ := r.Reconcile(ctx, tc.request) + assert.Equal(t, tc.expectedResult, result) + assert.Equal(t, len(tc.expectedLogs), logs.Len()) + for i, log := range logs.All() { + assert.Equal(t, tc.expectedLogs[i], log.Message) + } + }) + } +} + +func testNetworkProvider() *akov2.AtlasNetworkPeering { + return &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "np1", + Namespace: "default", + Annotations: map[string]string{ + customresource.ReconciliationPolicyAnnotation: customresource.ReconciliationPolicySkip, + }, + }, + Spec: akov2.AtlasNetworkPeeringSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ExternalProjectRef: &akov2.ExternalProjectReference{ + ID: "fake-project-id", + }, + ConnectionSecret: &api.LocalObjectReference{ + Name: "fake-secret", + }, + }, + ContainerRef: akov2.ContainerDualReference{ + Name: "fake-container-id", + }, + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "10.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + }, + Status: status.AtlasNetworkPeeringStatus{ + ID: "peering-id", + }, + } +} diff --git a/internal/controller/atlasnetworkpeering/state.go b/internal/controller/atlasnetworkpeering/state.go new file mode 100644 index 0000000000..d0069defc1 --- /dev/null +++ b/internal/controller/atlasnetworkpeering/state.go @@ -0,0 +1,138 @@ +package atlasnetworkpeering + +import ( + "context" + "errors" + "fmt" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + 
"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/statushandler" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/workflow" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkpeering" +) + +const ( + typeName = "AtlasNetworkPeering" +) + +type reconcileRequest struct { + service networkpeering.NetworkPeeringService + containerService networkcontainer.NetworkContainerService + projectID string + networkPeering *akov2.AtlasNetworkPeering +} + +func (r *AtlasNetworkPeeringReconciler) handleCustomResource(ctx context.Context, networkPeering *akov2.AtlasNetworkPeering) (ctrl.Result, error) { + if customresource.ReconciliationShouldBeSkipped(networkPeering) { + return r.Skip(ctx, typeName, networkPeering, &networkPeering.Spec) + } + + conditions := api.InitCondition(networkPeering, api.FalseCondition(api.ReadyType)) + workflowCtx := workflow.NewContext(r.Log, conditions, ctx, networkPeering) + defer statushandler.Update(workflowCtx, r.Client, r.EventRecorder, networkPeering) + + isValid := customresource.ValidateResourceVersion(workflowCtx, networkPeering, r.Log) + if !isValid.IsOk() { + return r.Invalidate(typeName, isValid) + } + + if !r.AtlasProvider.IsResourceSupported(networkPeering) { + return r.Unsupport(workflowCtx, typeName) + } + + credentials, err := r.ResolveCredentials(ctx, networkPeering) + if err != nil { + return r.release(workflowCtx, networkPeering, err) + } + sdkClientSet, orgID, err := r.AtlasProvider.SdkClientSet(ctx, credentials, r.Log) + if err != nil { + return r.terminate(workflowCtx, networkPeering, workflow.NetworkPeeringNotConfigured, err) + } + project, err := r.ResolveProject(ctx, sdkClientSet.SdkClient20231115008, networkPeering, orgID) + if err != nil { + return r.release(workflowCtx, networkPeering, 
err) + } + return r.handle(workflowCtx, &reconcileRequest{ + service: networkpeering.NewNetworkPeeringServiceFromClientSet(sdkClientSet), + containerService: networkcontainer.NewNetworkContainerServiceFromClientSet(sdkClientSet), + projectID: project.ID, + networkPeering: networkPeering, + }) +} + +func (r *AtlasNetworkPeeringReconciler) handle(workflowCtx *workflow.Context, req *reconcileRequest) (ctrl.Result, error) { + r.Log.Infow("handling network peering reconcile request", + "service set", (req.service != nil), "projectID", req.projectID, "networkPeering", req.networkPeering) + container, err := r.getContainer(workflowCtx.Context, req) + if err != nil { + return r.terminate(workflowCtx, req.networkPeering, workflow.Internal, err) + } + if container == nil { + err := fmt.Errorf("container not found for reference %v", req.networkPeering.Spec.ContainerRef) + return r.terminate(workflowCtx, req.networkPeering, workflow.NetworkPeeringMissingContainer, err) + } + var atlasPeer *networkpeering.NetworkPeer + if req.networkPeering.Status.ID != "" { + peer, err := req.service.Get(workflowCtx.Context, req.projectID, req.networkPeering.Status.ID) + if err != nil && !errors.Is(err, networkpeering.ErrNotFound) { + return r.terminate(workflowCtx, req.networkPeering, workflow.Internal, err) + } + atlasPeer = peer + } + inAtlas := atlasPeer != nil + deleted := req.networkPeering.DeletionTimestamp != nil + + switch { + case !deleted && !inAtlas: + return r.create(workflowCtx, req, container) + case !deleted && inAtlas: + return r.sync(workflowCtx, req, atlasPeer, container) + case deleted && inAtlas: + return r.delete(workflowCtx, req, atlasPeer, container) + default: // deleted && !inAtlas + return r.unmanage(workflowCtx, req) + } +} + +func (r *AtlasNetworkPeeringReconciler) getContainer(ctx context.Context, req *reconcileRequest) (*networkcontainer.NetworkContainer, error) { + id := req.networkPeering.Spec.ContainerRef.ID + if req.networkPeering.Spec.ContainerRef.ID == "" { 
// Name should be non nil instead + var err error + id, err = getContainerIDFromKubernetes(ctx, r.Client, req.networkPeering) + if err != nil { + return nil, fmt.Errorf("failed to solve Network Container id from Kubernetes: %w", err) + } + if id == "" { + return nil, fmt.Errorf("container %s has no id, is it still to be created?", + req.networkPeering.Spec.ContainerRef.Name) + } + } + container, err := req.containerService.Get(ctx, req.projectID, id) + if err != nil { + return nil, fmt.Errorf("failed to fetch Network Container %s from Atlas by id: %w", id, err) + } + return container, nil +} + +func getContainerIDFromKubernetes(ctx context.Context, k8sClient client.Client, networkPeering *akov2.AtlasNetworkPeering) (string, error) { + k8sContainer := akov2.AtlasNetworkContainer{} + key := client.ObjectKey{ + Name: networkPeering.Spec.ContainerRef.Name, + Namespace: networkPeering.Namespace, + } + err := k8sClient.Get(ctx, key, &k8sContainer) + if err != nil { + return "", fmt.Errorf("failed to fetch the Kubernetes Network Container %s info: %w", key.Name, err) + } + id := k8sContainer.Spec.ID + if id == "" { + id = k8sContainer.Status.ID + } + return id, nil +} diff --git a/internal/controller/atlasnetworkpeering/state_test.go b/internal/controller/atlasnetworkpeering/state_test.go new file mode 100644 index 0000000000..4c30684df1 --- /dev/null +++ b/internal/controller/atlasnetworkpeering/state_test.go @@ -0,0 +1,995 @@ +package atlasnetworkpeering + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.mongodb.org/atlas-sdk/v20231115008/admin" + "go.mongodb.org/atlas-sdk/v20231115008/mockadmin" + "go.uber.org/zap" + "go.uber.org/zap/zaptest" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl 
"sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/reconciler" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/workflow" + atlasmock "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/mocks/atlas" + akomock "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/mocks/translation" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkpeering" +) + +var ( + // sample error test + ErrTestFail = errors.New("failure") +) + +const ( + testProjectID = "project-id" + + testContainerName = "fake-container-name" + + testContainerID = "fake-container-id" + + testPeeringID = "peering-id" + + // testVpcID = "vpc-id" +) + +func TestHandleCustomResource(t *testing.T) { + deletionTime := metav1.Now() + tests := []struct { + title string + networkPeering *akov2.AtlasNetworkPeering + provider atlas.Provider + wantResult ctrl.Result + wantFinalizers []string + wantConditions []api.Condition + }{ + { + title: "should skip reconciliation", + networkPeering: &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-peering", + Namespace: "default", + Annotations: map[string]string{ + customresource.ReconciliationPolicyAnnotation: customresource.ReconciliationPolicySkip, + }, + Finalizers: []string{customresource.FinalizerLabel}, + }, + }, + wantResult: ctrl.Result{}, + 
wantFinalizers: []string{customresource.FinalizerLabel}, + }, + { + title: "should fail to validate resource", + networkPeering: &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-peering", + Namespace: "default", + Labels: map[string]string{ + customresource.ResourceVersion: "wrong", + }, + }, + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType), + api.FalseCondition(api.ResourceVersionStatus). + WithReason(string(workflow.AtlasResourceVersionIsInvalid)). + WithMessageRegexp("wrong is not a valid semver version for label mongodb.com/atlas-resource-version"), + }, + }, + { + title: "should fail when not supported", + networkPeering: &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-peering", + Namespace: "default", + }, + }, + provider: &atlasmock.TestProvider{ + IsSupportedFunc: func() bool { + return false + }, + }, + wantResult: ctrl.Result{}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType). + WithReason(string(workflow.AtlasGovUnsupported)). + WithMessageRegexp("the AtlasNetworkPeering is not supported by Atlas for government"), + api.TrueCondition(api.ResourceVersionStatus), + }, + }, + { + title: "should fail to resolve credentials and remove finalizer", + networkPeering: &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-peering", + Namespace: "default", + }, + Spec: akov2.AtlasNetworkPeeringSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ProjectRef: &common.ResourceRefNamespaced{ + Name: "my-no-existing-project", + }, + }, + }, + }, + provider: &atlasmock.TestProvider{ + IsSupportedFunc: func() bool { + return true + }, + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: nil, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType). + WithReason(string(workflow.NetworkPeeringNotConfigured)). 
+ WithMessageRegexp("missing Kubernetes Atlas Project\natlasprojects.atlas.mongodb.com \"my-no-existing-project\" not found"), + api.TrueCondition(api.ResourceVersionStatus), + }, + }, + { + title: "should fail to create sdk", + networkPeering: &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-peering", + Namespace: "default", + }, + Spec: akov2.AtlasNetworkPeeringSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ConnectionSecret: &api.LocalObjectReference{ + Name: "my-secret", + }, + }, + }, + }, + provider: &atlasmock.TestProvider{ + IsSupportedFunc: func() bool { + return true + }, + SdkSetClientFunc: func(secretRef *client.ObjectKey, log *zap.SugaredLogger) (*atlas.ClientSet, string, error) { + return nil, "", errors.New("failed to create sdk") + }, + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType). + WithReason(string(workflow.NetworkPeeringNotConfigured)). 
+ WithMessageRegexp("failed to create sdk"), + api.TrueCondition(api.ResourceVersionStatus), + }, + }, + { + title: "should fail to resolve project and remove finalizers", + networkPeering: &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-peering", + Namespace: "default", + Finalizers: []string{customresource.FinalizerLabel}, + }, + Spec: akov2.AtlasNetworkPeeringSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ConnectionSecret: &api.LocalObjectReference{ + Name: "my-secret", + }, + ProjectRef: &common.ResourceRefNamespaced{ + Name: "my-no-existing-project", + }, + }, + }, + }, + provider: &atlasmock.TestProvider{ + IsSupportedFunc: func() bool { + return true + }, + SdkSetClientFunc: func(secretRef *client.ObjectKey, log *zap.SugaredLogger) (*atlas.ClientSet, string, error) { + return &atlas.ClientSet{}, "", nil + }, + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: nil, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType). + WithReason(string(workflow.NetworkPeeringNotConfigured)). 
+ WithMessageRegexp("failed to query Kubernetes: failed to get Project from Kubernetes: missing Kubernetes Atlas Project\natlasprojects.atlas.mongodb.com \"my-no-existing-project\" not found"), + api.TrueCondition(api.ResourceVersionStatus), + }, + }, + { + title: "should handle network peering but fail to find container id from kube", + networkPeering: &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "network-peering", + Namespace: "default", + Finalizers: []string{customresource.FinalizerLabel}, + DeletionTimestamp: &deletionTime, + }, + Spec: akov2.AtlasNetworkPeeringSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ConnectionSecret: &api.LocalObjectReference{ + Name: "my-secret", + }, + ProjectRef: &common.ResourceRefNamespaced{ + Name: "my-project", + }, + }, + }, + }, + provider: &atlasmock.TestProvider{ + IsSupportedFunc: func() bool { + return true + }, + SdkSetClientFunc: func(secretRef *client.ObjectKey, log *zap.SugaredLogger) (*atlas.ClientSet, string, error) { + npAPI := mockadmin.NewNetworkPeeringApi(t) + return &atlas.ClientSet{ + SdkClient20231115008: &admin.APIClient{NetworkPeeringApi: npAPI}, + }, "", nil + }, + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.Internal)).WithMessageRegexp( + "failed to solve Network Container id from Kubernetes: failed to fetch the Kubernetes Network Container info: atlasnetworkcontainers.atlas.mongodb.com \"\" not found", + ), + api.TrueCondition(api.ResourceVersionStatus), + }, + }, + } + for _, tc := range tests { + t.Run(tc.title, func(t *testing.T) { + project := &akov2.AtlasProject{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-project", + Namespace: "default", + }, + } + testScheme := runtime.NewScheme() + require.NoError(t, akov2.AddToScheme(testScheme)) + k8sClient := fake.NewClientBuilder(). 
+ WithScheme(testScheme). + WithObjects(project, tc.networkPeering). + WithStatusSubresource(tc.networkPeering). + Build() + logger := zaptest.NewLogger(t) + ctx := context.Background() + r := testReconciler(k8sClient, tc.provider, logger) + result, err := r.handleCustomResource(ctx, tc.networkPeering) + np := getNetworkPeering(t, ctx, k8sClient, client.ObjectKeyFromObject(tc.networkPeering)) + require.NoError(t, err) + assert.Equal(t, tc.wantResult, result) + assert.Equal(t, tc.wantFinalizers, getFinalizers(np)) + assert.Equal(t, cleanConditions(tc.wantConditions), cleanConditions(getConditions(np))) + }) + } +} + +func TestHandle(t *testing.T) { + deletionTime := metav1.Now() + emptyProvider := &atlasmock.TestProvider{} + logger := zaptest.NewLogger(t) + for _, tc := range []struct { + title string + req *reconcileRequest + wantResult ctrl.Result + wantErr error + wantFinalizers []string + wantConditions []api.Condition + }{ + { + title: "create succeeds and goes in progress", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: testNetworkPeering(), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Create(mock.Anything, testProjectID, testContainerID, mock.Anything).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "10.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "CREATING", + }, + nil, + ) + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: 
workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.NetworkPeerReadyType). + WithMessageRegexp(fmt.Sprintf("Network Peering Connection %s is CREATING", testPeeringID)), + api.FalseCondition(api.ReadyType), + }, + }, + + { + title: "create fails", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: testNetworkPeering(), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Create(mock.Anything, testProjectID, testContainerID, mock.Anything).Return( + nil, ErrTestFail, + ) + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: nil, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.NetworkPeeringNotConfigured)). 
+ WithMessageRegexp(fmt.Sprintf("failed to create peering connection: %v", ErrTestFail)), + }, + }, + + { + title: "peering in sync", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: withStatus(testNetworkPeering(), status.AtlasNetworkPeeringStatus{ + ID: testPeeringID, + }), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "10.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "AVAILABLE", + }, + nil, + ) + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.TrueCondition(api.NetworkPeerReadyType), + api.TrueCondition(api.ReadyType), + }, + }, + + { + title: "peering connecting", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: withStatus( + WithFinalizers(testNetworkPeering(), []string{customresource.FinalizerLabel}), + status.AtlasNetworkPeeringStatus{ + ID: testPeeringID, + }), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + 
AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "10.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "NOT YET AVAILABLE", + }, + nil, + ) + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.NetworkPeerReadyType).WithMessageRegexp( + "Network Peering Connection peering-id is NOT YET AVAILABLE", + ), + api.FalseCondition(api.ReadyType), + }, + }, + + { + title: "peering creation failed", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: withStatus( + WithFinalizers(testNetworkPeering(), []string{customresource.FinalizerLabel}), + status.AtlasNetworkPeeringStatus{ + ID: testPeeringID, + }), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "10.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "OOPs!", + ErrorMessage: ErrTestFail.Error(), + }, + nil, + ) + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + 
}, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType). + WithReason(string(workflow.Internal)). + WithMessageRegexp( + fmt.Sprintf("peering connection failed: %s", ErrTestFail.Error()), + ), + }, + }, + + { + title: "update succeeds", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: withStatus( + WithFinalizers(testNetworkPeering(), []string{customresource.FinalizerLabel}), + status.AtlasNetworkPeeringStatus{ + ID: testPeeringID, + }), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "11.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "AVAILABLE", + }, + nil, + ) + nps.EXPECT().Update(mock.Anything, testProjectID, testPeeringID, testContainerID, mock.Anything).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "10.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "UPDATING", + }, + nil, + ) + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + }, + wantResult: 
ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.NetworkPeerReadyType).WithMessageRegexp( + "Network Peering Connection peering-id is UPDATING", + ), + api.FalseCondition(api.ReadyType), + }, + }, + + { + title: "update fails", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: withStatus( + WithFinalizers(testNetworkPeering(), []string{customresource.FinalizerLabel}), + status.AtlasNetworkPeeringStatus{ + ID: testPeeringID, + }), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "11.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "AVAILABLE", + }, + nil, + ) + nps.EXPECT().Update(mock.Anything, testProjectID, testPeeringID, testContainerID, mock.Anything).Return( + nil, ErrTestFail, + ) + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.Internal)). 
+ WithMessageRegexp(fmt.Sprintf("failed to update peering connection: %v", ErrTestFail)), + }, + }, + + { + title: "delete succeeds", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: withStatus( + withDeletionTimestamp( + WithFinalizers(testNetworkPeering(), []string{customresource.FinalizerLabel}), + &deletionTime, + ), + status.AtlasNetworkPeeringStatus{ + ID: testPeeringID, + }), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "11.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "AVAILABLE", + }, + nil, + ).Once() + nps.EXPECT().Delete(mock.Anything, testProjectID, testPeeringID).Return(nil) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "11.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "DELETING", + }, + nil, + ).Once() + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + 
api.FalseCondition(api.NetworkPeerReadyType).WithMessageRegexp( + "Network Peering Connection peering-id is DELETING", + ), + api.FalseCondition(api.ReadyType), + }, + }, + + { + title: "delete fails", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: withStatus( + withDeletionTimestamp( + WithFinalizers(testNetworkPeering(), []string{customresource.FinalizerLabel}), + &deletionTime, + ), + status.AtlasNetworkPeeringStatus{ + ID: testPeeringID, + }), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "11.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "AVAILABLE", + }, + nil, + ) + nps.EXPECT().Delete(mock.Anything, testProjectID, testPeeringID).Return(ErrTestFail) + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.Internal)). 
+ WithMessageRegexp(fmt.Sprintf("failed to delete peer connection %s: %s", + testPeeringID, ErrTestFail.Error())), + }, + }, + + { + title: "delete fails getting closing peering", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: withStatus( + withDeletionTimestamp( + WithFinalizers(testNetworkPeering(), []string{customresource.FinalizerLabel}), + &deletionTime, + ), + status.AtlasNetworkPeeringStatus{ + ID: testPeeringID, + }), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "11.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "AVAILABLE", + }, + nil, + ).Once() + nps.EXPECT().Delete(mock.Anything, testProjectID, testPeeringID).Return(nil) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return(nil, ErrTestFail).Once() + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{RequeueAfter: workflow.DefaultRetry}, + wantFinalizers: []string{customresource.FinalizerLabel}, + wantConditions: []api.Condition{ + api.FalseCondition(api.ReadyType).WithReason(string(workflow.Internal)). 
+ WithMessageRegexp(fmt.Sprintf("failed to get closing peer connection %s: %s", + testPeeringID, ErrTestFail.Error())), + }, + }, + + { + title: "delete immediately success with not found", + req: &reconcileRequest{ + projectID: testProjectID, + networkPeering: withStatus( + withDeletionTimestamp( + WithFinalizers(testNetworkPeering(), []string{customresource.FinalizerLabel}), + &deletionTime, + ), + status.AtlasNetworkPeeringStatus{ + ID: testPeeringID, + }), + service: func() networkpeering.NetworkPeeringService { + nps := akomock.NewNetworkPeeringServiceMock(t) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "11.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + ID: testPeeringID, + ContainerID: testContainerID, + Status: "AVAILABLE", + }, + nil, + ).Once() + nps.EXPECT().Delete(mock.Anything, testProjectID, testPeeringID).Return(nil) + nps.EXPECT().Get(mock.Anything, testProjectID, testPeeringID).Return( + nil, networkpeering.ErrNotFound, + ).Once() + return nps + }(), + containerService: func() networkcontainer.NetworkContainerService { + ncs := akomock.NewNetworkContainerServiceMock(t) + ncs.EXPECT().Get(mock.Anything, testProjectID, testContainerID).Return( + testAtlasContainer(), nil, + ) + return ncs + }(), + }, + wantResult: ctrl.Result{}, + wantConditions: []api.Condition{}, + }, + } { + t.Run(tc.title, func(t *testing.T) { + workflowCtx := &workflow.Context{ + Context: context.Background(), + } + testScheme := runtime.NewScheme() + require.NoError(t, akov2.AddToScheme(testScheme)) + k8sClient := fake.NewClientBuilder(). + WithScheme(testScheme). + WithObjects(tc.req.networkPeering, testContainer()). 
+ Build() + r := testReconciler(k8sClient, emptyProvider, logger) + result, err := r.handle(workflowCtx, tc.req) + assert.ErrorIs(t, err, tc.wantErr) + assert.Equal(t, tc.wantResult, result) + nc := getNetworkPeering(t, workflowCtx.Context, k8sClient, client.ObjectKeyFromObject(tc.req.networkPeering)) + assert.Equal(t, tc.wantFinalizers, getFinalizers(nc)) + assert.Equal(t, cleanConditions(tc.wantConditions), cleanConditions(workflowCtx.Conditions())) + }) + } +} + +func getNetworkPeering(t *testing.T, ctx context.Context, k8sClient client.Client, key client.ObjectKey) *akov2.AtlasNetworkPeering { + networkPeering := &akov2.AtlasNetworkPeering{} + if err := k8sClient.Get(ctx, key, networkPeering); err != nil && !k8serrors.IsNotFound(err) { + require.NoError(t, err) + } + return networkPeering +} + +func getFinalizers(networkContainer *akov2.AtlasNetworkPeering) []string { + if networkContainer == nil { + return nil + } + return networkContainer.GetFinalizers() +} + +func getConditions(networkContainer *akov2.AtlasNetworkPeering) []api.Condition { + if networkContainer == nil { + return nil + } + return networkContainer.Status.GetConditions() +} + +func testReconciler(k8sClient client.Client, provider atlas.Provider, logger *zap.Logger) *AtlasNetworkPeeringReconciler { + return &AtlasNetworkPeeringReconciler{ + AtlasReconciler: reconciler.AtlasReconciler{ + Client: k8sClient, + Log: logger.Sugar(), + }, + AtlasProvider: provider, + EventRecorder: record.NewFakeRecorder(10), + } +} + +func testContainer() *akov2.AtlasNetworkContainer { + return &akov2.AtlasNetworkContainer{ + ObjectMeta: metav1.ObjectMeta{ + Name: testContainerName, + Namespace: "default", + Annotations: map[string]string{ + customresource.ReconciliationPolicyAnnotation: customresource.ReconciliationPolicySkip, + }, + }, + Spec: akov2.AtlasNetworkContainerSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ExternalProjectRef: &akov2.ExternalProjectReference{ + ID: testProjectID, + }, + 
ConnectionSecret: &api.LocalObjectReference{}, + }, + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.0.0.0/18", + }, + }, + Status: status.AtlasNetworkContainerStatus{ + ID: testContainerID, + Provisioned: true, + }, + } +} + +func testAtlasContainer() *networkcontainer.NetworkContainer { + return &networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: "AWS", + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.0.0.0/18", + }, + }, + ID: testContainerID, + } +} + +func cleanConditions(inputs []api.Condition) []api.Condition { + outputs := make([]api.Condition, 0, len(inputs)) + for _, condition := range inputs { + clean := condition + clean.LastTransitionTime = metav1.Time{} + outputs = append(outputs, clean) + } + return outputs +} + +func testNetworkPeering() *akov2.AtlasNetworkPeering { + return &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-peering", + Namespace: "default", + }, + Spec: akov2.AtlasNetworkPeeringSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ExternalProjectRef: &akov2.ExternalProjectReference{ + ID: testProjectID, + }, + ConnectionSecret: &api.LocalObjectReference{ + Name: "fake-secret", + }, + }, + ContainerRef: akov2.ContainerDualReference{ + Name: testContainerName, + }, + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: "AWS", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "10.0.0.0/8", + VpcID: "vpc-id-test", + }, + }, + }, + } +} + +func withStatus(networkPeering *akov2.AtlasNetworkPeering, status status.AtlasNetworkPeeringStatus) *akov2.AtlasNetworkPeering { + networkPeering.Status = status + return networkPeering +} + +func WithFinalizers(networkPeering *akov2.AtlasNetworkPeering, finalizers 
[]string) *akov2.AtlasNetworkPeering { + networkPeering.Finalizers = finalizers + return networkPeering +} + +func withDeletionTimestamp(networkPeering *akov2.AtlasNetworkPeering, deletionTimestamp *metav1.Time) *akov2.AtlasNetworkPeering { + networkPeering.DeletionTimestamp = deletionTimestamp + return networkPeering +} diff --git a/internal/controller/atlasnetworkpeering/transition.go b/internal/controller/atlasnetworkpeering/transition.go new file mode 100644 index 0000000000..29f800ede8 --- /dev/null +++ b/internal/controller/atlasnetworkpeering/transition.go @@ -0,0 +1,149 @@ +package atlasnetworkpeering + +import ( + "errors" + "fmt" + + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/reconciler" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/workflow" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkpeering" +) + +func (r *AtlasNetworkPeeringReconciler) create(workflowCtx *workflow.Context, req *reconcileRequest, container *networkcontainer.NetworkContainer) (ctrl.Result, error) { + newPeer, err := req.service.Create( + workflowCtx.Context, + req.projectID, + container.ID, + &req.networkPeering.Spec.AtlasNetworkPeeringConfig, + ) + if err != nil { + wrappedErr := fmt.Errorf("failed to create peering connection: %w", err) + return r.terminate(workflowCtx, req.networkPeering, workflow.NetworkPeeringNotConfigured, wrappedErr) + } + if err := customresource.ManageFinalizer(workflowCtx.Context, r.Client, req.networkPeering, customresource.SetFinalizer); err != nil { + return r.terminate(workflowCtx, 
req.networkPeering, workflow.AtlasFinalizerNotSet, err) + } + return r.inProgress(workflowCtx, workflow.NetworkPeeringConnectionCreating, newPeer, container) +} + +func (r *AtlasNetworkPeeringReconciler) sync(workflowCtx *workflow.Context, req *reconcileRequest, atlasPeer *networkpeering.NetworkPeer, container *networkcontainer.NetworkContainer) (ctrl.Result, error) { + switch { + case atlasPeer.Failed(): + err := fmt.Errorf("peering connection failed: %s", atlasPeer.ErrorMessage) + return r.terminate(workflowCtx, req.networkPeering, workflow.Internal, err) + case !atlasPeer.Available(): + return r.inProgress(workflowCtx, workflow.NetworkPeeringConnectionPending, atlasPeer, container) + } + specPeer := networkpeering.NewNetworkPeer(atlasPeer.ID, &req.networkPeering.Spec.AtlasNetworkPeeringConfig) + if !networkpeering.CompareConfigs(atlasPeer, specPeer) { + return r.update(workflowCtx, req, container) + } + return r.ready(workflowCtx, req, atlasPeer, container) +} + +func (r *AtlasNetworkPeeringReconciler) update(workflowCtx *workflow.Context, req *reconcileRequest, container *networkcontainer.NetworkContainer) (ctrl.Result, error) { + updatedPeer, err := req.service.Update(workflowCtx.Context, req.projectID, req.networkPeering.Status.ID, container.ID, &req.networkPeering.Spec.AtlasNetworkPeeringConfig) + if err != nil { + wrappedErr := fmt.Errorf("failed to update peering connection: %w", err) + return r.terminate(workflowCtx, req.networkPeering, workflow.Internal, wrappedErr) + } + return r.inProgress(workflowCtx, workflow.NetworkPeeringConnectionUpdating, updatedPeer, container) +} + +func (r *AtlasNetworkPeeringReconciler) delete(workflowCtx *workflow.Context, req *reconcileRequest, atlasPeer *networkpeering.NetworkPeer, container *networkcontainer.NetworkContainer) (ctrl.Result, error) { + id := req.networkPeering.Status.ID + peer := atlasPeer + if id != "" && !atlasPeer.Closing() { + if err := req.service.Delete(workflowCtx.Context, req.projectID, id); err != 
nil { + wrappedErr := fmt.Errorf("failed to delete peer connection %s: %w", id, err) + return r.terminate(workflowCtx, req.networkPeering, workflow.Internal, wrappedErr) + } + closingPeer, err := req.service.Get(workflowCtx.Context, req.projectID, id) + if err != nil && !errors.Is(err, networkpeering.ErrNotFound) { + wrappedErr := fmt.Errorf("failed to get closing peer connection %s: %w", id, err) + return r.terminate(workflowCtx, req.networkPeering, workflow.Internal, wrappedErr) + } + peer = closingPeer + } + if peer == nil { + return r.unmanage(workflowCtx, req) + } + return r.inProgress(workflowCtx, workflow.NetworkPeeringConnectionClosing, peer, container) +} + +func (r *AtlasNetworkPeeringReconciler) unmanage(workflowCtx *workflow.Context, req *reconcileRequest) (ctrl.Result, error) { + workflowCtx.EnsureStatusOption(clearPeeringStatusOption()) + if err := customresource.ManageFinalizer(workflowCtx.Context, r.Client, req.networkPeering, customresource.UnsetFinalizer); err != nil { + return r.terminate(workflowCtx, req.networkPeering, workflow.AtlasFinalizerNotRemoved, err) + } + + return workflow.Deleted().ReconcileResult(), nil +} + +func (r *AtlasNetworkPeeringReconciler) inProgress(workflowCtx *workflow.Context, reason workflow.ConditionReason, peer *networkpeering.NetworkPeer, container *networkcontainer.NetworkContainer) (ctrl.Result, error) { + statusMsg := fmt.Sprintf("Network Peering Connection %s is %s", peer.ID, peer.Status) + workflowCtx.EnsureStatusOption(updatePeeringStatusOption(peer, container)) + workflowCtx.SetConditionFalseMsg(api.NetworkPeerReadyType, statusMsg) + workflowCtx.SetConditionFalse(api.ReadyType) + + return workflow.InProgress(reason, statusMsg).ReconcileResult(), nil +} + +func (r *AtlasNetworkPeeringReconciler) ready(workflowCtx *workflow.Context, req *reconcileRequest, peer *networkpeering.NetworkPeer, container *networkcontainer.NetworkContainer) (ctrl.Result, error) { + if err := 
customresource.ManageFinalizer(workflowCtx.Context, r.Client, req.networkPeering, customresource.SetFinalizer); err != nil { + return r.terminate(workflowCtx, req.networkPeering, workflow.AtlasFinalizerNotSet, err) + } + + workflowCtx.EnsureStatusOption(updatePeeringStatusOption(peer, container)) + workflowCtx.SetConditionTrue(api.NetworkPeerReadyType) + workflowCtx.SetConditionTrue(api.ReadyType) + + if req.networkPeering.Spec.ExternalProjectRef != nil { + return workflow.Requeue(r.independentSyncPeriod).ReconcileResult(), nil + } + + return workflow.OK().ReconcileResult(), nil +} + +func (r *AtlasNetworkPeeringReconciler) release(workflowCtx *workflow.Context, networkPeering *akov2.AtlasNetworkPeering, err error) (ctrl.Result, error) { + if errors.Is(err, reconciler.ErrMissingKubeProject) { + if finalizerErr := customresource.ManageFinalizer(workflowCtx.Context, r.Client, networkPeering, customresource.UnsetFinalizer); finalizerErr != nil { + err = errors.Join(err, finalizerErr) + } + } + return r.terminate(workflowCtx, networkPeering, workflow.NetworkPeeringNotConfigured, err) +} + +func (r *AtlasNetworkPeeringReconciler) terminate( + ctx *workflow.Context, + resource api.AtlasCustomResource, + reason workflow.ConditionReason, + err error, +) (ctrl.Result, error) { + condition := api.ReadyType + r.Log.Errorf("resource %T(%s/%s) failed on condition %s: %s", + resource, resource.GetNamespace(), resource.GetName(), condition, err) + result := workflow.Terminate(reason, err) + ctx.SetConditionFalse(api.ReadyType).SetConditionFromResult(condition, result) + + return result.ReconcileResult(), nil +} + +func updatePeeringStatusOption(peer *networkpeering.NetworkPeer, container *networkcontainer.NetworkContainer) status.AtlasNetworkPeeringStatusOption { + return func(peeringStatus *status.AtlasNetworkPeeringStatus) { + networkpeering.ApplyPeeringStatus(peeringStatus, peer, container) + } +} + +func clearPeeringStatusOption() status.AtlasNetworkPeeringStatusOption { + 
return func(peeringStatus *status.AtlasNetworkPeeringStatus) { + networkpeering.ClearPeeringStatus(peeringStatus) + } +} diff --git a/internal/controller/atlasnetworkpeering/transition_test.go b/internal/controller/atlasnetworkpeering/transition_test.go new file mode 100644 index 0000000000..fb3687b8a1 --- /dev/null +++ b/internal/controller/atlasnetworkpeering/transition_test.go @@ -0,0 +1,168 @@ +package atlasnetworkpeering + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + v1 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkpeering" +) + +func TestApplyPeeringStatus(t *testing.T) { + for _, tc := range []struct { + title string + peer networkpeering.NetworkPeer + container networkcontainer.NetworkContainer + wantStatus status.AtlasNetworkPeeringStatus + }{ + { + title: "wrong provider fails", + peer: networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: v1.AtlasNetworkPeeringConfig{ + Provider: "Azure", // Should be "AZURE" + }, + }, + wantStatus: status.AtlasNetworkPeeringStatus{ + Status: "unsupported provider: \"Azure\"", + }, + }, + + { + title: "Sample AWS works", + peer: networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: v1.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAWS), + AWSConfiguration: &v1.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: "some-aws-id", + RouteTableCIDRBlock: "10.0.0.0/18", + VpcID: "vpc-id-app-fake", + }, + }, + ID: "peer-id", + ContainerID: "container-id", + Status: "some status", + ErrorMessage: "some error", + AWSStatus: &status.AWSPeeringStatus{ + ConnectionID: "connection-id", + }, + }, + container: networkcontainer.NetworkContainer{ + 
NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: v1.AtlasNetworkContainerConfig{ + ID: "container-id", + Region: "us-east-2", + CIDRBlock: "11.0.0.0/18", + }, + }, + ID: "container-id", + Provisioned: true, + AWSStatus: &networkcontainer.AWSContainerStatus{ + VpcID: "vpc-id-container-fake", + }, + }, + wantStatus: status.AtlasNetworkPeeringStatus{ + ID: "peer-id", + Status: "some status", + AWSStatus: &status.AWSPeeringStatus{ + VpcID: "vpc-id-container-fake", + ConnectionID: "connection-id", + }, + }, + }, + + { + title: "Sample Azure works", + peer: networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: v1.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAzure), + AzureConfiguration: &v1.AzureNetworkPeeringConfiguration{ + AzureDirectoryID: "azure-app-dir-id", + AzureSubscriptionID: "azure-app-subcription-id", + ResourceGroupName: "resource-group", + VNetName: "some-net", + }, + }, + ID: "peer-id", + ContainerID: "container-id", + Status: "some status", + ErrorMessage: "some error", + }, + container: networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAzure), + AtlasNetworkContainerConfig: v1.AtlasNetworkContainerConfig{ + ID: "container-id", + Region: "US_EAST_2", + CIDRBlock: "11.0.0.0/18", + }, + }, + ID: "container-id", + Provisioned: true, + AzureStatus: &networkcontainer.AzureContainerStatus{ + AzureSubscriptionID: "azure-atlas-subcription-id", + VnetName: "atlas-net-name", + }, + }, + wantStatus: status.AtlasNetworkPeeringStatus{ + ID: "peer-id", + Status: "some status", + AzureStatus: &status.AzurePeeringStatus{ + AzureSubscriptionID: "azure-atlas-subcription-id", + VnetName: "atlas-net-name", + }, + }, + }, + + { + title: "Sample GCP works", + peer: networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: v1.AtlasNetworkPeeringConfig{ + Provider: 
string(provider.ProviderGCP), + GCPConfiguration: &v1.GCPNetworkPeeringConfiguration{ + GCPProjectID: "gcp-app-project", + NetworkName: "gcp-app-network", + }, + }, + ID: "peer-id", + ContainerID: "container-id", + Status: "some status", + ErrorMessage: "some error", + }, + container: networkcontainer.NetworkContainer{ + NetworkContainerConfig: networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: v1.AtlasNetworkContainerConfig{ + ID: "container-id", + CIDRBlock: "11.0.0.0/18", + }, + }, + ID: "container-id", + Provisioned: true, + GCPStatus: &networkcontainer.GCPContainerStatus{ + GCPProjectID: "gcp-atlas-project", + NetworkName: "gcp-atlas-network", + }, + }, + wantStatus: status.AtlasNetworkPeeringStatus{ + ID: "peer-id", + Status: "some status", + GCPStatus: &status.GCPPeeringStatus{ + GCPProjectID: "gcp-atlas-project", + NetworkName: "gcp-atlas-network", + }, + }, + }, + } { + t.Run(tc.title, func(t *testing.T) { + status := status.AtlasNetworkPeeringStatus{} + networkpeering.ApplyPeeringStatus(&status, &tc.peer, &tc.container) + assert.Equal(t, tc.wantStatus, status) + }) + } +} diff --git a/internal/controller/registry.go b/internal/controller/registry.go index 46a1613065..bd2b9e7dd9 100644 --- a/internal/controller/registry.go +++ b/internal/controller/registry.go @@ -19,6 +19,7 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasfederatedauth" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasipaccesslist" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasnetworkcontainer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasnetworkpeering" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasprivateendpoint" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasproject" 
"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlassearchindexconfig" @@ -81,5 +82,6 @@ func (r *Registry) registerControllers(c cluster.Cluster, ap atlas.Provider) { reconcilers = append(reconcilers, atlasprivateendpoint.NewAtlasPrivateEndpointReconciler(c, r.predicates, ap, r.deletionProtection, r.independentSyncPeriod, r.logger)) reconcilers = append(reconcilers, atlasipaccesslist.NewAtlasIPAccessListReconciler(c, r.predicates, ap, r.deletionProtection, r.independentSyncPeriod, r.logger)) reconcilers = append(reconcilers, atlasnetworkcontainer.NewAtlasNetworkContainerReconciler(c, r.predicates, ap, r.deletionProtection, r.logger, r.independentSyncPeriod)) + reconcilers = append(reconcilers, atlasnetworkpeering.NewAtlasNetworkPeeringsReconciler(c, r.predicates, ap, r.deletionProtection, r.logger, r.independentSyncPeriod)) r.reconcilers = reconcilers } diff --git a/internal/controller/workflow/reason.go b/internal/controller/workflow/reason.go index acba5a311d..60fea3347b 100644 --- a/internal/controller/workflow/reason.go +++ b/internal/controller/workflow/reason.go @@ -161,16 +161,19 @@ const ( IPAccessListPending ConditionReason = "IPAccessListPending" ) -// Atlas Network Peering reasons -const ( - NetworkPeeringConnectionCreating ConditionReason = "NetworkPeeringConnectionCreating" - NetworkPeeringConnectionPending ConditionReason = "NetworkPeeringConnectionPending" - NetworkPeeringRemovingContainer ConditionReason = "NetworkPeeringRemovingContainer" -) - // Atlas Network Container reasons const ( NetworkContainerNotConfigured ConditionReason = "NetworkContainerNotConfigured" NetworkContainerCreated ConditionReason = "NetworkContainerCreated" NetworkContainerNotDeleted ConditionReason = "NetworkContainerNotDeleted" ) + +// Atlas Network Peering reasons +const ( + NetworkPeeringNotConfigured ConditionReason = "NetworkPeeringNotConfigured" + NetworkPeeringMissingContainer ConditionReason = "NetworkPeeringMissingContainer" + 
NetworkPeeringConnectionCreating ConditionReason = "NetworkPeeringConnectionCreating" + NetworkPeeringConnectionUpdating ConditionReason = "NetworkPeeringConnectionUpdating" + NetworkPeeringConnectionPending ConditionReason = "NetworkPeeringConnectionPending" + NetworkPeeringConnectionClosing ConditionReason = "NetworkPeeringConnectionClosing" +) diff --git a/internal/indexer/atlasnetworkpeeringcredentials.go b/internal/indexer/atlasnetworkpeeringcredentials.go new file mode 100644 index 0000000000..789456c2cd --- /dev/null +++ b/internal/indexer/atlasnetworkpeeringcredentials.go @@ -0,0 +1,24 @@ +package indexer + +import ( + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" +) + +const ( + AtlasNetworkPeeringCredentialsIndex = "atlasnetworkpeering.credentials" +) + +func NewAtlasNetworkPeeringByCredentialIndexer(logger *zap.Logger) *LocalCredentialIndexer { + return NewLocalCredentialsIndexer(AtlasNetworkPeeringCredentialsIndex, &akov2.AtlasNetworkPeering{}, logger) +} + +func NetworkPeeringRequests(list *akov2.AtlasNetworkPeeringList) []reconcile.Request { + requests := make([]reconcile.Request, 0, len(list.Items)) + for _, item := range list.Items { + requests = append(requests, toRequest(&item)) + } + return requests +} diff --git a/internal/indexer/atlasnetworkpeeringsprojects.go b/internal/indexer/atlasnetworkpeeringsprojects.go new file mode 100644 index 0000000000..cb82e7bf1e --- /dev/null +++ b/internal/indexer/atlasnetworkpeeringsprojects.go @@ -0,0 +1,30 @@ +//nolint:dupl +package indexer + +import ( + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/client" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" +) + +const ( + AtlasNetworkPeeringByProjectIndex = "atlasnetworkpeering.spec.projectRef" +) + +type AtlasNetworkPeeringByProjectIndexer struct { + AtlasReferrerByProjectIndexerBase +} + +func NewAtlasNetworkPeeringByProjectIndexer(logger 
*zap.Logger) *AtlasNetworkPeeringByProjectIndexer { + return &AtlasNetworkPeeringByProjectIndexer{ + AtlasReferrerByProjectIndexerBase: *NewAtlasReferrerByProjectIndexer( + logger, + AtlasNetworkPeeringByProjectIndex, + ), + } +} + +func (*AtlasNetworkPeeringByProjectIndexer) Object() client.Object { + return &akov2.AtlasNetworkPeering{} +} diff --git a/internal/indexer/atlasnetworkpeeringsprojects_test.go b/internal/indexer/atlasnetworkpeeringsprojects_test.go new file mode 100644 index 0000000000..1967adbe9a --- /dev/null +++ b/internal/indexer/atlasnetworkpeeringsprojects_test.go @@ -0,0 +1,46 @@ +package indexer + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common" +) + +func TestAtlasNetworkPeeringByProjectIndices(t *testing.T) { + t.Run("should return nil when instance has no project associated to it", func(t *testing.T) { + pe := &akov2.AtlasNetworkPeering{ + Spec: akov2.AtlasNetworkPeeringSpec{}, + } + + indexer := NewAtlasNetworkPeeringByProjectIndexer(zaptest.NewLogger(t)) + keys := indexer.Keys(pe) + assert.Nil(t, keys) + }) + + t.Run("should return indexes slice when instance has project associated to it", func(t *testing.T) { + pe := &akov2.AtlasNetworkPeering{ + Spec: akov2.AtlasNetworkPeeringSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ProjectRef: &common.ResourceRefNamespaced{ + Name: "project-1", + Namespace: "default", + }, + }, + }, + } + + indexer := NewAtlasNetworkPeeringByProjectIndexer(zaptest.NewLogger(t)) + keys := indexer.Keys(pe) + assert.Equal( + t, + []string{ + "default/project-1", + }, + keys, + ) + }) +} diff --git a/internal/indexer/indexer.go b/internal/indexer/indexer.go index ada2708124..93c0532953 100644 --- a/internal/indexer/indexer.go +++ b/internal/indexer/indexer.go @@ -42,6 +42,8 @@ func RegisterAll(ctx context.Context, c cluster.Cluster, 
logger *zap.Logger) err NewAtlasPrivateEndpointByProjectIndexer(logger), NewAtlasIPAccessListCredentialsByCredentialIndexer(logger), NewAtlasIPAccessListByProjectIndexer(logger), + NewAtlasNetworkPeeringByCredentialIndexer(logger), + NewAtlasNetworkPeeringByProjectIndexer(logger), NewAtlasNetworkContainerByCredentialIndexer(logger), NewAtlasNetworkContainerByProjectIndexer(logger), ) diff --git a/internal/indexer/localcredentials_test.go b/internal/indexer/localcredentials_test.go index 76f4623e26..a38a79aa42 100644 --- a/internal/indexer/localcredentials_test.go +++ b/internal/indexer/localcredentials_test.go @@ -147,6 +147,23 @@ func TestLocalCredentialsIndexer(t *testing.T) { wantKeys: []string{"ns/secret-ref"}, wantObject: &akov2.AtlasNetworkContainer{}, }, + { + name: "should return keys when there is a reference on a network peering", + object: &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "user", + Namespace: "ns", + }, + Spec: akov2.AtlasNetworkPeeringSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ConnectionSecret: &api.LocalObjectReference{Name: "secret-ref"}, + }, + }, + }, + index: AtlasNetworkPeeringCredentialsIndex, + wantKeys: []string{"ns/secret-ref"}, + wantObject: &akov2.AtlasNetworkPeering{}, + }, } { indexers := testIndexers(t) t.Run(tc.name, func(t *testing.T) { @@ -364,6 +381,42 @@ func TestCredentialsIndexMapperFunc(t *testing.T) { }}, }, }, + { + name: "matching input credentials renders matching network peering", + index: AtlasNetworkPeeringCredentialsIndex, + output: &akov2.AtlasNetworkPeering{}, + mapperFn: func(kubeClient client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return CredentialsIndexMapperFunc[*akov2.AtlasNetworkPeeringList]( + AtlasNetworkPeeringCredentialsIndex, + func() *akov2.AtlasNetworkPeeringList { return &akov2.AtlasNetworkPeeringList{} }, + NetworkPeeringRequests, + kubeClient, + logger, + ) + }, + input: newTestSecret("matching-peering-secret-ref"), + objects: 
[]client.Object{ + &akov2.AtlasNetworkPeering{ + ObjectMeta: metav1.ObjectMeta{ + Name: "matching-peering", + Namespace: "ns", + }, + Spec: akov2.AtlasNetworkPeeringSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ConnectionSecret: &api.LocalObjectReference{ + Name: "matching-peering-secret-ref", + }, + }, + }, + }, + }, + want: []reconcile.Request{ + {NamespacedName: types.NamespacedName{ + Name: "matching-peering", + Namespace: "ns", + }}, + }, + }, } { scheme := runtime.NewScheme() assert.NoError(t, corev1.AddToScheme(scheme)) @@ -471,5 +524,6 @@ func testIndexers(t *testing.T) map[string]*LocalCredentialIndexer { indexers[AtlasCustomRoleCredentialsIndex] = NewAtlasCustomRoleByCredentialIndexer(logger) indexers[AtlasPrivateEndpointCredentialsIndex] = NewAtlasPrivateEndpointByCredentialIndexer(logger) indexers[AtlasNetworkContainerCredentialsIndex] = NewAtlasNetworkContainerByCredentialIndexer(logger) + indexers[AtlasNetworkPeeringCredentialsIndex] = NewAtlasNetworkPeeringByCredentialIndexer(logger) return indexers } diff --git a/internal/mocks/translation/atlas_deployments_service.go b/internal/mocks/translation/atlas_deployments_service.go index b79f3bc8d6..268c380a66 100644 --- a/internal/mocks/translation/atlas_deployments_service.go +++ b/internal/mocks/translation/atlas_deployments_service.go @@ -7,8 +7,9 @@ import ( mock "github.com/stretchr/testify/mock" - v1 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" deployment "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/deployment" + + v1 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" ) // AtlasDeploymentsServiceMock is an autogenerated mock type for the AtlasDeploymentsService type diff --git a/internal/mocks/translation/deployment.go b/internal/mocks/translation/deployment.go index 9647094a8f..14bc172e51 100644 --- a/internal/mocks/translation/deployment.go +++ b/internal/mocks/translation/deployment.go @@ -22,6 +22,61 @@ func (_m *DeploymentMock) EXPECT() 
*DeploymentMock_Expecter { return &DeploymentMock_Expecter{mock: &_m.Mock} } +// Deprecated provides a mock function with no fields +func (_m *DeploymentMock) Deprecated() (bool, string) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Deprecated") + } + + var r0 bool + var r1 string + if rf, ok := ret.Get(0).(func() (bool, string)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() string); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(string) + } + + return r0, r1 +} + +// DeploymentMock_Deprecated_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deprecated' +type DeploymentMock_Deprecated_Call struct { + *mock.Call +} + +// Deprecated is a helper method to define mock.On call +func (_e *DeploymentMock_Expecter) Deprecated() *DeploymentMock_Deprecated_Call { + return &DeploymentMock_Deprecated_Call{Call: _e.mock.On("Deprecated")} +} + +func (_c *DeploymentMock_Deprecated_Call) Run(run func()) *DeploymentMock_Deprecated_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DeploymentMock_Deprecated_Call) Return(_a0 bool, _a1 string) *DeploymentMock_Deprecated_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DeploymentMock_Deprecated_Call) RunAndReturn(run func() (bool, string)) *DeploymentMock_Deprecated_Call { + _c.Call.Return(run) + return _c +} + // GetConnection provides a mock function with no fields func (_m *DeploymentMock) GetConnection() *status.ConnectionStrings { ret := _m.Called() diff --git a/internal/mocks/translation/network_peering_service.go b/internal/mocks/translation/network_peering_service.go new file mode 100644 index 0000000000..873fb51090 --- /dev/null +++ b/internal/mocks/translation/network_peering_service.go @@ -0,0 +1,270 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package translation + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + v1 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + networkpeering "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkpeering" +) + +// NetworkPeeringServiceMock is an autogenerated mock type for the NetworkPeeringService type +type NetworkPeeringServiceMock struct { + mock.Mock +} + +type NetworkPeeringServiceMock_Expecter struct { + mock *mock.Mock +} + +func (_m *NetworkPeeringServiceMock) EXPECT() *NetworkPeeringServiceMock_Expecter { + return &NetworkPeeringServiceMock_Expecter{mock: &_m.Mock} +} + +// Create provides a mock function with given fields: ctx, projectID, containerID, cfg +func (_m *NetworkPeeringServiceMock) Create(ctx context.Context, projectID string, containerID string, cfg *v1.AtlasNetworkPeeringConfig) (*networkpeering.NetworkPeer, error) { + ret := _m.Called(ctx, projectID, containerID, cfg) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 *networkpeering.NetworkPeer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, *v1.AtlasNetworkPeeringConfig) (*networkpeering.NetworkPeer, error)); ok { + return rf(ctx, projectID, containerID, cfg) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, *v1.AtlasNetworkPeeringConfig) *networkpeering.NetworkPeer); ok { + r0 = rf(ctx, projectID, containerID, cfg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*networkpeering.NetworkPeer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, *v1.AtlasNetworkPeeringConfig) error); ok { + r1 = rf(ctx, projectID, containerID, cfg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NetworkPeeringServiceMock_Create_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Create' +type NetworkPeeringServiceMock_Create_Call struct { + *mock.Call +} + +// Create is a helper 
method to define mock.On call +// - ctx context.Context +// - projectID string +// - containerID string +// - cfg *v1.AtlasNetworkPeeringConfig +func (_e *NetworkPeeringServiceMock_Expecter) Create(ctx interface{}, projectID interface{}, containerID interface{}, cfg interface{}) *NetworkPeeringServiceMock_Create_Call { + return &NetworkPeeringServiceMock_Create_Call{Call: _e.mock.On("Create", ctx, projectID, containerID, cfg)} +} + +func (_c *NetworkPeeringServiceMock_Create_Call) Run(run func(ctx context.Context, projectID string, containerID string, cfg *v1.AtlasNetworkPeeringConfig)) *NetworkPeeringServiceMock_Create_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(*v1.AtlasNetworkPeeringConfig)) + }) + return _c +} + +func (_c *NetworkPeeringServiceMock_Create_Call) Return(_a0 *networkpeering.NetworkPeer, _a1 error) *NetworkPeeringServiceMock_Create_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NetworkPeeringServiceMock_Create_Call) RunAndReturn(run func(context.Context, string, string, *v1.AtlasNetworkPeeringConfig) (*networkpeering.NetworkPeer, error)) *NetworkPeeringServiceMock_Create_Call { + _c.Call.Return(run) + return _c +} + +// Delete provides a mock function with given fields: ctx, projectID, peerID +func (_m *NetworkPeeringServiceMock) Delete(ctx context.Context, projectID string, peerID string) error { + ret := _m.Called(ctx, projectID, peerID) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, projectID, peerID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NetworkPeeringServiceMock_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type NetworkPeeringServiceMock_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// 
- ctx context.Context +// - projectID string +// - peerID string +func (_e *NetworkPeeringServiceMock_Expecter) Delete(ctx interface{}, projectID interface{}, peerID interface{}) *NetworkPeeringServiceMock_Delete_Call { + return &NetworkPeeringServiceMock_Delete_Call{Call: _e.mock.On("Delete", ctx, projectID, peerID)} +} + +func (_c *NetworkPeeringServiceMock_Delete_Call) Run(run func(ctx context.Context, projectID string, peerID string)) *NetworkPeeringServiceMock_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *NetworkPeeringServiceMock_Delete_Call) Return(_a0 error) *NetworkPeeringServiceMock_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *NetworkPeeringServiceMock_Delete_Call) RunAndReturn(run func(context.Context, string, string) error) *NetworkPeeringServiceMock_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: ctx, projectID, peerID +func (_m *NetworkPeeringServiceMock) Get(ctx context.Context, projectID string, peerID string) (*networkpeering.NetworkPeer, error) { + ret := _m.Called(ctx, projectID, peerID) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *networkpeering.NetworkPeer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*networkpeering.NetworkPeer, error)); ok { + return rf(ctx, projectID, peerID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *networkpeering.NetworkPeer); ok { + r0 = rf(ctx, projectID, peerID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*networkpeering.NetworkPeer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, projectID, peerID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NetworkPeeringServiceMock_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version 
for method 'Get' +type NetworkPeeringServiceMock_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - projectID string +// - peerID string +func (_e *NetworkPeeringServiceMock_Expecter) Get(ctx interface{}, projectID interface{}, peerID interface{}) *NetworkPeeringServiceMock_Get_Call { + return &NetworkPeeringServiceMock_Get_Call{Call: _e.mock.On("Get", ctx, projectID, peerID)} +} + +func (_c *NetworkPeeringServiceMock_Get_Call) Run(run func(ctx context.Context, projectID string, peerID string)) *NetworkPeeringServiceMock_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *NetworkPeeringServiceMock_Get_Call) Return(_a0 *networkpeering.NetworkPeer, _a1 error) *NetworkPeeringServiceMock_Get_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NetworkPeeringServiceMock_Get_Call) RunAndReturn(run func(context.Context, string, string) (*networkpeering.NetworkPeer, error)) *NetworkPeeringServiceMock_Get_Call { + _c.Call.Return(run) + return _c +} + +// Update provides a mock function with given fields: ctx, pojectID, peerID, containerID, cfg +func (_m *NetworkPeeringServiceMock) Update(ctx context.Context, pojectID string, peerID string, containerID string, cfg *v1.AtlasNetworkPeeringConfig) (*networkpeering.NetworkPeer, error) { + ret := _m.Called(ctx, pojectID, peerID, containerID, cfg) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 *networkpeering.NetworkPeer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *v1.AtlasNetworkPeeringConfig) (*networkpeering.NetworkPeer, error)); ok { + return rf(ctx, pojectID, peerID, containerID, cfg) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *v1.AtlasNetworkPeeringConfig) *networkpeering.NetworkPeer); ok { + r0 = rf(ctx, pojectID, peerID, 
containerID, cfg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*networkpeering.NetworkPeer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, *v1.AtlasNetworkPeeringConfig) error); ok { + r1 = rf(ctx, pojectID, peerID, containerID, cfg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NetworkPeeringServiceMock_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type NetworkPeeringServiceMock_Update_Call struct { + *mock.Call +} + +// Update is a helper method to define mock.On call +// - ctx context.Context +// - pojectID string +// - peerID string +// - containerID string +// - cfg *v1.AtlasNetworkPeeringConfig +func (_e *NetworkPeeringServiceMock_Expecter) Update(ctx interface{}, pojectID interface{}, peerID interface{}, containerID interface{}, cfg interface{}) *NetworkPeeringServiceMock_Update_Call { + return &NetworkPeeringServiceMock_Update_Call{Call: _e.mock.On("Update", ctx, pojectID, peerID, containerID, cfg)} +} + +func (_c *NetworkPeeringServiceMock_Update_Call) Run(run func(ctx context.Context, pojectID string, peerID string, containerID string, cfg *v1.AtlasNetworkPeeringConfig)) *NetworkPeeringServiceMock_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(*v1.AtlasNetworkPeeringConfig)) + }) + return _c +} + +func (_c *NetworkPeeringServiceMock_Update_Call) Return(_a0 *networkpeering.NetworkPeer, _a1 error) *NetworkPeeringServiceMock_Update_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NetworkPeeringServiceMock_Update_Call) RunAndReturn(run func(context.Context, string, string, string, *v1.AtlasNetworkPeeringConfig) (*networkpeering.NetworkPeer, error)) *NetworkPeeringServiceMock_Update_Call { + _c.Call.Return(run) + return _c +} + +// NewNetworkPeeringServiceMock creates a new instance of NetworkPeeringServiceMock. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNetworkPeeringServiceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *NetworkPeeringServiceMock { + mock := &NetworkPeeringServiceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/translation/networkcontainer/conversion.go b/internal/translation/networkcontainer/conversion.go index 52b281792b..19fc051552 100644 --- a/internal/translation/networkcontainer/conversion.go +++ b/internal/translation/networkcontainer/conversion.go @@ -14,10 +14,28 @@ type NetworkContainerConfig struct { akov2.AtlasNetworkContainerConfig } +type AWSContainerStatus struct { + VpcID string + ContainerID string +} + +type AzureContainerStatus struct { + AzureSubscriptionID string + VnetName string +} + +type GCPContainerStatus struct { + GCPProjectID string + NetworkName string +} + type NetworkContainer struct { NetworkContainerConfig ID string Provisioned bool + AWSStatus *AWSContainerStatus + AzureStatus *AzureContainerStatus + GCPStatus *GCPContainerStatus } func NewNetworkContainerConfig(provider string, config *akov2.AtlasNetworkContainerConfig) *NetworkContainerConfig { @@ -54,6 +72,14 @@ func toAtlasConfig(cfg *NetworkContainerConfig) *admin.CloudProviderContainer { func fromAtlas(container *admin.CloudProviderContainer) *NetworkContainer { pc := fromAtlasNoStatus(container) pc.Provisioned = container.GetProvisioned() + switch provider.ProviderName(pc.Provider) { + case provider.ProviderAWS: + pc.AWSStatus = fromAtlasAWSStatus(container) + case provider.ProviderAzure: + pc.AzureStatus = fromAtlasAzureStatus(container) + case provider.ProviderGCP: + pc.GCPStatus = fromAtlasGCPStatus(container) + } return pc } @@ -73,3 +99,32 @@ func fromAtlasNoStatus(container *admin.CloudProviderContainer) *NetworkContaine ID: container.GetId(), } } + 
+func fromAtlasAWSStatus(container *admin.CloudProviderContainer) *AWSContainerStatus { + if container.VpcId == nil { + return nil + } + return &AWSContainerStatus{ + VpcID: container.GetVpcId(), + } +} + +func fromAtlasAzureStatus(container *admin.CloudProviderContainer) *AzureContainerStatus { + if container.AzureSubscriptionId == nil && container.VnetName == nil { + return nil + } + return &AzureContainerStatus{ + AzureSubscriptionID: container.GetAzureSubscriptionId(), + VnetName: container.GetVnetName(), + } +} + +func fromAtlasGCPStatus(container *admin.CloudProviderContainer) *GCPContainerStatus { + if container.GcpProjectId == nil && container.NetworkName == nil { + return nil + } + return &GCPContainerStatus{ + GCPProjectID: container.GetGcpProjectId(), + NetworkName: container.GetNetworkName(), + } +} diff --git a/internal/translation/networkcontainer/conversion_test.go b/internal/translation/networkcontainer/conversion_test.go index ee5fb3b160..63e4108c46 100644 --- a/internal/translation/networkcontainer/conversion_test.go +++ b/internal/translation/networkcontainer/conversion_test.go @@ -34,4 +34,8 @@ func FuzzConvertContainer(f *testing.F) { func cleanupContainer(container *NetworkContainer) { container.AtlasNetworkContainerConfig.ID = "" + // status fields are only populated from Atlas they do not complete a roundtrip + container.AWSStatus = nil + container.AzureStatus = nil + container.GCPStatus = nil } diff --git a/internal/translation/networkcontainer/networkcontainer_test.go b/internal/translation/networkcontainer/networkcontainer_test.go index c16e52ee65..73aef4e239 100644 --- a/internal/translation/networkcontainer/networkcontainer_test.go +++ b/internal/translation/networkcontainer/networkcontainer_test.go @@ -67,7 +67,8 @@ func TestNetworkContainerCreate(t *testing.T) { Provider: string(provider.ProviderAWS), AtlasNetworkContainerConfig: testContainerConfig(), }, - ID: testContainerID, + ID: testContainerID, + AWSStatus: 
&networkcontainer.AWSContainerStatus{VpcID: testVpcID}, }, expectedError: nil, }, @@ -121,6 +122,10 @@ func TestNetworkContainerCreate(t *testing.T) { AtlasNetworkContainerConfig: testContainerConfig(), }, ID: testContainerID, + AzureStatus: &networkcontainer.AzureContainerStatus{ + AzureSubscriptionID: testAzureSubcriptionID, + VnetName: testVnet, + }, }, expectedError: nil, }, @@ -172,6 +177,10 @@ func TestNetworkContainerCreate(t *testing.T) { AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{CIDRBlock: "1.1.1.1/2"}, }, ID: testContainerID, + GCPStatus: &networkcontainer.GCPContainerStatus{ + GCPProjectID: testGCPProjectID, + NetworkName: testNetworkName, + }, }, expectedError: nil, }, @@ -246,7 +255,8 @@ func TestNetworkContainerGet(t *testing.T) { Provider: string(provider.ProviderAWS), AtlasNetworkContainerConfig: testContainerConfig(), }, - ID: testContainerID, + ID: testContainerID, + AWSStatus: &networkcontainer.AWSContainerStatus{VpcID: testVpcID}, }, expectedError: nil, }, @@ -307,7 +317,8 @@ func TestNetworkContainerFind(t *testing.T) { Provider: string(provider.ProviderAWS), AtlasNetworkContainerConfig: testContainerConfig(), }, - ID: testContainerID, + ID: testContainerID, + AWSStatus: &networkcontainer.AWSContainerStatus{VpcID: testVpcID}, }, expectedError: nil, }, @@ -369,6 +380,10 @@ func TestNetworkContainerFind(t *testing.T) { }, }, ID: testContainerID, + GCPStatus: &networkcontainer.GCPContainerStatus{ + GCPProjectID: testGCPProjectID, + NetworkName: testNetworkName, + }, }, expectedError: nil, }, @@ -472,7 +487,8 @@ func TestNetworkContainerUpdate(t *testing.T) { Provider: string(provider.ProviderAWS), AtlasNetworkContainerConfig: testContainerConfig(), }, - ID: testContainerID, + ID: testContainerID, + AWSStatus: &networkcontainer.AWSContainerStatus{VpcID: testVpcID}, }, expectedError: nil, }, diff --git a/internal/translation/networkpeering/conversion.go b/internal/translation/networkpeering/conversion.go index 
03c9dba1da..fb2b6a86f7 100644 --- a/internal/translation/networkpeering/conversion.go +++ b/internal/translation/networkpeering/conversion.go @@ -1,42 +1,108 @@ package networkpeering import ( + "errors" "fmt" + "reflect" "go.mongodb.org/atlas-sdk/v20231115008/admin" akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/status" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" +) + +var ( + // ErrUnsupportedProvider marks an error when parsing an invalid provider input + ErrUnsupportedProvider = errors.New("unsupported provider") ) type NetworkPeer struct { akov2.AtlasNetworkPeeringConfig - ID string + ID string + ContainerID string + Status string + ErrorMessage string + AWSStatus *status.AWSPeeringStatus } -func NewNetworkPeer(id string, cfg *akov2.AtlasNetworkPeeringConfig) *NetworkPeer { - return &NetworkPeer{ - AtlasNetworkPeeringConfig: *cfg, - ID: id, +func (np *NetworkPeer) Failed() bool { + return np.ErrorMessage != "" +} + +func (np *NetworkPeer) AWSConnectionID() string { + if np.AWSStatus == nil { + return "" } + return np.AWSStatus.ConnectionID } -type ProviderContainer struct { - akov2.AtlasProviderContainerConfig - ID string - Provider string +func (np *NetworkPeer) String() string { + return fmt.Sprintf("NetworkPeer for %s ID=%s ContainerID=%s\nConfig:%v\nStatus:%v", + np.Provider, np.ID, np.ContainerID, np.configString(), np.statusString()) } -func NewProviderContainer(id string, provider string, cfg *akov2.AtlasProviderContainerConfig) *ProviderContainer { - return &ProviderContainer{ - AtlasProviderContainerConfig: *cfg, - ID: id, - Provider: provider, +func (np *NetworkPeer) configString() string { + aws := "" + if np.AWSConfiguration != nil { + cfg := np.AWSConfiguration + aws = fmt.Sprintf("AWSCfg:{ 
AccepterRegionName=%s AccountID=%s RouteTableCIDRBlock=%s VpcID=%s } ", + cfg.AccepterRegionName, cfg.AWSAccountID, cfg.RouteTableCIDRBlock, cfg.VpcID) + } + azure := "" + if np.AzureConfiguration != nil { + cfg := np.AzureConfiguration + azure = fmt.Sprintf("AzureCfg:{ AzureDirectoryID=%s AzureSubscriptionID=%s ResourceGroupName=%s VnetName=%s } ", + cfg.AzureDirectoryID, cfg.AzureSubscriptionID, cfg.ResourceGroupName, cfg.VNetName) + } + google := "" + if np.GCPConfiguration != nil { + cfg := np.GCPConfiguration + google = fmt.Sprintf("GoogleCfg:{ GCPProjectID=%s NetworkName=%s } ", + cfg.GCPProjectID, cfg.NetworkName) + } + return fmt.Sprintf("{%s%s%s}", aws, azure, google) +} + +func (np *NetworkPeer) statusString() string { + tail := "" + if np.AWSStatus != nil { + tail = fmt.Sprintf(" AWSStatus:{ConnectionId=%s}", np.AWSStatus.ConnectionID) + } + return fmt.Sprintf("{Status=%q ErrorMessage=%q%s}", np.Status, np.ErrorMessage, tail) +} + +// Available returns whether or not the Network Peering is connected and ready to use +func (np *NetworkPeer) Available() bool { + return np.Status == "AVAILABLE" +} + +// Closing returns whether or not the Network Peering is being shut down +func (np *NetworkPeer) Closing() bool { + // GCP DELETING AWS TERMINATING AZURE ? 
+ return np.Status == "DELETING" || np.Status == "TERMINATING" +} + +// UpdateStatus copies the network peering status fields only from the given peer input +func (np *NetworkPeer) UpdateStatus(atlas *NetworkPeer) { + np.Status = atlas.Status + np.ErrorMessage = atlas.ErrorMessage + if np.Provider == string(provider.ProviderAWS) && atlas.AWSStatus != nil { + np.AWSStatus = atlas.AWSStatus.DeepCopy() + } +} + +// NewNetworkPeer creates a network peering from the given config +func NewNetworkPeer(id string, cfg *akov2.AtlasNetworkPeeringConfig) *NetworkPeer { + return &NetworkPeer{ + AtlasNetworkPeeringConfig: *cfg, + ID: id, } } -func toAtlasConnection(peer *NetworkPeer) (*admin.BaseNetworkPeeringConnectionSettings, error) { +func toAtlas(peer *NetworkPeer) (*admin.BaseNetworkPeeringConnectionSettings, error) { switch peer.Provider { case string(provider.ProviderAWS): if peer.AWSConfiguration == nil { @@ -73,18 +139,49 @@ func toAtlasConnection(peer *NetworkPeer) (*admin.BaseNetworkPeeringConnectionSe VnetName: pointer.SetOrNil(peer.AzureConfiguration.VNetName, ""), }, nil default: - return nil, fmt.Errorf("unsupported provider %q", peer.Provider) + return nil, fmt.Errorf("%w %q", ErrUnsupportedProvider, peer.Provider) } } -func fromAtlasConnection(conn *admin.BaseNetworkPeeringConnectionSettings) (*NetworkPeer, error) { +func fromAtlas(conn *admin.BaseNetworkPeeringConnectionSettings) (*NetworkPeer, error) { + networkPeer, err := fromAtlasConnectionNoStatus(conn) + if err != nil { + return nil, fmt.Errorf("failed to convert BaseNetworkPeeringConnectionSettings to NetworkPeer: %w", err) + } + switch provider.ProviderName(conn.GetProviderName()) { + case provider.ProviderAWS: + networkPeer.Status = conn.GetStatusName() + networkPeer.ErrorMessage = conn.GetErrorStateName() + networkPeer.AWSStatus = fromAtlasAWSStatus(conn) + case provider.ProviderGCP: + networkPeer.Status = conn.GetStatus() + networkPeer.ErrorMessage = conn.GetErrorMessage() + case 
provider.ProviderAzure: + networkPeer.Status = conn.GetStatus() + networkPeer.ErrorMessage = conn.GetErrorState() + default: + return nil, fmt.Errorf("%w %q", ErrUnsupportedProvider, conn.GetProviderName()) + } + return networkPeer, nil +} + +func fromAtlasAWSStatus(conn *admin.BaseNetworkPeeringConnectionSettings) *status.AWSPeeringStatus { + if conn.ConnectionId == nil { + return nil + } + return &status.AWSPeeringStatus{ + ConnectionID: conn.GetConnectionId(), + } +} + +func fromAtlasConnectionNoStatus(conn *admin.BaseNetworkPeeringConnectionSettings) (*NetworkPeer, error) { switch provider.ProviderName(conn.GetProviderName()) { case provider.ProviderAWS: return &NetworkPeer{ - ID: conn.GetId(), + ID: conn.GetId(), + ContainerID: conn.GetContainerId(), AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ - ContainerID: conn.GetContainerId(), - Provider: conn.GetProviderName(), + Provider: conn.GetProviderName(), AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ AccepterRegionName: conn.GetAccepterRegionName(), AWSAccountID: conn.GetAwsAccountId(), @@ -95,10 +192,10 @@ func fromAtlasConnection(conn *admin.BaseNetworkPeeringConnectionSettings) (*Net }, nil case provider.ProviderGCP: return &NetworkPeer{ - ID: conn.GetId(), + ID: conn.GetId(), + ContainerID: conn.GetContainerId(), AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ - ContainerID: conn.GetContainerId(), - Provider: conn.GetProviderName(), + Provider: conn.GetProviderName(), GCPConfiguration: &akov2.GCPNetworkPeeringConfiguration{ GCPProjectID: conn.GetGcpProjectId(), NetworkName: conn.GetNetworkName(), @@ -107,10 +204,10 @@ func fromAtlasConnection(conn *admin.BaseNetworkPeeringConnectionSettings) (*Net }, nil case provider.ProviderAzure: return &NetworkPeer{ - ID: conn.GetId(), + ID: conn.GetId(), + ContainerID: conn.GetContainerId(), AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ - ContainerID: conn.GetContainerId(), - Provider: conn.GetProviderName(), + 
Provider: conn.GetProviderName(), AzureConfiguration: &akov2.AzureNetworkPeeringConfiguration{ AzureDirectoryID: conn.GetAzureDirectoryId(), AzureSubscriptionID: conn.GetAzureSubscriptionId(), @@ -120,7 +217,7 @@ func fromAtlasConnection(conn *admin.BaseNetworkPeeringConnectionSettings) (*Net }, }, nil default: - return nil, fmt.Errorf("unsupported provider %q", conn.GetProviderName()) + return nil, fmt.Errorf("%w %q", ErrUnsupportedProvider, conn.GetProviderName()) } } @@ -130,7 +227,7 @@ func fromAtlasConnectionList(list []admin.BaseNetworkPeeringConnectionSettings) } peers := make([]NetworkPeer, 0, len(list)) for i, conn := range list { - c, err := fromAtlasConnection(&conn) + c, err := fromAtlas(&conn) if err != nil { return nil, fmt.Errorf("failed to convert connection list item %d: %w", i, err) } @@ -139,42 +236,58 @@ func fromAtlasConnectionList(list []admin.BaseNetworkPeeringConnectionSettings) return peers, nil } -func toAtlasContainer(container *ProviderContainer) *admin.CloudProviderContainer { - cpc := &admin.CloudProviderContainer{ - Id: pointer.SetOrNil(container.ID, ""), - ProviderName: pointer.SetOrNil(container.Provider, ""), - AtlasCidrBlock: pointer.SetOrNil(container.AtlasCIDRBlock, ""), +func CompareConfigs(a, b *NetworkPeer) bool { + aCopy := a.DeepCopy() + bCopy := b.DeepCopy() + // accpter region cannot be updated and it might be empty when it matches the container region + // so we clear it here to avoid finding bogus differences + if aCopy.AWSConfiguration != nil { + aCopy.AWSConfiguration.AccepterRegionName = "" } - if cpc.GetProviderName() == string(provider.ProviderAWS) { - cpc.RegionName = pointer.SetOrNil(container.ContainerRegion, "") - } else { - cpc.Region = pointer.SetOrNil(container.ContainerRegion, "") + if bCopy.AWSConfiguration != nil { + bCopy.AWSConfiguration.AccepterRegionName = "" } - return cpc + return reflect.DeepEqual(aCopy, bCopy) } -func fromAtlasContainer(container *admin.CloudProviderContainer) *ProviderContainer { 
- region := container.GetRegion() - if container.GetProviderName() == string(provider.ProviderAWS) { - region = container.GetRegionName() - } - return &ProviderContainer{ - ID: container.GetId(), - Provider: container.GetProviderName(), - AtlasProviderContainerConfig: akov2.AtlasProviderContainerConfig{ - AtlasCIDRBlock: container.GetAtlasCidrBlock(), - ContainerRegion: region, - }, +func ApplyPeeringStatus(peeringStatus *status.AtlasNetworkPeeringStatus, peer *NetworkPeer, container *networkcontainer.NetworkContainer) { + peeringStatus.ID = peer.ID + peeringStatus.Status = peer.Status + switch provider.ProviderName(peer.Provider) { + case provider.ProviderAWS: + if container.AWSStatus != nil { + if peeringStatus.AWSStatus == nil { + peeringStatus.AWSStatus = &status.AWSPeeringStatus{} + } + peeringStatus.AWSStatus.ConnectionID = peer.AWSStatus.ConnectionID + peeringStatus.AWSStatus.VpcID = container.AWSStatus.VpcID + } + case provider.ProviderAzure: + if container.AzureStatus != nil { + if peeringStatus.AzureStatus == nil { + peeringStatus.AzureStatus = &status.AzurePeeringStatus{} + } + peeringStatus.AzureStatus.AzureSubscriptionID = container.AzureStatus.AzureSubscriptionID + peeringStatus.AzureStatus.VnetName = container.AzureStatus.VnetName + } + case provider.ProviderGCP: + if container.GCPStatus != nil { + if peeringStatus.GCPStatus == nil { + peeringStatus.GCPStatus = &status.GCPPeeringStatus{} + } + peeringStatus.GCPStatus.GCPProjectID = container.GCPStatus.GCPProjectID + peeringStatus.GCPStatus.NetworkName = container.GCPStatus.NetworkName + } + default: + peeringStatus.Status = fmt.Sprintf("unsupported provider: %q", peer.Provider) + return } } -func fromAtlasContainerList(list []admin.CloudProviderContainer) []ProviderContainer { - if list == nil { - return nil - } - containers := make([]ProviderContainer, 0, len(list)) - for _, container := range list { - containers = append(containers, *fromAtlasContainer(&container)) - } - return containers +func 
ClearPeeringStatus(peeringStatus *status.AtlasNetworkPeeringStatus) { + peeringStatus.ID = "" + peeringStatus.Status = "" + peeringStatus.AWSStatus = nil + peeringStatus.AzureStatus = nil + peeringStatus.GCPStatus = nil } diff --git a/internal/translation/networkpeering/conversion_test.go b/internal/translation/networkpeering/conversion_test.go index d992e8d4a4..40d2c22b23 100644 --- a/internal/translation/networkpeering/conversion_test.go +++ b/internal/translation/networkpeering/conversion_test.go @@ -26,12 +26,10 @@ func FuzzConvertConnection(f *testing.F) { } f.Fuzz(func(t *testing.T, data []byte, index uint) { peerData := NetworkPeer{} - gofuzz.NewFromGoFuzz(data).Fuzz(&peerData) - peerData.Provider = providerNames[index%3] - cleanupPeer(&peerData) - atlasConn, err := toAtlasConnection(&peerData) + fuzzPeer(gofuzz.NewFromGoFuzz(data), index, &peerData) + atlasConn, err := toAtlas(&peerData) require.NoError(t, err) - result, err := fromAtlasConnection(atlasConn) + result, err := fromAtlas(atlasConn) require.NoError(t, err) assert.Equal(t, &peerData, result, "failed for index=%d", index) }) @@ -46,15 +44,13 @@ func FuzzConvertListOfConnections(f *testing.F) { expected := []NetworkPeer{} for i := uint(0); i < size; i++ { peerData := NetworkPeer{} - gofuzz.NewFromGoFuzz(data).Fuzz(&peerData) - peerData.Provider = providerNames[index%3] - cleanupPeer(&peerData) - atlasConn, err := toAtlasConnection(&peerData) + fuzzPeer(gofuzz.NewFromGoFuzz(data), index, &peerData) + atlasConn, err := toAtlas(&peerData) require.NoError(t, err) - expectedConn, err := fromAtlasConnection(atlasConn) + expectedConn, err := fromAtlas(atlasConn) require.NoError(t, err) expected = append(expected, *expectedConn) - atlasConnItem, err := toAtlasConnection(&peerData) + atlasConnItem, err := toAtlas(&peerData) require.NoError(t, err) conns = append(conns, *atlasConnItem) } @@ -64,48 +60,23 @@ func FuzzConvertListOfConnections(f *testing.F) { }) } -func FuzzConvertContainer(f *testing.F) { - 
for i := uint(0); i < fuzzIterations; i++ { - f.Add(([]byte)(fmt.Sprintf("seed sample %x", i)), i) - } - f.Fuzz(func(t *testing.T, data []byte, index uint) { - containerData := ProviderContainer{} - gofuzz.NewFromGoFuzz(data).Fuzz(&containerData) - containerData.Provider = providerNames[index%3] - result := fromAtlasContainer(toAtlasContainer(&containerData)) - assert.Equal(t, &containerData, result, "failed for index=%d", index) - }) -} - -func FuzzConvertListOfContainers(f *testing.F) { - for i := uint(0); i < fuzzIterations; i++ { - f.Add(([]byte)(fmt.Sprintf("seed sample %x", i)), i, (i % 5)) - } - f.Fuzz(func(t *testing.T, data []byte, index uint, size uint) { - containers := []admin.CloudProviderContainer{} - expected := []ProviderContainer{} - for i := uint(0); i < size; i++ { - containerData := ProviderContainer{} - gofuzz.NewFromGoFuzz(data).Fuzz(&containerData) - containerData.Provider = providerNames[index%3] - expectedContainer := fromAtlasContainer(toAtlasContainer(&containerData)) - expected = append(expected, *expectedContainer) - containers = append(containers, *toAtlasContainer(&containerData)) - } - result := fromAtlasContainerList(containers) - assert.Equal(t, expected, result) - }) -} - -func cleanupPeer(peer *NetworkPeer) { - peer.ID = "" - if peer.Provider != string(provider.ProviderAWS) { +func fuzzPeer(fuzzer *gofuzz.Fuzzer, index uint, peer *NetworkPeer) { + fuzzer.NilChance(0).Fuzz(peer) + peer.ID = "" // ID is provided by Atlas, cannoy complete a roundtrip + peer.Provider = providerNames[index%3] // provider can only be one of 3 AWS, AZURE or GCP + switch peer.Provider { // only the selected provider config is expected + case string(provider.ProviderAWS): + peer.AzureConfiguration = nil + peer.GCPConfiguration = nil + case string(provider.ProviderAzure): peer.AWSConfiguration = nil - } - if peer.Provider != string(provider.ProviderGCP) { peer.GCPConfiguration = nil - } - if peer.Provider != string(provider.ProviderAzure) { + case 
string(provider.ProviderGCP): + peer.AWSConfiguration = nil peer.AzureConfiguration = nil } + // status fields are only populated from Atlas; they do not complete a roundtrip + peer.Status = "" + peer.ErrorMessage = "" + peer.AWSStatus = nil } diff --git a/internal/translation/networkpeering/networkpeering.go b/internal/translation/networkpeering/networkpeering.go index f1f6e8fb9f..a9ad98dcae 100644 --- a/internal/translation/networkpeering/networkpeering.go +++ b/internal/translation/networkpeering/networkpeering.go @@ -2,141 +2,102 @@ package networkpeering import ( "context" + "errors" "fmt" - "net/http" "go.mongodb.org/atlas-sdk/v20231115008/admin" - "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider" - "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" - "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/paging" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas" ) -type PeerConnectionsService interface { - CreatePeer(ctx context.Context, projectID string, conn *NetworkPeer) (*NetworkPeer, error) - ListPeers(ctx context.Context, projectID string) ([]NetworkPeer, error) - DeletePeer(ctx context.Context, projectID, containerID string) error -} +var ( + // ErrNotFound means a resource is missing + ErrNotFound = errors.New("not found") -type PeeringContainerService interface { - CreateContainer(ctx context.Context, projectID string, container *ProviderContainer) (*ProviderContainer, error) - GetContainer(ctx context.Context, projectID, containerID string) (*ProviderContainer, error) - ListContainers(ctx context.Context, projectID, providerName string) ([]ProviderContainer, error) - DeleteContainer(ctx context.Context, projectID, containerID string) error -} + // ErrContainerInUse is a failure to remove a container still in use + ErrContainerInUse = errors.New("container still in use") +) type NetworkPeeringService interface { - 
PeerConnectionsService - PeeringContainerService + Create(ctx context.Context, projectID, containerID string, cfg *akov2.AtlasNetworkPeeringConfig) (*NetworkPeer, error) + Get(ctx context.Context, projectID, peerID string) (*NetworkPeer, error) + Update(ctx context.Context, projectID, peerID, containerID string, cfg *akov2.AtlasNetworkPeeringConfig) (*NetworkPeer, error) + Delete(ctx context.Context, projectID, peerID string) error } type networkPeeringService struct { peeringAPI admin.NetworkPeeringApi } +func NewNetworkPeeringServiceFromClientSet(clientSet *atlas.ClientSet) NetworkPeeringService { + return NewNetworkPeeringService(clientSet.SdkClient20231115008.NetworkPeeringApi) +} + func NewNetworkPeeringService(peeringAPI admin.NetworkPeeringApi) NetworkPeeringService { return &networkPeeringService{peeringAPI: peeringAPI} } -func (np *networkPeeringService) CreatePeer(ctx context.Context, projectID string, conn *NetworkPeer) (*NetworkPeer, error) { - atlasConnRequest, err := toAtlasConnection(conn) +func (np *networkPeeringService) Create(ctx context.Context, projectID, containerID string, cfg *akov2.AtlasNetworkPeeringConfig) (*NetworkPeer, error) { + atlasConnRequest, err := toAtlas(&NetworkPeer{ + AtlasNetworkPeeringConfig: *cfg, + ContainerID: containerID, + }) if err != nil { return nil, fmt.Errorf("failed to convert peer to Atlas: %w", err) } newAtlasConn, _, err := np.peeringAPI.CreatePeeringConnection(ctx, projectID, atlasConnRequest).Execute() if err != nil { - return nil, fmt.Errorf("failed to create network peer %v: %w", conn, err) + return nil, fmt.Errorf("failed to create network peer from config %v: %w", cfg, err) } - newConn, err := fromAtlasConnection(newAtlasConn) + newPeer, err := fromAtlas(newAtlasConn) if err != nil { return nil, fmt.Errorf("failed to convert peer from Atlas: %w", err) } - return newConn, nil + return newPeer, nil } -func (np *networkPeeringService) ListPeers(ctx context.Context, projectID string) ([]NetworkPeer, error) { - 
var peersList []NetworkPeer - providers := []provider.ProviderName{provider.ProviderAWS, provider.ProviderAzure, provider.ProviderGCP} - for _, providerName := range providers { - peers, err := np.listPeersForProvider(ctx, projectID, providerName) - if err != nil { - return nil, fmt.Errorf("failed to list network peers for %s: %w", string(providerName), err) +func (np *networkPeeringService) Get(ctx context.Context, projectID, peerID string) (*NetworkPeer, error) { + atlasConn, _, err := np.peeringAPI.GetPeeringConnection(ctx, projectID, peerID).Execute() + if err != nil { + if admin.IsErrorCode(err, "PEER_NOT_FOUND") { + return nil, ErrNotFound } - peersList = append(peersList, peers...) + return nil, fmt.Errorf("failed to get network peer for peer id %v: %w", peerID, err) } - return peersList, nil -} - -func (np *networkPeeringService) listPeersForProvider(ctx context.Context, projectID string, providerName provider.ProviderName) ([]NetworkPeer, error) { - results, err := paging.ListAll(ctx, func(ctx context.Context, pageNum int) (paging.Response[admin.BaseNetworkPeeringConnectionSettings], *http.Response, error) { - p := &admin.ListPeeringConnectionsApiParams{ - GroupId: projectID, - ProviderName: admin.PtrString(string(providerName)), - } - return np.peeringAPI.ListPeeringConnectionsWithParams(ctx, p).PageNum(pageNum).Execute() - }) + peer, err := fromAtlas(atlasConn) if err != nil { - return nil, fmt.Errorf("failed to list network peers: %w", err) + return nil, fmt.Errorf("failed to convert peer from Atlas: %w", err) } - - return fromAtlasConnectionList(results) + return peer, nil } -func (np *networkPeeringService) DeletePeer(ctx context.Context, projectID, peerID string) error { - _, _, err := np.peeringAPI.DeletePeeringConnection(ctx, projectID, peerID).Execute() - if admin.IsErrorCode(err, "PEER_ALREADY_REQUESTED_DELETION") || admin.IsErrorCode(err, "PEER_NOT_FOUND") { - return nil // if it was already removed or being removed it is also fine - } +func (np 
*networkPeeringService) Update(ctx context.Context, projectID, peerID, containerID string, cfg *akov2.AtlasNetworkPeeringConfig) (*NetworkPeer, error) { + atlasConnRequest, err := toAtlas(&NetworkPeer{ + AtlasNetworkPeeringConfig: *cfg, + ContainerID: containerID, + }) if err != nil { - return fmt.Errorf("failed to delete peering connection for peer %s: %w", peerID, err) + return nil, fmt.Errorf("failed to convert peer to Atlas: %w", err) } - return nil -} - -func (np *networkPeeringService) CreateContainer(ctx context.Context, projectID string, container *ProviderContainer) (*ProviderContainer, error) { - newContainer, _, err := np.peeringAPI.CreatePeeringContainer(ctx, projectID, toAtlasContainer(container)).Execute() + newAtlasConn, _, err := np.peeringAPI.UpdatePeeringConnection(ctx, projectID, peerID, atlasConnRequest).Execute() if err != nil { - return nil, fmt.Errorf("failed to create peering container %s: %w", container.ID, err) + return nil, fmt.Errorf("failed to update network peer from config %v: %w", cfg, err) } - return fromAtlasContainer(newContainer), nil -} - -func (np *networkPeeringService) GetContainer(ctx context.Context, projectID, containerID string) (*ProviderContainer, error) { - container, _, err := np.peeringAPI.GetPeeringContainer(ctx, projectID, containerID).Execute() + newPeer, err := fromAtlas(newAtlasConn) if err != nil { - return nil, fmt.Errorf("failed to get container for gcp status %s: %w", containerID, err) - } - return fromAtlasContainer(container), nil -} - -func (np *networkPeeringService) ListContainers(ctx context.Context, projectID, providerName string) ([]ProviderContainer, error) { - results := []ProviderContainer{} - pageNum := 1 - listOpts := &admin.ListPeeringContainerByCloudProviderApiParams{ - GroupId: projectID, - ProviderName: pointer.SetOrNil(providerName, ""), - PageNum: pointer.MakePtr(pageNum), - } - for { - page, _, err := np.peeringAPI.ListPeeringContainerByCloudProviderWithParams(ctx, listOpts).Execute() - 
if err != nil { - return nil, fmt.Errorf("failed to list containers: %w", err) - } - results = append(results, fromAtlasContainerList(page.GetResults())...) - if len(results) >= page.GetTotalCount() { - return results, nil - } - pageNum += 1 + return nil, fmt.Errorf("failed to convert peer from Atlas: %w", err) } + return newPeer, nil } -func (np *networkPeeringService) DeleteContainer(ctx context.Context, projectID, containerID string) error { - _, _, err := np.peeringAPI.DeletePeeringContainer(ctx, projectID, containerID).Execute() - if admin.IsErrorCode(err, "CLOUD_PROVIDER_CONTAINER_NOT_FOUND") { - return nil +func (np *networkPeeringService) Delete(ctx context.Context, projectID, peerID string) error { + _, _, err := np.peeringAPI.DeletePeeringConnection(ctx, projectID, peerID).Execute() + if admin.IsErrorCode(err, "PEER_ALREADY_REQUESTED_DELETION") || admin.IsErrorCode(err, "PEER_NOT_FOUND") { + return errors.Join(err, ErrNotFound) } if err != nil { - return fmt.Errorf("failed to delete container: %w", err) + return fmt.Errorf("failed to delete peering connection for peer %s: %w", peerID, err) } return nil } diff --git a/internal/translation/networkpeering/networkpeering_test.go b/internal/translation/networkpeering/networkpeering_test.go new file mode 100644 index 0000000000..b065028951 --- /dev/null +++ b/internal/translation/networkpeering/networkpeering_test.go @@ -0,0 +1,451 @@ +package networkpeering_test + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.mongodb.org/atlas-sdk/v20231115008/admin" + "go.mongodb.org/atlas-sdk/v20231115008/mockadmin" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkpeering" +) + +const ( + testProjectID = 
"fake-test-project-id" + + testAWSSubscriptionID = "fake-subscription-id" + + testVpcID = "fake-vpc-id" + + testPeerID = "fake-peering-id" + + testContainerID = "fake-container-id" + + testAzureDirectoryID = "fake-azure-directorty-id" + + testAzureSubcriptionID = "fake-azure-subcription-id" + + testAzureResourceGroup = "fake-azure-resource-group" + + testVnet = "fake-vnet" + + testGCPProjectID = "fake-test-project" + + testNetworkName = "fake-test-network" +) + +var ( + ErrFakeFailure = errors.New("fake-failure") +) + +func TestNetworkPeeringCreate(t *testing.T) { + for _, tc := range []struct { + title string + cfg *akov2.AtlasNetworkPeeringConfig + api admin.NetworkPeeringApi + expectedPeer *networkpeering.NetworkPeer + expectedError error + }{ + { + title: "successful api create for AWS returns success", + cfg: &akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAWS), + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "US_EAST_1", + AWSAccountID: testAWSSubscriptionID, + RouteTableCIDRBlock: "10.0.0.0/18", + VpcID: testVpcID, + }, + }, + api: testCreateNetworkPeeringAPI( + &admin.BaseNetworkPeeringConnectionSettings{ + ContainerId: testContainerID, + Id: pointer.MakePtr(testPeerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAWS)), + AccepterRegionName: pointer.MakePtr("US_EAST_1"), + AwsAccountId: pointer.MakePtr(testAWSSubscriptionID), + RouteTableCidrBlock: pointer.MakePtr("10.0.0.0/18"), + VpcId: pointer.MakePtr(testVpcID), + }, + nil, + ), + expectedPeer: &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAWS), + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "US_EAST_1", + AWSAccountID: testAWSSubscriptionID, + RouteTableCIDRBlock: "10.0.0.0/18", + VpcID: testVpcID, + }, + }, + ID: testPeerID, + ContainerID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "API failure gets 
passed through", + cfg: &akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAWS), + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "US_EAST_1", + AWSAccountID: testAWSSubscriptionID, + RouteTableCIDRBlock: "10.0.0.0/18", + VpcID: testVpcID, + }, + }, + api: testCreateNetworkPeeringAPI( + nil, + ErrFakeFailure, + ), + expectedPeer: nil, + expectedError: ErrFakeFailure, + }, + + { + title: "failure to parse config returns before calling API", + cfg: &akov2.AtlasNetworkPeeringConfig{ + Provider: "invalid provider", + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "US_EAST_1", + AWSAccountID: testAWSSubscriptionID, + RouteTableCIDRBlock: "10.0.0.0/18", + VpcID: testVpcID, + }, + }, + expectedPeer: nil, + expectedError: networkpeering.ErrUnsupportedProvider, + }, + + { + title: "failure to parse API reply", + cfg: &akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAWS), + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "US_EAST_1", + AWSAccountID: testAWSSubscriptionID, + RouteTableCIDRBlock: "10.0.0.0/18", + VpcID: testVpcID, + }, + }, + api: testCreateNetworkPeeringAPI( + &admin.BaseNetworkPeeringConnectionSettings{ + ContainerId: testContainerID, + Id: pointer.MakePtr(testPeerID), + ProviderName: pointer.MakePtr("oops also invalid provider"), + AccepterRegionName: pointer.MakePtr("US_EAST_1"), + AwsAccountId: pointer.MakePtr(testAWSSubscriptionID), + RouteTableCidrBlock: pointer.MakePtr("10.0.0.0/18"), + VpcId: pointer.MakePtr(testVpcID), + }, + nil, + ), + expectedPeer: nil, + expectedError: networkpeering.ErrUnsupportedProvider, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := networkpeering.NewNetworkPeeringService(tc.api) + container, err := s.Create(ctx, testProjectID, testContainerID, tc.cfg) + assert.Equal(t, tc.expectedPeer, container) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + 
+func TestNetworkPeeringGet(t *testing.T) { + for _, tc := range []struct { + title string + api admin.NetworkPeeringApi + expectedPeer *networkpeering.NetworkPeer + expectedError error + }{ + { + title: "successful api get for Azure returns success", + api: testGetNetworkPeeringAPI( + &admin.BaseNetworkPeeringConnectionSettings{ + ContainerId: testContainerID, + Id: pointer.MakePtr(testPeerID), + ProviderName: pointer.MakePtr(string(provider.ProviderAzure)), + AzureDirectoryId: pointer.MakePtr(testAzureDirectoryID), + AzureSubscriptionId: pointer.MakePtr(testAzureSubcriptionID), + ResourceGroupName: pointer.MakePtr(testAzureResourceGroup), + VnetName: pointer.MakePtr(testVnet), + }, + nil, + ), + expectedPeer: &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAzure), + AzureConfiguration: &akov2.AzureNetworkPeeringConfiguration{ + AzureDirectoryID: testAzureDirectoryID, + AzureSubscriptionID: testAzureSubcriptionID, + ResourceGroupName: testAzureResourceGroup, + VNetName: testVnet, + }, + }, + ID: testPeerID, + ContainerID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "API not found is detected", + api: testGetNetworkPeeringAPI( + nil, + testAPIError("PEER_NOT_FOUND"), + ), + expectedPeer: nil, + expectedError: networkpeering.ErrNotFound, + }, + + { + title: "generic API failure passes though", + api: testGetNetworkPeeringAPI( + nil, + ErrFakeFailure, + ), + expectedPeer: nil, + expectedError: ErrFakeFailure, + }, + + { + title: "failure to parse API reply", + api: testGetNetworkPeeringAPI( + &admin.BaseNetworkPeeringConnectionSettings{ + ContainerId: testContainerID, + Id: pointer.MakePtr(testPeerID), + ProviderName: pointer.MakePtr("invalid provider"), + AzureDirectoryId: pointer.MakePtr(testAzureDirectoryID), + AzureSubscriptionId: pointer.MakePtr(testAzureSubcriptionID), + ResourceGroupName: pointer.MakePtr(testAzureResourceGroup), + VnetName: 
pointer.MakePtr(testVnet), + }, + nil, + ), + expectedPeer: nil, + expectedError: networkpeering.ErrUnsupportedProvider, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := networkpeering.NewNetworkPeeringService(tc.api) + container, err := s.Get(ctx, testProjectID, testPeerID) + assert.Equal(t, tc.expectedPeer, container) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func TestNetworkPeeringUpdate(t *testing.T) { + for _, tc := range []struct { + title string + cfg *akov2.AtlasNetworkPeeringConfig + api admin.NetworkPeeringApi + expectedPeer *networkpeering.NetworkPeer + expectedError error + }{ + { + title: "successful api update for GCP returns success", + cfg: &akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderGCP), + GCPConfiguration: &akov2.GCPNetworkPeeringConfiguration{ + GCPProjectID: testGCPProjectID, + NetworkName: testNetworkName, + }, + }, + api: testUpdateNetworkPeeringAPI( + &admin.BaseNetworkPeeringConnectionSettings{ + ContainerId: testContainerID, + Id: pointer.MakePtr(testPeerID), + ProviderName: pointer.MakePtr(string(provider.ProviderGCP)), + GcpProjectId: pointer.MakePtr(testGCPProjectID), + NetworkName: pointer.MakePtr(testNetworkName), + }, + nil, + ), + expectedPeer: &networkpeering.NetworkPeer{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderGCP), + GCPConfiguration: &akov2.GCPNetworkPeeringConfiguration{ + GCPProjectID: testGCPProjectID, + NetworkName: testNetworkName, + }, + }, + ID: testPeerID, + ContainerID: testContainerID, + }, + expectedError: nil, + }, + + { + title: "API failure gets passed through", + cfg: &akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderGCP), + GCPConfiguration: &akov2.GCPNetworkPeeringConfiguration{ + GCPProjectID: testGCPProjectID, + NetworkName: testNetworkName, + }, + }, + api: testUpdateNetworkPeeringAPI( + nil, + ErrFakeFailure, + ), + expectedPeer: nil, + expectedError: 
ErrFakeFailure, + }, + + { + title: "failure to parse config returns before calling API", + cfg: &akov2.AtlasNetworkPeeringConfig{ + Provider: "invalid provider", + GCPConfiguration: &akov2.GCPNetworkPeeringConfiguration{ + GCPProjectID: testGCPProjectID, + NetworkName: testNetworkName, + }, + }, + expectedPeer: nil, + expectedError: networkpeering.ErrUnsupportedProvider, + }, + + { + title: "failure to parse API reply", + cfg: &akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderGCP), + GCPConfiguration: &akov2.GCPNetworkPeeringConfiguration{ + GCPProjectID: testGCPProjectID, + NetworkName: testNetworkName, + }, + }, + api: testUpdateNetworkPeeringAPI( + &admin.BaseNetworkPeeringConnectionSettings{ + ContainerId: testContainerID, + Id: pointer.MakePtr(testPeerID), + ProviderName: pointer.MakePtr("oops also invalid provider"), + GcpProjectId: pointer.MakePtr(testGCPProjectID), + NetworkName: pointer.MakePtr(testNetworkName), + }, + nil, + ), + expectedPeer: nil, + expectedError: networkpeering.ErrUnsupportedProvider, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := networkpeering.NewNetworkPeeringService(tc.api) + container, err := s.Update(ctx, testProjectID, testPeerID, testContainerID, tc.cfg) + assert.Equal(t, tc.expectedPeer, container) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func TestNetworkPeeringDelete(t *testing.T) { + for _, tc := range []struct { + title string + api admin.NetworkPeeringApi + expectedError error + }{ + { + title: "successful api delete returns success", + api: testDeleteNetworkPeeringAPI(nil), + expectedError: nil, + }, + + { + title: "API not found is detected", + api: testDeleteNetworkPeeringAPI(testAPIError("PEER_NOT_FOUND")), + expectedError: networkpeering.ErrNotFound, + }, + + { + title: "API already deleting also gets not found", + api: testDeleteNetworkPeeringAPI(testAPIError("PEER_ALREADY_REQUESTED_DELETION")), + expectedError: networkpeering.ErrNotFound, + 
}, + + { + title: "generic API failure passes though", + api: testDeleteNetworkPeeringAPI(ErrFakeFailure), + expectedError: ErrFakeFailure, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := networkpeering.NewNetworkPeeringService(tc.api) + err := s.Delete(ctx, testProjectID, testPeerID) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func testCreateNetworkPeeringAPI(apiPeering *admin.BaseNetworkPeeringConnectionSettings, err error) admin.NetworkPeeringApi { + var apiMock mockadmin.NetworkPeeringApi + + apiMock.EXPECT().CreatePeeringConnection( + mock.Anything, testProjectID, mock.Anything, + ).Return(admin.CreatePeeringConnectionApiRequest{ApiService: &apiMock}) + + apiMock.EXPECT().CreatePeeringConnectionExecute( + mock.AnythingOfType("admin.CreatePeeringConnectionApiRequest"), + ).Return(apiPeering, nil, err) + return &apiMock +} + +func testGetNetworkPeeringAPI(apiPeering *admin.BaseNetworkPeeringConnectionSettings, err error) admin.NetworkPeeringApi { + var apiMock mockadmin.NetworkPeeringApi + + apiMock.EXPECT().GetPeeringConnection( + mock.Anything, testProjectID, testPeerID, + ).Return(admin.GetPeeringConnectionApiRequest{ApiService: &apiMock}) + + apiMock.EXPECT().GetPeeringConnectionExecute( + mock.AnythingOfType("admin.GetPeeringConnectionApiRequest"), + ).Return(apiPeering, nil, err) + return &apiMock +} + +func testUpdateNetworkPeeringAPI(apiPeering *admin.BaseNetworkPeeringConnectionSettings, err error) admin.NetworkPeeringApi { + var apiMock mockadmin.NetworkPeeringApi + + apiMock.EXPECT().UpdatePeeringConnection( + mock.Anything, testProjectID, testPeerID, mock.Anything, + ).Return(admin.UpdatePeeringConnectionApiRequest{ApiService: &apiMock}) + + apiMock.EXPECT().UpdatePeeringConnectionExecute( + mock.AnythingOfType("admin.UpdatePeeringConnectionApiRequest"), + ).Return(apiPeering, nil, err) + return &apiMock +} + +func testDeleteNetworkPeeringAPI(err error) admin.NetworkPeeringApi { + var apiMock 
mockadmin.NetworkPeeringApi + + apiMock.EXPECT().DeletePeeringConnection( + mock.Anything, testProjectID, testPeerID, + ).Return(admin.DeletePeeringConnectionApiRequest{ApiService: &apiMock}) + + apiMock.EXPECT().DeletePeeringConnectionExecute( + mock.AnythingOfType("admin.DeletePeeringConnectionApiRequest"), + ).Return(nil, nil, err) + return &apiMock +} + +func testAPIError(code string) error { + err := &admin.GenericOpenAPIError{} + err.SetModel(admin.ApiError{ + ErrorCode: pointer.MakePtr(code), + }) + return err +} diff --git a/test/contract/networkpeering/networkpeering_test.go b/test/contract/networkpeering/networkpeering_test.go index ea6879083a..128b5059bd 100644 --- a/test/contract/networkpeering/networkpeering_test.go +++ b/test/contract/networkpeering/networkpeering_test.go @@ -12,6 +12,8 @@ import ( "github.com/stretchr/testify/require" akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkcontainer" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/networkpeering" "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/cloud/aws" "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/cloud/azure" @@ -24,222 +26,152 @@ const ( testVPCName = "ako-test-network-peering-vpc" ) -func TestPeerContainerServiceCRUD(t *testing.T) { - ctx := context.Background() - contract.RunGoContractTest(ctx, t, "test container CRUD", func(ch contract.ContractHelper) { - projectName := utils.RandomName("peer-container-crud-project") - require.NoError(t, ch.AddResources(ctx, 5*time.Minute, contract.DefaultAtlasProject(projectName))) - testProjectID, err := ch.ProjectID(ctx, projectName) - require.NoError(t, err) - nps := networkpeering.NewNetworkPeeringService(ch.AtlasClient().NetworkPeeringApi) - cs := nps.(networkpeering.PeeringContainerService) - for _, tc := range []struct { - provider string - 
container *networkpeering.ProviderContainer - }{ - { - provider: "AWS", - container: testAWSPeeringContainer("10.1.0.0/21"), - }, - { - provider: "Azure", - container: testAzurePeeringContainer("10.2.0.0/21"), - }, - { - provider: "Google", - container: testGooglePeeringContainer("10.3.0.0/18"), // .../21 is not allowed in GCP - }, - } { - createdContainer := &networkpeering.ProviderContainer{} - t.Run(fmt.Sprintf("create %s container", tc.provider), func(t *testing.T) { - newContainer, err := cs.CreateContainer(ctx, testProjectID, tc.container) - require.NoError(t, err) - assert.NotEmpty(t, newContainer.ID) - createdContainer = newContainer - }) - - t.Run(fmt.Sprintf("list %s containers", tc.provider), func(t *testing.T) { - containers, err := cs.ListContainers(ctx, testProjectID, tc.container.Provider) - require.NoError(t, err) - assert.NotEmpty(t, containers) - assert.GreaterOrEqual(t, len(containers), 1) - }) - - t.Run(fmt.Sprintf("get %s container", tc.provider), func(t *testing.T) { - container, err := cs.GetContainer(ctx, testProjectID, createdContainer.ID) - require.NoError(t, err) - assert.NotEmpty(t, container) - assert.Equal(t, createdContainer.ID, container.ID) - assert.Equal(t, tc.container.ContainerRegion, container.ContainerRegion) - assert.Equal(t, tc.container.AtlasCIDRBlock, container.AtlasCIDRBlock) - }) - - t.Run(fmt.Sprintf("delete %s container", tc.provider), func(t *testing.T) { - time.Sleep(time.Second) // Atlas may reject removal if it happened within a second of creation - assert.NoErrorf(t, cs.DeleteContainer(ctx, testProjectID, createdContainer.ID), - "failed cleanup for provider %s Atlas project ID %s and container id %s", - tc.provider, testProjectID, createdContainer.ID) - }) - } - }) -} - func TestPeerServiceCRUD(t *testing.T) { ctx := context.Background() - contract.RunGoContractTest(ctx, t, "test container CRUD", func(ch contract.ContractHelper) { + contract.RunGoContractTest(ctx, t, "test peer CRUD", func(ch 
contract.ContractHelper) { projectName := utils.RandomName("peer-connection-crud-project") - require.NoError(t, ch.AddResources(ctx, time.Minute, contract.DefaultAtlasProject(projectName))) + require.NoError(t, ch.AddResources(ctx, 5*time.Minute, contract.DefaultAtlasProject(projectName))) testProjectID, err := ch.ProjectID(ctx, projectName) require.NoError(t, err) + ncs := networkcontainer.NewNetworkContainerService(ch.AtlasClient().NetworkPeeringApi) nps := networkpeering.NewNetworkPeeringService(ch.AtlasClient().NetworkPeeringApi) - ps := nps.(networkpeering.PeerConnectionsService) createdPeer := &networkpeering.NetworkPeer{} for _, tc := range []struct { provider string - preparedCloudTest func(func(peerRequest *networkpeering.NetworkPeer)) + preparedCloudTest func(func(containerID string, cfg *akov2.AtlasNetworkPeeringConfig)) }{ { - provider: "AWS", - preparedCloudTest: func(performTest func(*networkpeering.NetworkPeer)) { + provider: string(provider.ProviderAWS), + preparedCloudTest: func(performTest func(string, *akov2.AtlasNetworkPeeringConfig)) { testContainer := testAWSPeeringContainer("10.10.0.0/21") - awsRegionName := aws.RegionCode(testContainer.ContainerRegion) + awsRegionName := aws.RegionCode(testContainer.Region) vpcCIDR := "10.11.0.0/21" awsVPCid, err := aws.CreateVPC(utils.RandomName(testVPCName), vpcCIDR, awsRegionName) require.NoError(t, err) - newContainer, err := nps.CreateContainer(ctx, testProjectID, testContainer) + newContainer, err := ncs.Create(ctx, testProjectID, testContainer) require.NoError(t, err) assert.NotEmpty(t, newContainer.ID) defer func() { require.NoError(t, aws.DeleteVPC(awsVPCid, awsRegionName)) }() - performTest(testAWSPeerConnection(t, newContainer.ID, vpcCIDR, awsVPCid)) + performTest(newContainer.ID, testAWSPeerConnection(t, vpcCIDR, awsVPCid)) }, }, { - provider: "AZURE", - preparedCloudTest: func(performTest func(*networkpeering.NetworkPeer)) { + provider: string(provider.ProviderAzure), + preparedCloudTest: 
func(performTest func(string, *akov2.AtlasNetworkPeeringConfig)) { testContainer := testAzurePeeringContainer("10.20.0.0/21") - azureRegionName := azure.RegionCode(testContainer.ContainerRegion) + azureRegionName := azure.RegionCode(testContainer.Region) vpcCIDR := "10.21.0.0/21" azureVPC, err := azure.CreateVPC(ctx, utils.RandomName(testVPCName), vpcCIDR, azureRegionName) require.NoError(t, err) - newContainer, err := nps.CreateContainer(ctx, testProjectID, testContainer) + newContainer, err := ncs.Create(ctx, testProjectID, testContainer) require.NoError(t, err) assert.NotEmpty(t, newContainer.ID) defer func() { require.NoError(t, azure.DeleteVPC(ctx, azureVPC)) }() - performTest(testAzurePeerConnection(t, newContainer.ID, azureVPC)) + performTest(newContainer.ID, testAzurePeerConnection(t, azureVPC)) }, }, { - provider: "GOOGLE", - preparedCloudTest: func(performTest func(*networkpeering.NetworkPeer)) { + provider: string(provider.ProviderGCP), + preparedCloudTest: func(performTest func(string, *akov2.AtlasNetworkPeeringConfig)) { testContainer := testGooglePeeringContainer("10.30.0.0/18") vpcName := utils.RandomName(testVPCName) require.NoError(t, google.CreateVPC(ctx, vpcName)) - newContainer, err := nps.CreateContainer(ctx, testProjectID, testContainer) + newContainer, err := ncs.Create(ctx, testProjectID, testContainer) require.NoError(t, err) assert.NotEmpty(t, newContainer.ID) defer func() { require.NoError(t, google.DeleteVPC(ctx, vpcName)) }() - performTest(testGooglePeerConnection(t, newContainer.ID, vpcName)) + performTest(newContainer.ID, testGooglePeerConnection(t, vpcName)) }, }, } { - tc.preparedCloudTest(func(peerRequest *networkpeering.NetworkPeer) { + tc.preparedCloudTest(func(containerID string, cfg *akov2.AtlasNetworkPeeringConfig) { t.Run(fmt.Sprintf("create %s peer connection", tc.provider), func(t *testing.T) { - newPeer, err := ps.CreatePeer(ctx, testProjectID, peerRequest) + newPeer, err := nps.Create(ctx, testProjectID, containerID, cfg) 
require.NoError(t, err) assert.NotEmpty(t, newPeer) createdPeer = newPeer }) - t.Run(fmt.Sprintf("list %s peer connections", tc.provider), func(t *testing.T) { - containers, err := ps.ListPeers(ctx, testProjectID) + t.Run(fmt.Sprintf("get %s peer connection", tc.provider), func(t *testing.T) { + peer, err := nps.Get(ctx, testProjectID, createdPeer.ID) require.NoError(t, err) - assert.NotEmpty(t, containers) - assert.GreaterOrEqual(t, len(containers), 1) + assert.Equal(t, createdPeer, peer) }) t.Run(fmt.Sprintf("delete %s peer connection", tc.provider), func(t *testing.T) { - assert.NoError(t, ps.DeletePeer(ctx, testProjectID, createdPeer.ID)) + assert.NoError(t, nps.Delete(ctx, testProjectID, createdPeer.ID)) }) }) } }) } -func testAWSPeeringContainer(cidr string) *networkpeering.ProviderContainer { - return &networkpeering.ProviderContainer{ - Provider: "AWS", - AtlasProviderContainerConfig: akov2.AtlasProviderContainerConfig{ - ContainerRegion: "US_EAST_1", - AtlasCIDRBlock: cidr, +func testAWSPeeringContainer(cidr string) *networkcontainer.NetworkContainerConfig { + return &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: cidr, }, } } -func testAzurePeeringContainer(cidr string) *networkpeering.ProviderContainer { - return &networkpeering.ProviderContainer{ - Provider: "AZURE", - AtlasProviderContainerConfig: akov2.AtlasProviderContainerConfig{ - ContainerRegion: "US_EAST_2", - AtlasCIDRBlock: cidr, +func testAzurePeeringContainer(cidr string) *networkcontainer.NetworkContainerConfig { + return &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderAzure), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_2", + CIDRBlock: cidr, }, } } -func testGooglePeeringContainer(cidr string) *networkpeering.ProviderContainer { - return &networkpeering.ProviderContainer{ - Provider: 
"GCP", - AtlasProviderContainerConfig: akov2.AtlasProviderContainerConfig{ - AtlasCIDRBlock: cidr, +func testGooglePeeringContainer(cidr string) *networkcontainer.NetworkContainerConfig { + return &networkcontainer.NetworkContainerConfig{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + CIDRBlock: cidr, }, } } -func testAWSPeerConnection(t *testing.T, containerID string, vpcCIDR, vpcID string) *networkpeering.NetworkPeer { - return &networkpeering.NetworkPeer{ - AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ - Provider: "AWS", - ContainerID: containerID, - AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ - AWSAccountID: mustHaveEnvVar(t, "AWS_ACCOUNT_ID"), - AccepterRegionName: "us-east-1", - RouteTableCIDRBlock: vpcCIDR, - VpcID: vpcID, - }, +func testAWSPeerConnection(t *testing.T, vpcCIDR, vpcID string) *akov2.AtlasNetworkPeeringConfig { + return &akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAWS), + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AWSAccountID: mustHaveEnvVar(t, "AWS_ACCOUNT_ID"), + AccepterRegionName: "us-east-1", + RouteTableCIDRBlock: vpcCIDR, + VpcID: vpcID, }, } } -func testAzurePeerConnection(t *testing.T, containerID string, vpcName string) *networkpeering.NetworkPeer { - return &networkpeering.NetworkPeer{ - AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ - Provider: "AZURE", - ContainerID: containerID, - AzureConfiguration: &akov2.AzureNetworkPeeringConfiguration{ - AzureDirectoryID: mustHaveEnvVar(t, "AZURE_TENANT_ID"), - AzureSubscriptionID: mustHaveEnvVar(t, "AZURE_SUBSCRIPTION_ID"), - ResourceGroupName: azure.TestResourceGroupName(), - VNetName: vpcName, - }, +func testAzurePeerConnection(t *testing.T, vpcName string) *akov2.AtlasNetworkPeeringConfig { + return &akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAzure), + AzureConfiguration: &akov2.AzureNetworkPeeringConfiguration{ + 
AzureDirectoryID: mustHaveEnvVar(t, "AZURE_TENANT_ID"), + AzureSubscriptionID: mustHaveEnvVar(t, "AZURE_SUBSCRIPTION_ID"), + ResourceGroupName: azure.TestResourceGroupName(), + VNetName: vpcName, }, } } -func testGooglePeerConnection(t *testing.T, containerID string, vpcName string) *networkpeering.NetworkPeer { - return &networkpeering.NetworkPeer{ - AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ - Provider: "GCP", - ContainerID: containerID, - GCPConfiguration: &akov2.GCPNetworkPeeringConfiguration{ - GCPProjectID: mustHaveEnvVar(t, "GOOGLE_PROJECT_ID"), - NetworkName: vpcName, - }, +func testGooglePeerConnection(t *testing.T, vpcName string) *akov2.AtlasNetworkPeeringConfig { + return &akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderGCP), + GCPConfiguration: &akov2.GCPNetworkPeeringConfiguration{ + GCPProjectID: mustHaveEnvVar(t, "GOOGLE_PROJECT_ID"), + NetworkName: vpcName, }, } } diff --git a/test/e2e/network_peering_controller_test.go b/test/e2e/network_peering_controller_test.go new file mode 100644 index 0000000000..8bd4e00376 --- /dev/null +++ b/test/e2e/network_peering_controller_test.go @@ -0,0 +1,424 @@ +package e2e_test + +import ( + "fmt" + "os" + "time" + + "github.com/google/uuid" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/provider" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/actions" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/actions/cloud" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/data" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/model" +) + +const ( + statusPendingAcceptance = "PENDING_ACCEPTANCE" + statusWaitingUser = "WAITING_FOR_USER" + SubscriptionID = "AZURE_SUBSCRIPTION_ID" + DirectoryID = "AZURE_TENANT_ID" + GCPVPCName = "network-peering-gcp-1-vpc" + AzureVPCName = "test-vnet" +) + +func newRandomName(base string) string { + randomSuffix := uuid.New().String()[0:6] + return fmt.Sprintf("%s-%s", base, randomSuffix) +} + +type containerAndPeering struct { + container *akov2.AtlasNetworkContainer + peering *akov2.AtlasNetworkPeering +} + +var _ = Describe("NetworkPeeringController", Label("networkpeering-controller"), func() { + var testData *model.TestDataProvider + + _ = BeforeEach(OncePerOrdered, func() { + checkUpAWSEnvironment() + checkUpAzureEnvironment() + checkNSetUpGCPEnvironment() + }) + + _ = AfterEach(func() { + GinkgoWriter.Write([]byte("\n")) + GinkgoWriter.Write([]byte("===============================================\n")) + GinkgoWriter.Write([]byte("Network Peering Controller Test\n")) + GinkgoWriter.Write([]byte("Operator namespace: " + testData.Resources.Namespace + "\n")) + GinkgoWriter.Write([]byte("===============================================\n")) + if CurrentSpecReport().Failed() { + Expect(actions.SaveProjectsToFile(testData.Context, testData.K8SClient, 
testData.Resources.Namespace)).Should(Succeed()) + } + By("Delete Resources, Project with NetworkPeering", func() { + actions.DeleteTestDataNetworkPeerings(testData) + actions.DeleteTestDataNetworkContainers(testData) + actions.DeleteTestDataProject(testData) + actions.AfterEachFinalCleanup([]model.TestDataProvider{*testData}) + }) + }) + + DescribeTable("NetworkPeeringController", + func(test *model.TestDataProvider, pairs []containerAndPeering) { + testData = test + actions.ProjectCreationFlow(test) + networkPeerControllerFlow(test, pairs) + }, + Entry("Test[networkpeering-aws-1]: AWS Network Peering CR within a region and without existent Atlas Container", + Label("network-peering-cr-aws-1"), + model.DataProvider( + "network-peering-cr-aws-1", + model.NewEmptyAtlasKeyType().UseDefaultFullAccess(), + 40000, + []func(*model.TestDataProvider){}, + ).WithProject(data.DefaultProject()), + []containerAndPeering{ + { + container: &akov2.AtlasNetworkContainer{ + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.8.0.0/22", + }, + }, + }, + peering: &akov2.AtlasNetworkPeering{ + Spec: akov2.AtlasNetworkPeeringSpec{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAWS), + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", // AccepterRegionName uses AWS region names + AWSAccountID: os.Getenv("AWS_ACCOUNT_ID"), + RouteTableCIDRBlock: "10.0.0.0/24", + }, + }, + }, + }, + }, + }, + ), + Entry("Test[networkpeering-aws-2]: AWS Network Peering CR between different regions and without existent Atlas Container", + Label("network-peering-cr-aws-2"), + model.DataProvider( + "network-peering-cr-aws-2", + model.NewEmptyAtlasKeyType().UseDefaultFullAccess(), + 40000, + []func(*model.TestDataProvider){}, + ).WithProject(data.DefaultProject()), + 
[]containerAndPeering{ + { + container: &akov2.AtlasNetworkContainer{ + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.8.0.0/22", + }, + }, + }, + peering: &akov2.AtlasNetworkPeering{ + Spec: akov2.AtlasNetworkPeeringSpec{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAWS), + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "eu-west-2", + AWSAccountID: os.Getenv("AWS_ACCOUNT_ID"), + RouteTableCIDRBlock: "10.0.0.0/24", + }, + }, + }, + }, + }, + }, + ), + Entry("Test[networkpeering-aws-3]: AWS Network Peering CRs between different regions and without container region specified", + Label("network-peering-cr-aws-3"), + model.DataProvider( + "network-peering-cr-aws-3", + model.NewEmptyAtlasKeyType().UseDefaultFullAccess(), + 40000, + []func(*model.TestDataProvider){}, + ).WithProject(data.DefaultProject()), + []containerAndPeering{ + { + container: &akov2.AtlasNetworkContainer{ + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + Region: "US_EAST_1", + CIDRBlock: "10.64.0.0/22", + }, + }, + }, + peering: &akov2.AtlasNetworkPeering{ + Spec: akov2.AtlasNetworkPeeringSpec{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAWS), + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "eu-west-1", + AWSAccountID: os.Getenv("AWS_ACCOUNT_ID"), + RouteTableCIDRBlock: "192.168.0.0/16", + }, + }, + }, + }, + }, + { + container: &akov2.AtlasNetworkContainer{ + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderAWS), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + CIDRBlock: "10.128.0.0/22", + Region: "US_WEST_1", + }, + }, + }, + peering: 
&akov2.AtlasNetworkPeering{ + Spec: akov2.AtlasNetworkPeeringSpec{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAWS), + AWSConfiguration: &akov2.AWSNetworkPeeringConfiguration{ + AccepterRegionName: "us-east-1", + AWSAccountID: os.Getenv("AWS_ACCOUNT_ID"), + RouteTableCIDRBlock: "10.0.0.0/24", + }, + }, + }, + }, + }, + }, + ), + Entry("Test[networkpeering-gcp-1]: GCP Network Peering CR", + Label("network-peering-cr-gcp-1"), + model.DataProvider( + "network-peering-cr-gcp-1", + model.NewEmptyAtlasKeyType().UseDefaultFullAccess(), + 40000, + []func(*model.TestDataProvider){}, + ).WithProject(data.DefaultProject()), + []containerAndPeering{ + { + container: &akov2.AtlasNetworkContainer{ + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderGCP), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + CIDRBlock: "10.8.0.0/18", + }, + }, + }, + peering: &akov2.AtlasNetworkPeering{ + Spec: akov2.AtlasNetworkPeeringSpec{ + AtlasNetworkPeeringConfig: akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderGCP), + GCPConfiguration: &akov2.GCPNetworkPeeringConfiguration{ + GCPProjectID: cloud.GoogleProjectID, + NetworkName: newRandomName(GCPVPCName), + }, + }, + }, + }, + }, + }, + ), + Entry("Test[networkpeering-azure-1]: Azure Network Peering CR", + Label("network-peering-cr-azure-1"), + model.DataProvider( + "network-peering-cr-azure-1", + model.NewEmptyAtlasKeyType().UseDefaultFullAccess(), + 40000, + []func(*model.TestDataProvider){}, + ).WithProject(data.DefaultProject()), + []containerAndPeering{ + { + container: &akov2.AtlasNetworkContainer{ + Spec: akov2.AtlasNetworkContainerSpec{ + Provider: string(provider.ProviderAzure), + AtlasNetworkContainerConfig: akov2.AtlasNetworkContainerConfig{ + CIDRBlock: "192.168.248.0/21", + Region: "US_EAST_2", + }, + }, + }, + peering: &akov2.AtlasNetworkPeering{ + Spec: akov2.AtlasNetworkPeeringSpec{ + AtlasNetworkPeeringConfig: 
akov2.AtlasNetworkPeeringConfig{ + Provider: string(provider.ProviderAzure), + AzureConfiguration: &akov2.AzureNetworkPeeringConfiguration{ + AzureDirectoryID: os.Getenv(DirectoryID), + AzureSubscriptionID: os.Getenv(SubscriptionID), + ResourceGroupName: cloud.ResourceGroupName, + VNetName: newRandomName(AzureVPCName), + }, + }, + }, + }, + }, + }, + ), + ) +}) + +func networkPeerControllerFlow(userData *model.TestDataProvider, pairs []containerAndPeering) { + providerActions := make([]cloud.Provider, len(pairs)) + + By("Prepare network peers cloud infrastructure from CRs", func() { + for i, pair := range pairs { + peer := pair.peering + providerAction, err := prepareProviderAction() + Expect(err).To(BeNil()) + providerActions[i] = providerAction + + providerName := provider.ProviderName(peer.Spec.Provider) + switch providerName { + case provider.ProviderAWS: + peer.Spec.AWSConfiguration.AWSAccountID = providerActions[i].GetAWSAccountID() + cfg := &cloud.AWSConfig{ + Region: peer.Spec.AWSConfiguration.AccepterRegionName, + VPC: newRandomName("ao-vpc-peering-e2e"), + CIDR: peer.Spec.AWSConfiguration.RouteTableCIDRBlock, + Subnets: map[string]string{"ao-peering-e2e-subnet": peer.Spec.AWSConfiguration.RouteTableCIDRBlock}, + EnableCleanup: true, + } + peer.Spec.AWSConfiguration.VpcID = providerActions[i].SetupNetwork(providerName, cloud.WithAWSConfig(cfg)) + case provider.ProviderGCP: + cfg := &cloud.GCPConfig{ + VPC: peer.Spec.GCPConfiguration.NetworkName, + EnableCleanup: true, + } + providerActions[i].SetupNetwork(providerName, cloud.WithGCPConfig(cfg)) + case provider.ProviderAzure: + cfg := &cloud.AzureConfig{ + VPC: peer.Spec.AzureConfiguration.VNetName, + EnableCleanup: true, + } + providerActions[i].SetupNetwork(providerName, cloud.WithAzureConfig(cfg)) + } + } + }) + + By("Create network containers from CRs and update their IDs", func() { + for i, pair := range pairs { + container := pair.container + container.Spec.ProjectRef = &common.ResourceRefNamespaced{ 
+	Name:      userData.Project.Name, +	Namespace: userData.Project.Namespace, +	} +	container.Name = fmt.Sprintf("container-%s-item-%d", userData.Prefix, i) +	container.Namespace = userData.Project.Namespace +	Expect(userData.K8SClient.Create(userData.Context, container)).Should(Succeed()) +	} +	for _, pair := range pairs { +	key := client.ObjectKeyFromObject(pair.container) +	Eventually(func(g Gomega) bool { +	g.Expect(userData.K8SClient.Get(userData.Context, key, pair.container)).Should(Succeed()) +	return pair.container.Status.ID != "" +	}).WithTimeout(3*time.Minute).WithPolling(20*time.Second).Should( +	BeTrue(), +	"Network Containers CRs should be created with an Atlas ID set in the status", +	) +	} +	}) + +	By("Create network peer from CRs", func() { +	for i, pair := range pairs { +	peer := pair.peering +	peer.Spec.ProjectRef = &common.ResourceRefNamespaced{ +	Name:      userData.Project.Name, +	Namespace: userData.Project.Namespace, +	} +	peer.Name = fmt.Sprintf("%s-item-%d", userData.Prefix, i) +	peer.Namespace = userData.Project.Namespace +	peer.Spec.ContainerRef.ID = pair.container.Status.ID +	Expect(userData.K8SClient.Create(userData.Context, peer)).Should(Succeed()) +	} +	}) + +	By("Establish network peers connection with CRs", func() { +	Eventually(func(g Gomega) bool { +	return EnsurePeersReadyToConnect(g, userData, pairs) +	}).WithTimeout(15*time.Minute).WithPolling(20*time.Second).Should( +	BeTrue(), +	"Network Peering CRs should be ready to establish connection", +	) + +	for ix, pair := range pairs { +	peer := pair.peering +	providerName := provider.ProviderName(peer.Spec.Provider) +	switch providerName { +	case provider.ProviderAWS: +	providerActions[ix].SetupNetworkPeering( +	providerName, +	peer.Status.AWSStatus.ConnectionID, +	"", +	) +	case provider.ProviderGCP: +	providerActions[ix].SetupNetworkPeering( +	providerName, +	pair.peering.Status.GCPStatus.GCPProjectID, +	pair.peering.Status.GCPStatus.NetworkName, +	) +	} +	key := types.NamespacedName{Name: 
peer.Name, Namespace: peer.Namespace} +	Eventually(func(g Gomega) bool { +	g.Expect(userData.K8SClient.Get(userData.Context, key, peer)).Should(Succeed()) +	return peer.Status.Status == "AVAILABLE" +	}).WithTimeout(15*time.Minute).WithPolling(5*time.Second).Should( +	BeTrue(), +	"Network Peering CRs should become available", +	) +	} +	}) + +	By("Check network containers & peers CRs to be Ready", func() { +	for _, pair := range pairs { +	containerKey := client.ObjectKeyFromObject(pair.container) +	Expect(userData.K8SClient.Get(userData.Context, containerKey, pair.container)).Should(Succeed()) +	Expect(networkContainerReady(pair.container)).Should(BeTrue()) + +	key := client.ObjectKeyFromObject(pair.peering) +	Expect(userData.K8SClient.Get(userData.Context, key, pair.peering)).Should(Succeed()) +	Expect(networkPeeringReady(pair.peering)).Should(BeTrue()) +	} +	}) +} + +func EnsurePeersReadyToConnect(g Gomega, userData *model.TestDataProvider, pairs []containerAndPeering) bool { +	for _, pair := range pairs { +	key := client.ObjectKeyFromObject(pair.peering) +	g.Expect(userData.K8SClient.Get(userData.Context, key, pair.peering)).Should(Succeed()) +	if pair.peering.Spec.Provider == string(provider.ProviderAzure) { +	continue +	} +	statusMsg := pair.peering.Status.Status +	if statusMsg != statusPendingAcceptance && statusMsg != statusWaitingUser { +	return false +	} +	if pair.peering.Spec.Provider == string(provider.ProviderAWS) && +	pair.peering.Status.AWSStatus == nil { +	return false +	} +	} +	By("Network containers & peers are ready to connect", func() {}) +	return true +} + +func networkPeeringReady(peer *akov2.AtlasNetworkPeering) bool { +	for _, condition := range peer.Status.Conditions { +	GinkgoWriter.Printf("peer %s condition type=%s status=%s\n", +	peer.Status.ID, condition.Type, condition.Status) +	if condition.Type == api.ReadyType && condition.Status == v1.ConditionTrue { +	return true +	} +	} +	return false +} diff --git 
a/test/e2e/network_peering_test.go b/test/e2e/network_peering_test.go index 9fb0b9ae37..59799169b5 100644 --- a/test/e2e/network_peering_test.go +++ b/test/e2e/network_peering_test.go @@ -1,11 +1,9 @@ package e2e_test import ( - "fmt" "os" "time" - "github.com/google/uuid" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/types" @@ -20,20 +18,6 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/model" ) -const ( - statusPendingAcceptance = "PENDING_ACCEPTANCE" - statusWaitingUser = "WAITING_FOR_USER" - SubscriptionID = "AZURE_SUBSCRIPTION_ID" - DirectoryID = "AZURE_TENANT_ID" - GCPVPCName = "network-peering-gcp-1-vpc" - AzureVPCName = "test-vnet" -) - -func newRandomName(base string) string { - randomSuffix := uuid.New().String()[0:6] - return fmt.Sprintf("%s-%s", base, randomSuffix) -} - var _ = Describe("NetworkPeering", Label("networkpeering"), func() { var testData *model.TestDataProvider @@ -224,7 +208,7 @@ func networkPeerFlow(userData *model.TestDataProvider, peers []akov2.NetworkPeer By("Establish network peers connection", func() { Eventually(func(g Gomega) bool { - return EnsurePeersReadyToConnect(g, userData, len(peers)) + return EnsureProjectPeersReadyToConnect(g, userData, len(peers)) }).WithTimeout(15*time.Minute).WithPolling(20*time.Second).Should(BeTrue(), "Network Peering should be ready to establish connection") Expect(userData.K8SClient.Get(userData.Context, types.NamespacedName{Name: userData.Project.Name, Namespace: userData.Project.Namespace}, userData.Project)).Should(Succeed()) @@ -245,7 +229,7 @@ func networkPeerFlow(userData *model.TestDataProvider, peers []akov2.NetworkPeer }) } -func EnsurePeersReadyToConnect(g Gomega, userData *model.TestDataProvider, lenOfSpec int) bool { +func EnsureProjectPeersReadyToConnect(g Gomega, userData *model.TestDataProvider, lenOfSpec int) bool { g.Expect(userData.K8SClient.Get(userData.Context, types.NamespacedName{Name: userData.Project.Name, 
Namespace: userData.Project.Namespace}, userData.Project)).Should(Succeed()) if len(userData.Project.Status.NetworkPeers) != lenOfSpec { return false diff --git a/test/helper/e2e/actions/steps.go b/test/helper/e2e/actions/steps.go index 441af602f7..66eb7a900a 100644 --- a/test/helper/e2e/actions/steps.go +++ b/test/helper/e2e/actions/steps.go @@ -533,6 +533,24 @@ func DeleteTestDataNetworkContainers(data *model.TestDataProvider) { }) } +func DeleteTestDataNetworkPeerings(data *model.TestDataProvider) { + By("Delete network peerings", func() { + peerings := &akov2.AtlasNetworkPeeringList{} + Expect(data.K8SClient.List(data.Context, peerings, &client.ListOptions{Namespace: data.Resources.Namespace})).Should(Succeed()) + for _, peering := range peerings.Items { + Expect(data.K8SClient.Delete(data.Context, &peering)).Should(Succeed()) + key := client.ObjectKey{Name: peering.Name, Namespace: peering.Namespace} + Eventually( + func() bool { + foundPeering := &akov2.AtlasNetworkPeering{} + err := data.K8SClient.Get(data.Context, key, foundPeering) + return err != nil && errors.IsNotFound(err) + }, + ).WithTimeout(10*time.Minute).WithPolling(20*time.Second).Should(BeTrue(), "Network peering should be deleted from Atlas") + } + }) +} + func AfterEachFinalCleanup(datas []model.TestDataProvider) { for i := range datas { data := datas[i]