diff --git a/Makefile b/Makefile index f044364e..1e76fa66 100644 --- a/Makefile +++ b/Makefile @@ -365,7 +365,7 @@ nilcheck: $(NILAWAY) ## Run nil check against codemake. @# Backendstore contains mostly nil safe generated files. @# Lifecycleconfig_helper has false positive reports: https://github.com/uber-go/nilaway/issues/207 go list ./... | xargs -I {} -d '\n' $(NILAWAY) \ - -exclude-errors-in-files $(PWD)/internal/controller/bucket/bucket_backends.go,$(PWD)/internal/rgw/lifecycleconfig_helpers.go \ + -exclude-errors-in-files $(PWD)/internal/controller/bucket/bucket_backends.go,$(PWD)/internal/rgw/lifecycleconfig_helpers.go,$(PWD)/internal/rgw/objectlockconfiguration_helpers.go \ -exclude-pkgs github.com/linode/provider-ceph/apis/provider-ceph/v1alpha1,github.com/linode/provider-ceph/internal/backendstore \ -include-pkgs {} ./... diff --git a/apis/provider-ceph/v1alpha1/bucket_types.go b/apis/provider-ceph/v1alpha1/bucket_types.go index 0a15b55f..67280c59 100644 --- a/apis/provider-ceph/v1alpha1/bucket_types.go +++ b/apis/provider-ceph/v1alpha1/bucket_types.go @@ -54,6 +54,7 @@ type BucketParameters struct { GrantWriteACP *string `json:"grantWriteACP,omitempty"` // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // +kubebuilder:validation:Enum=true;null ObjectLockEnabledForBucket *bool `json:"objectLockEnabledForBucket,omitempty"` // The container element for object ownership for a bucket's ownership controls. @@ -88,6 +89,10 @@ type BucketParameters struct { // +optional VersioningConfiguration *VersioningConfiguration `json:"versioningConfiguration,omitempty"` + // ObjectLockConfiguration describes the desired object lock state of an S3 bucket. + // +optional + ObjectLockConfiguration *ObjectLockConfiguration `json:"objectLockConfiguration,omitempty"` + // AssumeRoleTags may be used to add custom values to an AssumeRole request. // +optional AssumeRoleTags []Tag `json:"assumeRoleTags,omitempty"` @@ -114,6 +119,11 @@ type BackendInfo struct { // configuration on the S3 backend. Use a pointer to allow nil value when // there is no versioning configuration. VersioningConfigurationCondition *xpv1.Condition `json:"versioningConfigurationCondition,omitempty"` + // +optional + // ObjectLockConfigurationCondition is the condition of the object lock + // configuration on the S3 backend. Use a pointer to allow nil value when + // there is no object lock configuration. + ObjectLockConfigurationCondition *xpv1.Condition `json:"objectLockConfigurationCondition,omitempty"` } // Backends is a map of the names of the S3 backends to BackendInfo. diff --git a/apis/provider-ceph/v1alpha1/objectlockconfiguration_types.go b/apis/provider-ceph/v1alpha1/objectlockconfiguration_types.go new file mode 100644 index 00000000..c1b3aa05 --- /dev/null +++ b/apis/provider-ceph/v1alpha1/objectlockconfiguration_types.go @@ -0,0 +1,53 @@ +package v1alpha1 + +type ObjectLockEnabled string + +const ( + ObjectLockEnabledEnabled ObjectLockEnabled = "Enabled" +) + +type DefaultRetentionMode string + +const ( + ModeGovernance DefaultRetentionMode = "GOVERNANCE" + ModeCompliance DefaultRetentionMode = "COMPLIANCE" +) + +// ObjectLockConfiguration describes the object lock state of an S3 bucket. +type ObjectLockConfiguration struct { + // +optional. + // Indicates whether this bucket has an Object Lock configuration enabled. Enable + // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket. 
+ // +kubebuilder:validation:Enum=Enabled + ObjectLockEnabled *ObjectLockEnabled `json:"objectLockEnabled,omitempty"` + // +optional. + // Specifies the Object Lock rule for the specified object. Enable this rule + // when you apply ObjectLockConfiguration to a bucket. Bucket settings require + // both a mode and a period. The period can be either Days or Years but you must + // select one. You cannot specify Days and Years at the same time. + Rule *ObjectLockRule `json:"objectLockRule,omitempty"` +} + +type ObjectLockRule struct { + // +optional. + // The default Object Lock retention mode and period that you want to apply to new + // objects placed in the specified bucket. Bucket settings require both a mode and + // a period. The period can be either Days or Years but you must select one. You + // cannot specify Days and Years at the same time. + DefaultRetention *DefaultRetention `json:"defaultRetention,omitempty"` +} + +type DefaultRetention struct { + // +optional. + // The number of days that you want to specify for the default retention period. + // Must be used with Mode. + Days *int32 `json:"days,omitempty"` + // The default Object Lock retention mode you want to apply to new objects placed + // in the specified bucket. Must be used with either Days or Years. + // +kubebuilder:validation:Enum=GOVERNANCE;COMPLIANCE + Mode DefaultRetentionMode `json:"mode,omitempty"` + // +optional. + // The number of years that you want to specify for the default retention period. + // Must be used with Mode. + Years *int32 `json:"years,omitempty"` +} diff --git a/apis/provider-ceph/v1alpha1/zz_generated.deepcopy.go b/apis/provider-ceph/v1alpha1/zz_generated.deepcopy.go index c5e7a6ef..1272270d 100644 --- a/apis/provider-ceph/v1alpha1/zz_generated.deepcopy.go +++ b/apis/provider-ceph/v1alpha1/zz_generated.deepcopy.go @@ -86,6 +86,11 @@ func (in *BackendInfo) DeepCopyInto(out *BackendInfo) { *out = new(v1.Condition) (*in).DeepCopyInto(*out) } + if in.ObjectLockConfigurationCondition != nil { + in, out := &in.ObjectLockConfigurationCondition, &out.ObjectLockConfigurationCondition + *out = new(v1.Condition) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendInfo. @@ -298,6 +303,11 @@ func (in *BucketParameters) DeepCopyInto(out *BucketParameters) { *out = new(VersioningConfiguration) (*in).DeepCopyInto(*out) } + if in.ObjectLockConfiguration != nil { + in, out := &in.ObjectLockConfiguration, &out.ObjectLockConfiguration + *out = new(ObjectLockConfiguration) + (*in).DeepCopyInto(*out) + } if in.AssumeRoleTags != nil { in, out := &in.AssumeRoleTags, &out.AssumeRoleTags *out = make([]Tag, len(*in)) @@ -354,6 +364,31 @@ func (in *BucketStatus) DeepCopy() *BucketStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultRetention) DeepCopyInto(out *DefaultRetention) { + *out = *in + if in.Days != nil { + in, out := &in.Days, &out.Days + *out = new(int32) + **out = **in + } + if in.Years != nil { + in, out := &in.Years, &out.Years + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultRetention. +func (in *DefaultRetention) DeepCopy() *DefaultRetention { + if in == nil { + return nil + } + out := new(DefaultRetention) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *Grant) DeepCopyInto(out *Grant) { *out = *in @@ -622,6 +657,51 @@ func (in *NoncurrentVersionTransition) DeepCopy() *NoncurrentVersionTransition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectLockConfiguration) DeepCopyInto(out *ObjectLockConfiguration) { + *out = *in + if in.ObjectLockEnabled != nil { + in, out := &in.ObjectLockEnabled, &out.ObjectLockEnabled + *out = new(ObjectLockEnabled) + **out = **in + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(ObjectLockRule) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLockConfiguration. +func (in *ObjectLockConfiguration) DeepCopy() *ObjectLockConfiguration { + if in == nil { + return nil + } + out := new(ObjectLockConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectLockRule) DeepCopyInto(out *ObjectLockRule) { + *out = *in + if in.DefaultRetention != nil { + in, out := &in.DefaultRetention, &out.DefaultRetention + *out = new(DefaultRetention) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectLockRule. +func (in *ObjectLockRule) DeepCopy() *ObjectLockRule { + if in == nil { + return nil + } + out := new(ObjectLockRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Owner) DeepCopyInto(out *Owner) { *out = *in diff --git a/e2e/tests/ceph/chainsaw-test.yaml b/e2e/tests/ceph/chainsaw-test.yaml index 1a1ccaf9..b6b1381a 100755 --- a/e2e/tests/ceph/chainsaw-test.yaml +++ b/e2e/tests/ceph/chainsaw-test.yaml @@ -62,7 +62,9 @@ spec: spec: providers: - ceph-cluster - forProvider: {} + forProvider: + objectLockEnabledForBucket: true + - apply: resource: apiVersion: provider-ceph.ceph.crossplane.io/v1alpha1 @@ -135,7 +137,7 @@ spec: - $CEPH_ADDRESS entrypoint: ../../../hack/expect_bucket.sh - - name: Apply lifecycle configuration and versioning configuration to test-bucket. + - name: Apply lifecycle, versioning and object lock configurations to test-bucket. try: - apply: resource: @@ -147,6 +149,13 @@ spec: providers: - ceph-cluster forProvider: + objectLockEnabledForBucket: true + objectLockConfiguration: + objectLockEnabled: "Enabled" + objectLockRule: + defaultRetention: + days: 1 + mode: "COMPLIANCE" versioningConfiguration: status: "Enabled" lifecycleConfiguration: @@ -201,6 +210,10 @@ spec: reason: Available status: "True" type: Ready + objectLockConfigurationCondition: + reason: Available + status: "True" + type: Ready conditions: - reason: Available status: "True" @@ -209,7 +222,7 @@ spec: status: "True" type: Synced - - name: Disable lifecycle configuration and remove versioning configuration on test-bucket. + - name: Disable lifecycle configuration and edit object lock configuration on test-bucket. 
try: - apply: resource: @@ -224,7 +237,11 @@ spec: - ceph-cluster lifecycleConfigurationDisabled: true forProvider: + objectLockEnabledForBucket: true + objectLockConfiguration: + objectLockEnabled: "Enabled" versioningConfiguration: + status: "Enabled" lifecycleConfiguration: # Example rules https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-configuration-examples.html rules: diff --git a/e2e/tests/stable/chainsaw-test.yaml b/e2e/tests/stable/chainsaw-test.yaml index fced3a4e..12973fc7 100755 --- a/e2e/tests/stable/chainsaw-test.yaml +++ b/e2e/tests/stable/chainsaw-test.yaml @@ -114,14 +114,14 @@ spec: status: availableReplicas: 1 - - name: Apply lc-and-version-configs and auto-pause-bucket. + - name: Apply subresource-configs and auto-pause-bucket. try: - apply: resource: apiVersion: provider-ceph.ceph.crossplane.io/v1alpha1 kind: Bucket metadata: - name: lc-and-version-configs + name: subresource-configs labels: provider-ceph.crossplane.io/validation-required: "true" spec: @@ -132,11 +132,18 @@ spec: - id: "ImageExpiration" status: "Enabled" expiration: - days: 1 + days: 5 filter: prefix: "images/" versioningConfiguration: status: "Enabled" + objectLockEnabledForBucket: true + objectLockConfiguration: + objectLockEnabled: "Enabled" + objectLockRule: + defaultRetention: + days: 1 + mode: "COMPLIANCE" - apply: resource: apiVersion: provider-ceph.ceph.crossplane.io/v1alpha1 @@ -148,13 +155,13 @@ spec: spec: autoPause: true forProvider: {} - # Assert lc-and-version-configs is synced with LC and Versioning configs on backends. + # Assert subresource-configs is synced with LC and Versioning configs on backends. - assert: resource: apiVersion: provider-ceph.ceph.crossplane.io/v1alpha1 kind: Bucket metadata: - name: lc-and-version-configs + name: subresource-configs finalizers: - "finalizer.managedresource.crossplane.io" labels: @@ -177,6 +184,10 @@ spec: reason: Available status: "True" type: Ready + objectLockConfigurationCondition: + reason: Available + status: "True" + type: Ready localstack-b: bucketCondition: reason: Available @@ -190,6 +201,10 @@ spec: reason: Available status: "True" type: Ready + objectLockConfigurationCondition: + reason: Available + status: "True" + type: Ready localstack-c: bucketCondition: reason: Available @@ -203,6 +218,10 @@ spec: reason: Available status: "True" type: Ready + objectLockConfigurationCondition: + reason: Available + status: "True" + type: Ready # Extra assertion for overall Bucket conditions. # This method of iterative assertions is necessary here because # these conditions do not always appear in the same order. @@ -214,7 +233,7 @@ spec: apiVersion: provider-ceph.ceph.crossplane.io/v1alpha1 kind: Bucket metadata: - name: lc-and-version-configs + name: subresource-configs status: ~.(conditions[?reason == 'Available']): status: "True" @@ -264,14 +283,14 @@ spec: status: "True" type: Synced - - name: Disable LC config and remove versioning on lc-and-version-configs. + - name: Disable LC config and remove object lock on subresource-configs. try: - apply: resource: apiVersion: provider-ceph.ceph.crossplane.io/v1alpha1 kind: Bucket metadata: - name: lc-and-version-configs + name: subresource-configs labels: provider-ceph.crossplane.io/validation-required: "true" spec: @@ -290,15 +309,17 @@ spec: filter: prefix: "images/" versioningConfiguration: - # Assert that the LC config has been removed from lc-and-version-configs. - # Assert that the versioning config remains. 
By removing it from the Spec above - # we are only suspending versioning, buckets cannot be un-versioned. + status: "Enabled" + objectLockEnabledForBucket: true + objectLockConfiguration: + # Assert that the LC config has been removed from subresource-configs. + # Assert that the object lock config remains - object lock cannot be disabled. - assert: resource: apiVersion: provider-ceph.ceph.crossplane.io/v1alpha1 kind: Bucket metadata: - name: lc-and-version-configs + name: subresource-configs status: atProvider: backends: @@ -311,6 +332,10 @@ spec: reason: Available status: "True" type: Ready + objectLockConfigurationCondition: + reason: Available + status: "True" + type: Ready localstack-b: bucketCondition: reason: Available @@ -320,6 +345,10 @@ spec: reason: Available status: "True" type: Ready + objectLockConfigurationCondition: + reason: Available + status: "True" + type: Ready localstack-c: bucketCondition: reason: Available @@ -329,6 +358,10 @@ spec: reason: Available status: "True" type: Ready + objectLockConfigurationCondition: + reason: Available + status: "True" + type: Ready # This method of iterative assertions is necessary here because # these conditions do not always appear in the same order. @@ -343,23 +376,23 @@ spec: - name: Check for buckets on Localstack backends. try: - # Check for lc-and-version-configs on all backends. + # Check for subresource-configs on all backends. - command: args: - bucket_exists - - lc-and-version-configs + - subresource-configs - local-dev-control-plane:32566 entrypoint: ../../../hack/expect_bucket.sh - command: args: - bucket_exists - - lc-and-version-configs + - subresource-configs - local-dev-control-plane:32567 entrypoint: ../../../hack/expect_bucket.sh - command: args: - bucket_exists - - lc-and-version-configs + - subresource-configs - local-dev-control-plane:32568 entrypoint: ../../../hack/expect_bucket.sh # Check for auto-pause-bucket on all backends. @@ -442,16 +475,16 @@ spec: - local-dev-control-plane:32568 entrypoint: ../../../hack/expect_bucket.sh - - name: Delete lc-and-version-configs and auto-pause-bucket. + - name: Delete subresource-configs and auto-pause-bucket. try: - command: args: - delete - bucket - - lc-and-version-configs + - subresource-configs entrypoint: kubectl - command: - # We need to "unpause" lc-and-version-configs to allow deletion. + # We need to "unpause" subresource-configs to allow deletion. args: - patch - --type=merge @@ -471,7 +504,7 @@ spec: apiVersion: provider-ceph.ceph.crossplane.io/v1alpha1 kind: Bucket metadata: - name: lc-and-version-configs + name: subresource-configs - error: resource: apiVersion: provider-ceph.ceph.crossplane.io/v1alpha1 @@ -479,25 +512,25 @@ spec: metadata: name: auto-pause-bucket - - name: Check for lc-and-version-configs and auto-pause-bucket on backends. + - name: Check for subresource-configs and auto-pause-bucket on backends. try: - # Check for lc-and-version-configs on all backends. + # Check for subresource-configs on all backends. - command: args: - bucket_does_not_exist - - lc-and-version-configs + - subresource-configs - local-dev-control-plane:32566 entrypoint: ../../../hack/expect_bucket.sh - command: args: - bucket_does_not_exist - - lc-and-version-configs + - subresource-configs - local-dev-control-plane:32567 entrypoint: ../../../hack/expect_bucket.sh - command: args: - bucket_does_not_exist - - lc-and-version-configs + - subresource-configs - local-dev-control-plane:32568 entrypoint: ../../../hack/expect_bucket.sh # Check for auto-pause-bucket on all backends. 
diff --git a/internal/backendstore/backend.go b/internal/backendstore/backend.go index 069c76c8..75e37f2d 100644 --- a/internal/backendstore/backend.go +++ b/internal/backendstore/backend.go @@ -46,6 +46,8 @@ type S3Client interface { DeleteBucketPolicy(context.Context, *s3.DeleteBucketPolicyInput, ...func(*s3.Options)) (*s3.DeleteBucketPolicyOutput, error) PutBucketVersioning(context.Context, *s3.PutBucketVersioningInput, ...func(*s3.Options)) (*s3.PutBucketVersioningOutput, error) GetBucketVersioning(context.Context, *s3.GetBucketVersioningInput, ...func(*s3.Options)) (*s3.GetBucketVersioningOutput, error) + PutObjectLockConfiguration(context.Context, *s3.PutObjectLockConfigurationInput, ...func(*s3.Options)) (*s3.PutObjectLockConfigurationOutput, error) + GetObjectLockConfiguration(context.Context, *s3.GetObjectLockConfigurationInput, ...func(*s3.Options)) (*s3.GetObjectLockConfigurationOutput, error) } //counterfeiter:generate . STSClient diff --git a/internal/backendstore/backendstorefakes/fake_s3client.go b/internal/backendstore/backendstorefakes/fake_s3client.go index 844e9c0c..791df4dd 100644 --- a/internal/backendstore/backendstorefakes/fake_s3client.go +++ b/internal/backendstore/backendstorefakes/fake_s3client.go @@ -160,6 +160,21 @@ type FakeS3Client struct { result1 *s3.GetObjectOutput result2 error } + GetObjectLockConfigurationStub func(context.Context, *s3.GetObjectLockConfigurationInput, ...func(*s3.Options)) (*s3.GetObjectLockConfigurationOutput, error) + getObjectLockConfigurationMutex sync.RWMutex + getObjectLockConfigurationArgsForCall []struct { + arg1 context.Context + arg2 *s3.GetObjectLockConfigurationInput + arg3 []func(*s3.Options) + } + getObjectLockConfigurationReturns struct { + result1 *s3.GetObjectLockConfigurationOutput + result2 error + } + getObjectLockConfigurationReturnsOnCall map[int]struct { + result1 *s3.GetObjectLockConfigurationOutput + result2 error + } HeadBucketStub func(context.Context, *s3.HeadBucketInput, ...func(*s3.Options)) (*s3.HeadBucketOutput, error) headBucketMutex sync.RWMutex headBucketArgsForCall []struct { @@ -280,6 +295,21 @@ type FakeS3Client struct { result1 *s3.PutObjectOutput result2 error } + PutObjectLockConfigurationStub func(context.Context, *s3.PutObjectLockConfigurationInput, ...func(*s3.Options)) (*s3.PutObjectLockConfigurationOutput, error) + putObjectLockConfigurationMutex sync.RWMutex + putObjectLockConfigurationArgsForCall []struct { + arg1 context.Context + arg2 *s3.PutObjectLockConfigurationInput + arg3 []func(*s3.Options) + } + putObjectLockConfigurationReturns struct { + result1 *s3.PutObjectLockConfigurationOutput + result2 error + } + putObjectLockConfigurationReturnsOnCall map[int]struct { + result1 *s3.PutObjectLockConfigurationOutput + result2 error + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -944,6 +974,72 @@ func (fake *FakeS3Client) GetObjectReturnsOnCall(i int, result1 *s3.GetObjectOut }{result1, result2} } +func (fake *FakeS3Client) GetObjectLockConfiguration(arg1 context.Context, arg2 *s3.GetObjectLockConfigurationInput, arg3 ...func(*s3.Options)) (*s3.GetObjectLockConfigurationOutput, error) { + fake.getObjectLockConfigurationMutex.Lock() + ret, specificReturn := fake.getObjectLockConfigurationReturnsOnCall[len(fake.getObjectLockConfigurationArgsForCall)] + fake.getObjectLockConfigurationArgsForCall = append(fake.getObjectLockConfigurationArgsForCall, struct { + arg1 context.Context + arg2 *s3.GetObjectLockConfigurationInput + arg3 []func(*s3.Options) + }{arg1, arg2, 
arg3}) + stub := fake.GetObjectLockConfigurationStub + fakeReturns := fake.getObjectLockConfigurationReturns + fake.recordInvocation("GetObjectLockConfiguration", []interface{}{arg1, arg2, arg3}) + fake.getObjectLockConfigurationMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeS3Client) GetObjectLockConfigurationCallCount() int { + fake.getObjectLockConfigurationMutex.RLock() + defer fake.getObjectLockConfigurationMutex.RUnlock() + return len(fake.getObjectLockConfigurationArgsForCall) +} + +func (fake *FakeS3Client) GetObjectLockConfigurationCalls(stub func(context.Context, *s3.GetObjectLockConfigurationInput, ...func(*s3.Options)) (*s3.GetObjectLockConfigurationOutput, error)) { + fake.getObjectLockConfigurationMutex.Lock() + defer fake.getObjectLockConfigurationMutex.Unlock() + fake.GetObjectLockConfigurationStub = stub +} + +func (fake *FakeS3Client) GetObjectLockConfigurationArgsForCall(i int) (context.Context, *s3.GetObjectLockConfigurationInput, []func(*s3.Options)) { + fake.getObjectLockConfigurationMutex.RLock() + defer fake.getObjectLockConfigurationMutex.RUnlock() + argsForCall := fake.getObjectLockConfigurationArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeS3Client) GetObjectLockConfigurationReturns(result1 *s3.GetObjectLockConfigurationOutput, result2 error) { + fake.getObjectLockConfigurationMutex.Lock() + defer fake.getObjectLockConfigurationMutex.Unlock() + fake.GetObjectLockConfigurationStub = nil + fake.getObjectLockConfigurationReturns = struct { + result1 *s3.GetObjectLockConfigurationOutput + result2 error + }{result1, result2} +} + +func (fake *FakeS3Client) GetObjectLockConfigurationReturnsOnCall(i int, result1 *s3.GetObjectLockConfigurationOutput, result2 error) { + fake.getObjectLockConfigurationMutex.Lock() + defer fake.getObjectLockConfigurationMutex.Unlock() + fake.GetObjectLockConfigurationStub = nil + if fake.getObjectLockConfigurationReturnsOnCall == nil { + fake.getObjectLockConfigurationReturnsOnCall = make(map[int]struct { + result1 *s3.GetObjectLockConfigurationOutput + result2 error + }) + } + fake.getObjectLockConfigurationReturnsOnCall[i] = struct { + result1 *s3.GetObjectLockConfigurationOutput + result2 error + }{result1, result2} +} + func (fake *FakeS3Client) HeadBucket(arg1 context.Context, arg2 *s3.HeadBucketInput, arg3 ...func(*s3.Options)) (*s3.HeadBucketOutput, error) { fake.headBucketMutex.Lock() ret, specificReturn := fake.headBucketReturnsOnCall[len(fake.headBucketArgsForCall)] @@ -1472,6 +1568,72 @@ func (fake *FakeS3Client) PutObjectReturnsOnCall(i int, result1 *s3.PutObjectOut }{result1, result2} } +func (fake *FakeS3Client) PutObjectLockConfiguration(arg1 context.Context, arg2 *s3.PutObjectLockConfigurationInput, arg3 ...func(*s3.Options)) (*s3.PutObjectLockConfigurationOutput, error) { + fake.putObjectLockConfigurationMutex.Lock() + ret, specificReturn := fake.putObjectLockConfigurationReturnsOnCall[len(fake.putObjectLockConfigurationArgsForCall)] + fake.putObjectLockConfigurationArgsForCall = append(fake.putObjectLockConfigurationArgsForCall, struct { + arg1 context.Context + arg2 *s3.PutObjectLockConfigurationInput + arg3 []func(*s3.Options) + }{arg1, arg2, arg3}) + stub := fake.PutObjectLockConfigurationStub + fakeReturns := fake.putObjectLockConfigurationReturns + fake.recordInvocation("PutObjectLockConfiguration", 
[]interface{}{arg1, arg2, arg3}) + fake.putObjectLockConfigurationMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeS3Client) PutObjectLockConfigurationCallCount() int { + fake.putObjectLockConfigurationMutex.RLock() + defer fake.putObjectLockConfigurationMutex.RUnlock() + return len(fake.putObjectLockConfigurationArgsForCall) +} + +func (fake *FakeS3Client) PutObjectLockConfigurationCalls(stub func(context.Context, *s3.PutObjectLockConfigurationInput, ...func(*s3.Options)) (*s3.PutObjectLockConfigurationOutput, error)) { + fake.putObjectLockConfigurationMutex.Lock() + defer fake.putObjectLockConfigurationMutex.Unlock() + fake.PutObjectLockConfigurationStub = stub +} + +func (fake *FakeS3Client) PutObjectLockConfigurationArgsForCall(i int) (context.Context, *s3.PutObjectLockConfigurationInput, []func(*s3.Options)) { + fake.putObjectLockConfigurationMutex.RLock() + defer fake.putObjectLockConfigurationMutex.RUnlock() + argsForCall := fake.putObjectLockConfigurationArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeS3Client) PutObjectLockConfigurationReturns(result1 *s3.PutObjectLockConfigurationOutput, result2 error) { + fake.putObjectLockConfigurationMutex.Lock() + defer fake.putObjectLockConfigurationMutex.Unlock() + fake.PutObjectLockConfigurationStub = nil + fake.putObjectLockConfigurationReturns = struct { + result1 *s3.PutObjectLockConfigurationOutput + result2 error + }{result1, result2} +} + +func (fake *FakeS3Client) PutObjectLockConfigurationReturnsOnCall(i int, result1 *s3.PutObjectLockConfigurationOutput, result2 error) { + fake.putObjectLockConfigurationMutex.Lock() + defer fake.putObjectLockConfigurationMutex.Unlock() + fake.PutObjectLockConfigurationStub = nil + if fake.putObjectLockConfigurationReturnsOnCall == nil { + fake.putObjectLockConfigurationReturnsOnCall = make(map[int]struct { + result1 *s3.PutObjectLockConfigurationOutput + result2 error + }) + } + fake.putObjectLockConfigurationReturnsOnCall[i] = struct { + result1 *s3.PutObjectLockConfigurationOutput + result2 error + }{result1, result2} +} + func (fake *FakeS3Client) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() @@ -1495,6 +1657,8 @@ func (fake *FakeS3Client) Invocations() map[string][][]interface{} { defer fake.getBucketVersioningMutex.RUnlock() fake.getObjectMutex.RLock() defer fake.getObjectMutex.RUnlock() + fake.getObjectLockConfigurationMutex.RLock() + defer fake.getObjectLockConfigurationMutex.RUnlock() fake.headBucketMutex.RLock() defer fake.headBucketMutex.RUnlock() fake.listObjectVersionsMutex.RLock() @@ -1511,6 +1675,8 @@ func (fake *FakeS3Client) Invocations() map[string][][]interface{} { defer fake.putBucketVersioningMutex.RUnlock() fake.putObjectMutex.RLock() defer fake.putObjectMutex.RUnlock() + fake.putObjectLockConfigurationMutex.RLock() + defer fake.putObjectLockConfigurationMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/internal/controller/bucket/acl.go b/internal/controller/bucket/acl.go index 55bea4ef..165bbe12 100644 --- a/internal/controller/bucket/acl.go +++ b/internal/controller/bucket/acl.go @@ -46,7 +46,7 @@ func (l *ACLClient) Observe(ctx context.Context, bucket *v1alpha1.Bucket, backen for i := 0; i < 
len(backendNames); i++ { observation := <-observationChan - if observation != Updated { + if observation == NeedsUpdate || observation == NeedsDeletion { return observation, nil } } @@ -92,9 +92,8 @@ func (l *ACLClient) Handle(ctx context.Context, b *v1alpha1.Bucket, backendName defer span.End() switch l.observeBackend(b, backendName) { - case Updated: + case NoAction, Updated: return nil - case NeedsUpdate, NeedsDeletion: if err := l.createOrUpdate(ctx, b, backendName); err != nil { err = errors.Wrap(err, errHandleAcl) diff --git a/internal/controller/bucket/bucket_backends.go b/internal/controller/bucket/bucket_backends.go index 3e105139..ba8207e9 100644 --- a/internal/controller/bucket/bucket_backends.go +++ b/internal/controller/bucket/bucket_backends.go @@ -111,6 +111,36 @@ func (b *bucketBackends) getVersioningConfigCondition(bucketName, backendName st return b.backends[bucketName][backendName].VersioningConfigurationCondition } +func (b *bucketBackends) setObjectLockConfigCondition(bucketName, backendName string, c *xpv1.Condition) { + b.mu.Lock() + defer b.mu.Unlock() + + if b.backends[bucketName] == nil { + b.backends[bucketName] = make(v1alpha1.Backends) + } + + if b.backends[bucketName][backendName] == nil { + b.backends[bucketName][backendName] = &v1alpha1.BackendInfo{} + } + + b.backends[bucketName][backendName].ObjectLockConfigurationCondition = c +} + +func (b *bucketBackends) getObjectLockConfigCondition(bucketName, backendName string) *xpv1.Condition { + b.mu.RLock() + defer b.mu.RUnlock() + + if _, ok := b.backends[bucketName]; !ok { + return nil + } + + if _, ok := b.backends[bucketName][backendName]; !ok { + return nil + } + + return b.backends[bucketName][backendName].ObjectLockConfigurationCondition +} + func (b *bucketBackends) deleteBackend(bucketName, backendName string) { b.mu.Lock() defer b.mu.Unlock() @@ -251,3 +281,23 @@ func (b *bucketBackends) isVersioningConfigRemovedFromBackends(bucketName string return true } + +// isObjectLockConfigAvailableOnBackends checks the backends listed in providerNames against +// bucketBackends to ensure object lock configurations are considered Available on all desired backends. +func (b *bucketBackends) isObjectLockConfigAvailableOnBackends(bucketName string, providerNames []string, c map[string]backendstore.S3Client) bool { + for _, backendName := range providerNames { + if _, ok := c[backendName]; !ok { + // This backend does not exist in the list of available backends. + // The backend may be offline, so it is skipped. + continue + } + + vCondition := b.getObjectLockConfigCondition(bucketName, backendName) + if vCondition == nil || !vCondition.Equal(xpv1.Available()) { + // The object lock config is not Available on this backend. + return false + } + } + + return true +} diff --git a/internal/controller/bucket/consts.go b/internal/controller/bucket/consts.go index 3c7f1df7..e58f52ad 100644 --- a/internal/controller/bucket/consts.go +++ b/internal/controller/bucket/consts.go @@ -24,6 +24,10 @@ const ( errObserveVersioningConfig = "failed to observe bucket versioning configuration" errHandleVersioningConfig = "failed to handle bucket versioning configuration" + // Object lock configuration error messages. + errObserveObjectLockConfig = "failed to observe object lock configuration" + errHandleObjectLockConfig = "failed to handle object lock configuration" + // ACL error messages. 
errObserveAcl = "failed to observe bucket acl" errHandleAcl = "failed to handle bucket acl" diff --git a/internal/controller/bucket/helpers.go b/internal/controller/bucket/helpers.go index f2b49c74..a26bbf33 100644 --- a/internal/controller/bucket/helpers.go +++ b/internal/controller/bucket/helpers.go @@ -72,6 +72,12 @@ func isPauseRequired(bucket *v1alpha1.Bucket, providerNames []string, c map[stri return false } + // Avoid pausing when an object lock configuration is specified in the spec, but not all + // object lock configs are available. + if bucket.Spec.ForProvider.ObjectLockConfiguration != nil && !bb.isObjectLockConfigAvailableOnBackends(bucket.Name, providerNames, c) { + return false + } + return (bucket.Spec.AutoPause || autopauseEnabled) && // Only return true if this label value is "". // This is to allow the user to delete a paused bucket with autopause enabled. diff --git a/internal/controller/bucket/helpers_test.go b/internal/controller/bucket/helpers_test.go index f8eeff9f..02fbf6a2 100644 --- a/internal/controller/bucket/helpers_test.go +++ b/internal/controller/bucket/helpers_test.go @@ -1225,6 +1225,176 @@ func TestIsPauseRequired(t *testing.T) { pauseIsRequired: true, }, }, + "Object lock config specified but unavailable on one backend - no pause": { + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + Labels: map[string]string{ + meta.AnnotationKeyReconciliationPaused: "", + }, + }, + Spec: v1alpha1.BucketSpec{ + AutoPause: true, + ForProvider: v1alpha1.BucketParameters{ + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + }, + }, + }, + Status: v1alpha1.BucketStatus{ + ResourceStatus: xpv1.ResourceStatus{ + ConditionedStatus: xpv1.ConditionedStatus{ + Conditions: []xpv1.Condition{ + xpv1.Available(), + xpv1.ReconcileSuccess(), + }, + }, + }, + }, + }, + providerNames: []string{"s3-backend-1", "s3-backend-2", "s3-backend-3"}, + clients: map[string]backendstore.S3Client{ + "s3-backend-1": nil, + "s3-backend-2": nil, + "s3-backend-3": nil, + }, + bucketBackends: &bucketBackends{ + backends: map[string]v1alpha1.Backends{ + "bucket": { + "s3-backend-1": &v1alpha1.BackendInfo{ + BucketCondition: xpv1.Available(), + ObjectLockConfigurationCondition: &available, + }, + "s3-backend-2": &v1alpha1.BackendInfo{ + BucketCondition: xpv1.Available(), + ObjectLockConfigurationCondition: &available, + }, + "s3-backend-3": &v1alpha1.BackendInfo{ + BucketCondition: xpv1.Available(), + ObjectLockConfigurationCondition: &unavailable, + }, + }, + }, + }, + }, + want: want{ + pauseIsRequired: false, + }, + }, + "Object lock config specified but missing on one backend - no pause": { + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + Labels: map[string]string{ + meta.AnnotationKeyReconciliationPaused: "", + }, + }, + Spec: v1alpha1.BucketSpec{ + AutoPause: true, + ForProvider: v1alpha1.BucketParameters{ + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + }, + }, + }, + Status: v1alpha1.BucketStatus{ + ResourceStatus: xpv1.ResourceStatus{ + ConditionedStatus: xpv1.ConditionedStatus{ + Conditions: []xpv1.Condition{ + xpv1.Available(), + xpv1.ReconcileSuccess(), + }, + }, + }, + }, + }, + providerNames: []string{"s3-backend-1", "s3-backend-2", "s3-backend-3"}, + clients: map[string]backendstore.S3Client{ + "s3-backend-1": nil, + "s3-backend-2": nil, + "s3-backend-3": nil, + }, + bucketBackends: &bucketBackends{ + 
backends: map[string]v1alpha1.Backends{ + "bucket": { + "s3-backend-1": &v1alpha1.BackendInfo{ + BucketCondition: xpv1.Available(), + ObjectLockConfigurationCondition: &available, + }, + "s3-backend-2": &v1alpha1.BackendInfo{ + BucketCondition: xpv1.Available(), + ObjectLockConfigurationCondition: &available, + }, + "s3-backend-3": &v1alpha1.BackendInfo{ + BucketCondition: xpv1.Available(), + }, + }, + }, + }, + }, + want: want{ + pauseIsRequired: false, + }, + }, + "Object lock config specified and available on all backends - pause": { + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + Labels: map[string]string{ + meta.AnnotationKeyReconciliationPaused: "", + }, + }, + Spec: v1alpha1.BucketSpec{ + AutoPause: true, + ForProvider: v1alpha1.BucketParameters{ + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + }, + }, + }, + Status: v1alpha1.BucketStatus{ + ResourceStatus: xpv1.ResourceStatus{ + ConditionedStatus: xpv1.ConditionedStatus{ + Conditions: []xpv1.Condition{ + xpv1.Available(), + xpv1.ReconcileSuccess(), + }, + }, + }, + }, + }, + providerNames: []string{"s3-backend-1", "s3-backend-2", "s3-backend-3"}, + clients: map[string]backendstore.S3Client{ + "s3-backend-1": nil, + "s3-backend-2": nil, + "s3-backend-3": nil, + }, + bucketBackends: &bucketBackends{ + backends: map[string]v1alpha1.Backends{ + "bucket": { + "s3-backend-1": &v1alpha1.BackendInfo{ + BucketCondition: xpv1.Available(), + ObjectLockConfigurationCondition: &available, + }, + "s3-backend-2": &v1alpha1.BackendInfo{ + BucketCondition: xpv1.Available(), + ObjectLockConfigurationCondition: &available, + }, + "s3-backend-3": &v1alpha1.BackendInfo{ + BucketCondition: xpv1.Available(), + ObjectLockConfigurationCondition: &available, + }, + }, + }, + }, + }, + want: want{ + pauseIsRequired: true, + }, + }, "All subresources specified and available on all backends and autopause enabled for bucket - pause": { args: args{ bucket: &v1alpha1.Bucket{ @@ -1247,6 +1417,9 @@ func TestIsPauseRequired(t *testing.T) { VersioningConfiguration: &v1alpha1.VersioningConfiguration{ Status: &vEnabled, }, + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + }, }, }, Status: v1alpha1.BucketStatus{ @@ -1273,16 +1446,19 @@ func TestIsPauseRequired(t *testing.T) { BucketCondition: xpv1.Available(), LifecycleConfigurationCondition: &available, VersioningConfigurationCondition: &available, + ObjectLockConfigurationCondition: &available, }, "s3-backend-2": &v1alpha1.BackendInfo{ BucketCondition: xpv1.Available(), LifecycleConfigurationCondition: &available, VersioningConfigurationCondition: &available, + ObjectLockConfigurationCondition: &available, }, "s3-backend-3": &v1alpha1.BackendInfo{ BucketCondition: xpv1.Available(), LifecycleConfigurationCondition: &available, VersioningConfigurationCondition: &available, + ObjectLockConfigurationCondition: &available, }, }, }, @@ -1313,6 +1489,9 @@ func TestIsPauseRequired(t *testing.T) { VersioningConfiguration: &v1alpha1.VersioningConfiguration{ Status: &vEnabled, }, + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + }, }, }, Status: v1alpha1.BucketStatus{ @@ -1339,16 +1518,19 @@ func TestIsPauseRequired(t *testing.T) { BucketCondition: xpv1.Available(), LifecycleConfigurationCondition: &available, VersioningConfigurationCondition: &available, + ObjectLockConfigurationCondition: &available, }, "s3-backend-2": 
&v1alpha1.BackendInfo{ BucketCondition: xpv1.Available(), LifecycleConfigurationCondition: &available, VersioningConfigurationCondition: &available, + ObjectLockConfigurationCondition: &available, }, "s3-backend-3": &v1alpha1.BackendInfo{ BucketCondition: xpv1.Available(), LifecycleConfigurationCondition: &available, VersioningConfigurationCondition: &available, + ObjectLockConfigurationCondition: &available, }, }, }, diff --git a/internal/controller/bucket/lifecycleconfiguration.go b/internal/controller/bucket/lifecycleconfiguration.go index 88ae3d8e..cce4a5fe 100644 --- a/internal/controller/bucket/lifecycleconfiguration.go +++ b/internal/controller/bucket/lifecycleconfiguration.go @@ -66,7 +66,7 @@ func (l *LifecycleConfigurationClient) Observe(ctx context.Context, bucket *v1al return NeedsUpdate, err case observation := <-observationChan: - if observation != Updated { + if observation == NeedsUpdate || observation == NeedsDeletion { return observation, nil } case err := <-errChan: @@ -85,11 +85,11 @@ func (l *LifecycleConfigurationClient) observeBackend(ctx context.Context, bucke l.log.Info("Observing subresource lifecycle configuration on backend", consts.KeyBucketName, bucket.Name, consts.KeyBackendName, backendName) if l.backendStore.GetBackendHealthStatus(backendName) == apisv1alpha1.HealthStatusUnhealthy { - // If a backend is marked as unhealthy, we can ignore it for now by returning Updated. + // If a backend is marked as unhealthy, we can ignore it for now by returning NoAction. // The backend may be down for some time and we do not want to block Create/Update/Delete // calls on other backends. By returning NeedsUpdate here, we would never pass the Observe // phase until the backend becomes Healthy or Disabled. - return Updated, nil + return NoAction, nil } s3Client, err := l.s3ClientHandler.GetS3Client(ctx, bucket, backendName) @@ -108,7 +108,7 @@ func (l *LifecycleConfigurationClient) observeBackend(ctx context.Context, bucke // No lifecycle config found on this backend. l.log.Info("No lifecycle configuration found on backend - no action required", consts.KeyBucketName, bucket.Name, consts.KeyBackendName, backendName) - return Updated, nil + return NoAction, nil } else { l.log.Info("Lifecycle configuration found on backend - requires deletion", consts.KeyBucketName, bucket.Name, consts.KeyBackendName, backendName) @@ -158,8 +158,14 @@ func (l *LifecycleConfigurationClient) Handle(ctx context.Context, b *v1alpha1.B } switch observation { - case Updated: + case NoAction: return nil + case Updated: + // The lifecycle config is updated, so we can consider this + // sub resource Available. 
+ available := xpv1.Available() + bb.setLifecycleConfigCondition(b.Name, backendName, &available) + case NeedsDeletion: if err := l.delete(ctx, b, backendName); err != nil { err = errors.Wrap(err, errHandleLifecycleConfig) diff --git a/internal/controller/bucket/lifecycleconfiguration_test.go b/internal/controller/bucket/lifecycleconfiguration_test.go index 54f2c834..1df333a8 100644 --- a/internal/controller/bucket/lifecycleconfiguration_test.go +++ b/internal/controller/bucket/lifecycleconfiguration_test.go @@ -100,7 +100,7 @@ func TestObserveBackend(t *testing.T) { err: errExternal, }, }, - "Attempt to observe lifecycle config on unhealthy backend (consider it updated to unblock)": { + "Attempt to observe lifecycle config on unhealthy backend (consider it NoAction to unblock)": { fields: fields{ backendStore: func() *backendstore.BackendStore { fake := backendstorefakes.FakeS3Client{} @@ -120,7 +120,7 @@ func TestObserveBackend(t *testing.T) { backendName: "s3-backend-1", }, want: want{ - status: Updated, + status: NoAction, err: nil, }, }, @@ -162,7 +162,7 @@ func TestObserveBackend(t *testing.T) { err: nil, }, }, - "Lifecycle config not specified in CR and does exists on backend so is Updated": { + "Lifecycle config not specified in CR and does exists on backend so NoAction": { fields: fields{ backendStore: func() *backendstore.BackendStore { fake := backendstorefakes.FakeS3Client{ @@ -192,7 +192,7 @@ func TestObserveBackend(t *testing.T) { backendName: "s3-backend-1", }, want: want{ - status: Updated, + status: NoAction, err: nil, }, }, @@ -245,7 +245,7 @@ func TestObserveBackend(t *testing.T) { err: nil, }, }, - "Lifecycle config specified in CR and disabled but does not exist on backend so is Updated": { + "Lifecycle config specified in CR and disabled but does not exist on backend so is NoAction": { fields: fields{ backendStore: func() *backendstore.BackendStore { fake := backendstorefakes.FakeS3Client{ @@ -286,7 +286,7 @@ func TestObserveBackend(t *testing.T) { backendName: "s3-backend-1", }, want: want{ - status: Updated, + status: NoAction, err: nil, }, }, diff --git a/internal/controller/bucket/objectlockconfiguration.go b/internal/controller/bucket/objectlockconfiguration.go new file mode 100644 index 00000000..c7ab7c0b --- /dev/null +++ b/internal/controller/bucket/objectlockconfiguration.go @@ -0,0 +1,195 @@ +package bucket + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/document" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/logging" + + "github.com/linode/provider-ceph/apis/provider-ceph/v1alpha1" + apisv1alpha1 "github.com/linode/provider-ceph/apis/v1alpha1" + "github.com/linode/provider-ceph/internal/backendstore" + "github.com/linode/provider-ceph/internal/consts" + "github.com/linode/provider-ceph/internal/controller/s3clienthandler" + "github.com/linode/provider-ceph/internal/otel/traces" + "github.com/linode/provider-ceph/internal/rgw" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + "go.opentelemetry.io/otel" +) + +// ObjectLockConfigurationClient is the client for API methods and reconciling the ObjectLockConfiguration +type ObjectLockConfigurationClient struct { + backendStore *backendstore.BackendStore + s3ClientHandler *s3clienthandler.Handler + log logging.Logger +} + +func NewObjectLockConfigurationClient(b 
*backendstore.BackendStore, h *s3clienthandler.Handler, l logging.Logger) *ObjectLockConfigurationClient { + return &ObjectLockConfigurationClient{backendStore: b, s3ClientHandler: h, log: l} +} + +func (l *ObjectLockConfigurationClient) Observe(ctx context.Context, bucket *v1alpha1.Bucket, backendNames []string) (ResourceStatus, error) { + ctx, span := otel.Tracer("").Start(ctx, "bucket.ObjectLockConfigurationClient.Observe") + defer span.End() + + if bucket.Spec.ForProvider.ObjectLockEnabledForBucket == nil || !*bucket.Spec.ForProvider.ObjectLockEnabledForBucket { + l.log.Info("Object lock configuration not enabled in Bucket CR", consts.KeyBucketName, bucket.Name) + + return Updated, nil + } + + observationChan := make(chan ResourceStatus) + errChan := make(chan error) + + for _, backendName := range backendNames { + beName := backendName + go func() { + observation, err := l.observeBackend(ctx, bucket, beName) + if err != nil { + errChan <- err + + return + } + observationChan <- observation + }() + } + + for i := 0; i < len(backendNames); i++ { + select { + case <-ctx.Done(): + l.log.Info("Context timeout during object lock configuration observation", consts.KeyBucketName, bucket.Name) + err := errors.Wrap(ctx.Err(), errObserveObjectLockConfig) + traces.SetAndRecordError(span, err) + + return NeedsUpdate, err + case observation := <-observationChan: + if observation == NeedsUpdate || observation == NeedsDeletion { + return observation, nil + } + case err := <-errChan: + err = errors.Wrap(err, errObserveObjectLockConfig) + traces.SetAndRecordError(span, err) + + return NeedsUpdate, err + } + } + + return Updated, nil +} + +func (l *ObjectLockConfigurationClient) observeBackend(ctx context.Context, bucket *v1alpha1.Bucket, backendName string) (ResourceStatus, error) { + l.log.Info("Observing subresource object lock configuration on backend", consts.KeyBucketName, bucket.Name, consts.KeyBackendName, backendName) + + if l.backendStore.GetBackendHealthStatus(backendName) == apisv1alpha1.HealthStatusUnhealthy { + // If a backend is marked as unhealthy, we can ignore it for now by returning NoAction. + // The backend may be down for some time and we do not want to block Create/Update/Delete + // calls on other backends. By returning NeedsUpdate here, we would never pass the Observe + // phase until the backend becomes Healthy or Disabled. 
+ return NoAction, nil + } + + s3Client, err := l.s3ClientHandler.GetS3Client(ctx, bucket, backendName) + if err != nil { + return NeedsUpdate, err + } + response, err := rgw.GetObjectLockConfiguration(ctx, s3Client, aws.String(bucket.Name)) + if err != nil { + return NeedsUpdate, err + } + + external := &s3types.ObjectLockConfiguration{} + if response != nil && response.ObjectLockConfiguration != nil { + external = response.ObjectLockConfiguration + } + + desiredVersioningConfig := rgw.GenerateObjectLockConfiguration(bucket.Spec.ForProvider.ObjectLockConfiguration) + + if !cmp.Equal(external, desiredVersioningConfig, cmpopts.IgnoreTypes(document.NoSerde{})) { + l.log.Info("Object lock configuration requires update on backend", consts.KeyBucketName, bucket.Name, consts.KeyBackendName, backendName) + + return NeedsUpdate, nil + } + + return Updated, nil +} + +func (l *ObjectLockConfigurationClient) Handle(ctx context.Context, b *v1alpha1.Bucket, backendName string, bb *bucketBackends) error { + ctx, span := otel.Tracer("").Start(ctx, "bucket.ObjectLockConfigurationClient.Handle") + defer span.End() + + if b.Spec.ForProvider.ObjectLockEnabledForBucket == nil || !*b.Spec.ForProvider.ObjectLockEnabledForBucket { + return nil + } + + observation, err := l.observeBackend(ctx, b, backendName) + if err != nil { + err = errors.Wrap(err, errHandleVersioningConfig) + traces.SetAndRecordError(span, err) + + return err + } + + switch observation { + case NoAction: + return nil + case Updated: + // The object lock config is updated, so we can consider this + // sub resource Available. + available := xpv1.Available() + bb.setObjectLockConfigCondition(b.Name, backendName, &available) + + return nil + case NeedsDeletion: + // Object lock configuration, once enabled, cannot be disabled/deleted. + return nil + case NeedsUpdate: + // Object lock configurations cannot be deleted. However, if object lock + // has been enabled for the bucket and no object lock configuration is + // specified in the Bucket CR Spec, we should default to a basic "enabled" + // object lock configuration. 
+ bucketCopy := b.DeepCopy() + enabled := v1alpha1.ObjectLockEnabledEnabled + if b.Spec.ForProvider.ObjectLockConfiguration == nil { + bucketCopy.Spec.ForProvider.ObjectLockConfiguration = &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &enabled, + } + } + if err := l.createOrUpdate(ctx, bucketCopy, backendName); err != nil { + err = errors.Wrap(err, errHandleObjectLockConfig) + unavailable := xpv1.Unavailable().WithMessage(err.Error()) + bb.setObjectLockConfigCondition(bucketCopy.Name, backendName, &unavailable) + + traces.SetAndRecordError(span, err) + + return err + } + + available := xpv1.Available() + bb.setObjectLockConfigCondition(bucketCopy.Name, backendName, &available) + } + + return nil +} + +func (l *ObjectLockConfigurationClient) createOrUpdate(ctx context.Context, b *v1alpha1.Bucket, backendName string) error { + l.log.Info("Updating object lock configuration", consts.KeyBucketName, b.Name, consts.KeyBackendName, backendName) + s3Client, err := l.s3ClientHandler.GetS3Client(ctx, b, backendName) + if err != nil { + return err + } + + _, err = rgw.PutObjectLockConfiguration(ctx, s3Client, b) + if err != nil { + return err + } + + return nil +} diff --git a/internal/controller/bucket/objectlockconfiguration_test.go b/internal/controller/bucket/objectlockconfiguration_test.go new file mode 100644 index 00000000..77b3d7c5 --- /dev/null +++ b/internal/controller/bucket/objectlockconfiguration_test.go @@ -0,0 +1,508 @@ +/* +Copyright 2022 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bucket + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/s3" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/linode/provider-ceph/apis/provider-ceph/v1alpha1" + apisv1alpha1 "github.com/linode/provider-ceph/apis/v1alpha1" + "github.com/linode/provider-ceph/internal/backendstore" + "github.com/linode/provider-ceph/internal/backendstore/backendstorefakes" + "github.com/linode/provider-ceph/internal/controller/s3clienthandler" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var objLockEnabled = v1alpha1.ObjectLockEnabledEnabled + +func TestObjectLockConfigObserveBackend(t *testing.T) { + t.Parallel() + + type fields struct { + backendStore *backendstore.BackendStore + } + + type args struct { + bucket *v1alpha1.Bucket + backendName string + } + + type want struct { + status ResourceStatus + err error + } + + cases := map[string]struct { + reason string + fields fields + args args + want want + }{ + "Attempt to observe object lock config on unhealthy backend (consider it NoAction to unblock)": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{} + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusUnhealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{ + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + }, + }, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + status: NoAction, + err: nil, + }, + }, + "External error getting object lock": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{ + + GetObjectLockConfigurationStub: func(ctx context.Context, lci *s3.GetObjectLockConfigurationInput, f ...func(*s3.Options)) (*s3.GetObjectLockConfigurationOutput, error) { + return &s3.GetObjectLockConfigurationOutput{}, errExternal + }, + } + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{ + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + }, + }, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + status: NeedsUpdate, + err: errExternal, + }, + }, + "Object lock config specified in CR and exists on backend and is the same so is Updated": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{ + + GetObjectLockConfigurationStub: func(ctx context.Context, lci *s3.GetObjectLockConfigurationInput, f ...func(*s3.Options)) (*s3.GetObjectLockConfigurationOutput, error) { + return &s3.GetObjectLockConfigurationOutput{ + ObjectLockConfiguration: &s3types.ObjectLockConfiguration{ + ObjectLockEnabled: s3types.ObjectLockEnabledEnabled, + }, + }, nil + }, + } + + bs := backendstore.NewBackendStore() + 
bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{ + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + }, + }, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + status: Updated, + err: nil, + }, + }, + "Object lock config specified in CR and exists on backend but is different so is NeedsUpdate": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{ + + GetObjectLockConfigurationStub: func(ctx context.Context, lci *s3.GetObjectLockConfigurationInput, f ...func(*s3.Options)) (*s3.GetObjectLockConfigurationOutput, error) { + return &s3.GetObjectLockConfigurationOutput{ + ObjectLockConfiguration: &s3types.ObjectLockConfiguration{ + ObjectLockEnabled: s3types.ObjectLockEnabledEnabled, + Rule: &s3types.ObjectLockRule{ + DefaultRetention: &s3types.DefaultRetention{ + Mode: s3types.ObjectLockRetentionModeCompliance, + }, + }, + }, + }, nil + }, + } + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{ + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + Rule: &v1alpha1.ObjectLockRule{ + DefaultRetention: &v1alpha1.DefaultRetention{ + Mode: v1alpha1.ModeGovernance, + }, + }, + }, + }, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + status: NeedsUpdate, + err: nil, + }, + }, + } + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + c := NewObjectLockConfigurationClient( + tc.fields.backendStore, + s3clienthandler.NewHandler( + s3clienthandler.WithAssumeRoleArn(nil), + s3clienthandler.WithBackendStore(tc.fields.backendStore)), + logging.NewNopLogger()) + + got, err := c.observeBackend(context.Background(), tc.args.bucket, tc.args.backendName) + require.ErrorIs(t, err, tc.want.err, "unexpected error") + assert.Equal(t, tc.want.status, got, "unexpected status") + }) + } +} + +//nolint:maintidx //Test with lots of cases. 
+func TestObjectLockConfigurationHandle(t *testing.T) { + t.Parallel() + bucketName := "bucket" + beName := "s3-backend-1" + creating := v1.Creating() + errRandom := errors.New("some error") + type fields struct { + backendStore *backendstore.BackendStore + } + + type args struct { + bucket *v1alpha1.Bucket + backendName string + } + + type want struct { + err error + specificDiff func(t *testing.T, bb *bucketBackends) + } + + cases := map[string]struct { + reason string + fields fields + args args + want want + }{ + "Object lock is not enabled for Bucket CR - nil value": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{} + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{}, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + err: nil, + }, + }, + "Object lock is not enabled for Bucket CR - false": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{} + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{ + ObjectLockEnabledForBucket: &enabledFalse, + }, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + err: nil, + }, + }, + "Object lock config is up to date so no action required": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{ + GetObjectLockConfigurationStub: func(ctx context.Context, lci *s3.GetObjectLockConfigurationInput, f ...func(*s3.Options)) (*s3.GetObjectLockConfigurationOutput, error) { + return &s3.GetObjectLockConfigurationOutput{ + ObjectLockConfiguration: &s3types.ObjectLockConfiguration{ + ObjectLockEnabled: s3types.ObjectLockEnabledEnabled, + Rule: &s3types.ObjectLockRule{ + DefaultRetention: &s3types.DefaultRetention{ + Mode: s3types.ObjectLockRetentionModeCompliance, + }, + }, + }, + }, nil + }, + } + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{ + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + Rule: &v1alpha1.ObjectLockRule{ + DefaultRetention: &v1alpha1.DefaultRetention{ + Mode: v1alpha1.ModeCompliance, + }, + }, + }, + }, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + err: nil, + }, + }, + "Object lock config updates successfully": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{ + GetObjectLockConfigurationStub: func(ctx context.Context, lci *s3.GetObjectLockConfigurationInput, f ...func(*s3.Options)) (*s3.GetObjectLockConfigurationOutput, error) { + return &s3.GetObjectLockConfigurationOutput{ + ObjectLockConfiguration: &s3types.ObjectLockConfiguration{ + ObjectLockEnabled: s3types.ObjectLockEnabledEnabled, + 
Rule: &s3types.ObjectLockRule{ + DefaultRetention: &s3types.DefaultRetention{ + Mode: s3types.ObjectLockRetentionModeCompliance, + }, + }, + }, + }, nil + }, + } + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{ + ObjectLockEnabledForBucket: &enabledTrue, + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + Rule: &v1alpha1.ObjectLockRule{ + DefaultRetention: &v1alpha1.DefaultRetention{ + Mode: v1alpha1.ModeGovernance, + }, + }, + }, + }, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + err: nil, + specificDiff: func(t *testing.T, bb *bucketBackends) { + t.Helper() + backends := bb.getBackends(bucketName, []string{beName}) + assert.True(t, + backends[beName].ObjectLockConfigurationCondition.Equal(v1.Available()), + "unexpected object lock config condition on s3-backend-1") + }, + }, + }, + "Object lock config update fails": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{ + GetObjectLockConfigurationStub: func(ctx context.Context, lci *s3.GetObjectLockConfigurationInput, f ...func(*s3.Options)) (*s3.GetObjectLockConfigurationOutput, error) { + return &s3.GetObjectLockConfigurationOutput{ + ObjectLockConfiguration: &s3types.ObjectLockConfiguration{ + ObjectLockEnabled: s3types.ObjectLockEnabledEnabled, + Rule: &s3types.ObjectLockRule{ + DefaultRetention: &s3types.DefaultRetention{ + Mode: s3types.ObjectLockRetentionModeCompliance, + }, + }, + }, + }, nil + }, + PutObjectLockConfigurationStub: func(ctx context.Context, lci *s3.PutObjectLockConfigurationInput, f ...func(*s3.Options)) (*s3.PutObjectLockConfigurationOutput, error) { + return &s3.PutObjectLockConfigurationOutput{}, errRandom + }, + } + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{ + ObjectLockEnabledForBucket: &enabledTrue, + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &objLockEnabled, + Rule: &v1alpha1.ObjectLockRule{ + DefaultRetention: &v1alpha1.DefaultRetention{ + Mode: v1alpha1.ModeGovernance, + }, + }, + }, + }, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + err: errRandom, + specificDiff: func(t *testing.T, bb *bucketBackends) { + t.Helper() + backends := bb.getBackends(bucketName, []string{beName}) + assert.True(t, + backends[beName].ObjectLockConfigurationCondition.Equal(v1.Unavailable().
+ WithMessage(errors.Wrap(errors.Wrap(errRandom, "failed to put object lock configuration"), errHandleObjectLockConfig).Error())), + "unexpected object lock config condition on s3-backend-1") + }, + }, + }, + } + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + c := NewObjectLockConfigurationClient( + tc.fields.backendStore, + s3clienthandler.NewHandler( + s3clienthandler.WithAssumeRoleArn(nil), + s3clienthandler.WithBackendStore(tc.fields.backendStore)), + logging.NewNopLogger()) + + bb := newBucketBackends() + bb.setObjectLockConfigCondition(bucketName, beName, &creating) + + err := c.Handle(context.Background(), tc.args.bucket, tc.args.backendName, bb) + require.ErrorIs(t, err, tc.want.err, "unexpected error") + if tc.want.specificDiff != nil { + tc.want.specificDiff(t, bb) + } + }) + } +} diff --git a/internal/controller/bucket/observe.go b/internal/controller/bucket/observe.go index 0d1b8a41..a8c1b5e2 100644 --- a/internal/controller/bucket/observe.go +++ b/internal/controller/bucket/observe.go @@ -85,7 +85,7 @@ func (c *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex return managed.ExternalObservation{}, err } - if obs != Updated { + if obs == NeedsUpdate || obs == NeedsDeletion { return managed.ExternalObservation{ ResourceExists: true, ResourceUpToDate: false, diff --git a/internal/controller/bucket/policy.go b/internal/controller/bucket/policy.go index ff4a14e7..0b6e1862 100644 --- a/internal/controller/bucket/policy.go +++ b/internal/controller/bucket/policy.go @@ -60,7 +60,7 @@ func (p *PolicyClient) Observe(ctx context.Context, bucket *v1alpha1.Bucket, bac return NeedsUpdate, err case observation := <-observationChan: - if observation != Updated { + if observation == NeedsUpdate || observation == NeedsDeletion { return observation, nil } case err := <-errChan: @@ -140,7 +140,7 @@ func (p *PolicyClient) Handle(ctx context.Context, b *v1alpha1.Bucket, backendNa } switch observation { - case Updated: + case NoAction, Updated: return nil case NeedsDeletion: if err := p.delete(ctx, b, backendName); err != nil { diff --git a/internal/controller/bucket/subresources.go b/internal/controller/bucket/subresources.go index 0aa67d95..634f2a50 100644 --- a/internal/controller/bucket/subresources.go +++ b/internal/controller/bucket/subresources.go @@ -39,6 +39,7 @@ func NewSubresourceClients(b *backendstore.BackendStore, h *s3clienthandler.Hand NewACLClient(b, h, l.WithValues("acl-client", managed.ControllerName(v1alpha1.BucketGroupKind))), NewPolicyClient(b, h, l.WithValues("policy-client", managed.ControllerName(v1alpha1.BucketGroupKind))), NewVersioningConfigurationClient(b, h, l.WithValues("versioning-configuration-client", managed.ControllerName(v1alpha1.BucketGroupKind))), + NewObjectLockConfigurationClient(b, h, l.WithValues("object-lock-configuration-client", managed.ControllerName(v1alpha1.BucketGroupKind))), } } @@ -46,8 +47,10 @@ func NewSubresourceClients(b *backendstore.BackendStore, h *s3clienthandler.Hand type ResourceStatus int const ( + // NoAction is returned if the resource requires no action. + NoAction ResourceStatus = iota // Updated is returned if the resource is updated. - Updated ResourceStatus = iota + Updated // NeedsUpdate is returned if the resource required updating. NeedsUpdate // NeedsDeletion is returned if the resource needs to be deleted.
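Note on the ResourceStatus change above: the new NoAction value lets observe paths distinguish "nothing to reconcile and nothing to record" (for example an unhealthy backend, or no configuration in the CR and none on the backend) from Updated, which now strictly means the backend matches the CR. The versioningconfiguration.go hunk further down uses Updated to refresh the Available condition, while policy.go treats NoAction and Updated alike as a no-op. The following is a minimal, self-contained sketch of that branching using toy stand-ins; it is illustrative only and not the provider's actual handler code.

package main

import "fmt"

// ResourceStatus mirrors the enum added in subresources.go: NoAction is the
// new zero value, followed by Updated, NeedsUpdate and NeedsDeletion.
type ResourceStatus int

const (
	NoAction ResourceStatus = iota
	Updated
	NeedsUpdate
	NeedsDeletion
)

// handle sketches how a subresource Handle method branches on an observation.
// markAvailable, update and remove stand in for the condition bookkeeping and
// S3 calls performed by the real clients.
func handle(obs ResourceStatus, markAvailable, update, remove func() error) error {
	switch obs {
	case NoAction:
		// Nothing to reconcile and nothing worth recording (e.g. the backend
		// is unhealthy, or no configuration exists in the CR or on the backend).
		return nil
	case Updated:
		// The backend already matches the CR; only the condition is refreshed.
		return markAvailable()
	case NeedsDeletion:
		return remove()
	case NeedsUpdate:
		return update()
	}

	return fmt.Errorf("unknown resource status %d", obs)
}

func main() {
	noop := func() error { return nil }
	fmt.Println(handle(Updated, noop, noop, noop)) // prints <nil>
}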
diff --git a/internal/controller/bucket/update_test.go b/internal/controller/bucket/update_test.go index 02f30f20..75415244 100644 --- a/internal/controller/bucket/update_test.go +++ b/internal/controller/bucket/update_test.go @@ -28,6 +28,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" ) +var vEnabled = v1alpha1.VersioningStatusEnabled +var lEnabled = v1alpha1.ObjectLockEnabledEnabled + func TestUpdateBasicErrors(t *testing.T) { t.Parallel() @@ -395,6 +398,22 @@ func TestUpdate(t *testing.T) { Providers: []string{ "s3-backend-1", }, + ForProvider: v1alpha1.BucketParameters{ + LifecycleConfiguration: &v1alpha1.BucketLifecycleConfiguration{ + Rules: []v1alpha1.LifecycleRule{ + { + Status: "Enabled", + }, + }, + }, + VersioningConfiguration: &v1alpha1.VersioningConfiguration{ + Status: &vEnabled, + }, + ObjectLockEnabledForBucket: &enabledTrue, + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &lEnabled, + }, + }, }, }, }, @@ -414,6 +433,15 @@ func TestUpdate(t *testing.T) { assert.True(t, bucket.Status.AtProvider.Backends["s3-backend-1"].BucketCondition.Equal(v1.Available()), "bucket condition on s3-backend-1 is not available") + assert.True(t, + bucket.Status.AtProvider.Backends["s3-backend-1"].LifecycleConfigurationCondition.Equal(v1.Available()), + "lifecycle config condition on s3-backend-1 is not available") + assert.True(t, + bucket.Status.AtProvider.Backends["s3-backend-1"].VersioningConfigurationCondition.Equal(v1.Available()), + "versioning config condition on s3-backend-1 is not available") + assert.True(t, + bucket.Status.AtProvider.Backends["s3-backend-1"].ObjectLockConfigurationCondition.Equal(v1.Available()), + "object lock config condition on s3-backend-1 is not available") assert.Equal(t, map[string]string{ @@ -443,16 +471,19 @@ func TestUpdate(t *testing.T) { WithStatusSubresource(tc.fields.initObjects...). WithScheme(s).Build() + s3ClientHandler := s3clienthandler.NewHandler( + s3clienthandler.WithAssumeRoleArn(tc.fields.roleArn), + s3clienthandler.WithBackendStore(tc.fields.backendStore), + s3clienthandler.WithKubeClient(cl)) + e := external{ - kubeClient: cl, - backendStore: tc.fields.backendStore, - s3ClientHandler: s3clienthandler.NewHandler( - s3clienthandler.WithAssumeRoleArn(tc.fields.roleArn), - s3clienthandler.WithBackendStore(tc.fields.backendStore), - s3clienthandler.WithKubeClient(cl)), - autoPauseBucket: tc.fields.autoPauseBucket, - minReplicas: 1, - log: logging.NewNopLogger(), + kubeClient: cl, + backendStore: tc.fields.backendStore, + s3ClientHandler: s3ClientHandler, + autoPauseBucket: tc.fields.autoPauseBucket, + minReplicas: 1, + log: logging.NewNopLogger(), + subresourceClients: NewSubresourceClients(tc.fields.backendStore, s3ClientHandler, logging.NewNopLogger()), } got, err := e.Update(context.Background(), tc.args.mg) @@ -784,7 +815,6 @@ func TestUpdateLifecycleConfigSubResource(t *testing.T) { func TestUpdateVersioningConfigSubResource(t *testing.T) { t.Parallel() someError := errors.New("some error") - vEnabled := v1alpha1.VersioningStatusEnabled type fields struct { backendStore *backendstore.BackendStore @@ -1080,3 +1110,308 @@ func TestUpdateVersioningConfigSubResource(t *testing.T) { }) } } + +//nolint:maintidx // Function requires numerous checks. 
+func TestUpdateObjectLockConfigSubResource(t *testing.T) { + t.Parallel() + someError := errors.New("some error") + + type fields struct { + backendStore *backendstore.BackendStore + autoPauseBucket bool + roleArn *string + initObjects []client.Object + } + + type args struct { + mg resource.Managed + } + + type want struct { + o managed.ExternalUpdate + err error + specificDiff func(t *testing.T, mg resource.Managed) + } + + cases := map[string]struct { + reason string + fields fields + args args + want want + }{ + "Two backends update object lock configuration successfully": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{} + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + bs.AddOrUpdateBackend("s3-backend-2", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + mg: &v1alpha1.Bucket{ + Spec: v1alpha1.BucketSpec{ + Providers: []string{ + "s3-backend-1", + "s3-backend-2", + }, + ForProvider: v1alpha1.BucketParameters{ + ObjectLockEnabledForBucket: &enabledTrue, + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &lEnabled, + }, + }, + }, + }, + }, + want: want{ + o: managed.ExternalUpdate{}, + specificDiff: func(t *testing.T, mg resource.Managed) { + t.Helper() + bucket, _ := mg.(*v1alpha1.Bucket) + + assert.True(t, + bucket.Status.AtProvider.Backends["s3-backend-1"].ObjectLockConfigurationCondition.Equal(v1.Available()), + + "object lock configuration condition on s3-backend-1 is not available") + + assert.True(t, + bucket.Status.AtProvider.Backends["s3-backend-2"].ObjectLockConfigurationCondition.Equal(v1.Available()), + "object lock configuration condition on s3-backend-2 is not available") + }, + }, + }, + "Two backends fail to update object lock config": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{ + PutObjectLockConfigurationStub: func(ctx context.Context, hbi *s3.PutObjectLockConfigurationInput, f ...func(*s3.Options)) (*s3.PutObjectLockConfigurationOutput, error) { + return &s3.PutObjectLockConfigurationOutput{}, someError + }, + } + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + bs.AddOrUpdateBackend("s3-backend-2", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + mg: &v1alpha1.Bucket{ + Spec: v1alpha1.BucketSpec{ + Providers: []string{ + "s3-backend-1", + "s3-backend-2", + }, + ForProvider: v1alpha1.BucketParameters{ + ObjectLockEnabledForBucket: &enabledTrue, + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &lEnabled, + }, + }, + }, + }, + }, + want: want{ + err: someError, + o: managed.ExternalUpdate{}, + specificDiff: func(t *testing.T, mg resource.Managed) { + t.Helper() + bucket, _ := mg.(*v1alpha1.Bucket) + unavailableBackends := []string{"s3-backend-1", "s3-backend-2"} + slices.Sort(unavailableBackends) + + assert.True(t, + bucket.Status.AtProvider.Backends["s3-backend-1"].ObjectLockConfigurationCondition.Equal( + v1.Unavailable().WithMessage( + errors.Wrap( + errors.Wrap(someError, "failed to put object lock configuration"), + "failed to handle object lock configuration").Error(), + ), + ), + "unexpected object lock configuration condition for s3-backend-1") + + assert.True(t, +
bucket.Status.AtProvider.Backends["s3-backend-2"].ObjectLockConfigurationCondition.Equal( + v1.Unavailable().WithMessage( + errors.Wrap( + errors.Wrap(someError, "failed to put object lock configuration"), + "failed to handle object lock configuration").Error(), + ), + ), + "unexpected object lock configuration condition for s3-backend-2") + }, + }, + }, + "One backend updates object lock configuration successfully and one fails to update": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fakeErr := backendstorefakes.FakeS3Client{ + PutObjectLockConfigurationStub: func(ctx context.Context, hbi *s3.PutObjectLockConfigurationInput, f ...func(*s3.Options)) (*s3.PutObjectLockConfigurationOutput, error) { + return &s3.PutObjectLockConfigurationOutput{}, someError + }, + } + fakeOK := backendstorefakes.FakeS3Client{} + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fakeOK, nil, true, apisv1alpha1.HealthStatusHealthy) + bs.AddOrUpdateBackend("s3-backend-2", &fakeErr, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + mg: &v1alpha1.Bucket{ + Spec: v1alpha1.BucketSpec{ + Providers: []string{ + "s3-backend-1", + "s3-backend-2", + }, + ForProvider: v1alpha1.BucketParameters{ + ObjectLockEnabledForBucket: &enabledTrue, + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &lEnabled, + }, + }, + }, + }, + }, + want: want{ + err: someError, + o: managed.ExternalUpdate{}, + specificDiff: func(t *testing.T, mg resource.Managed) { + t.Helper() + bucket, _ := mg.(*v1alpha1.Bucket) + + assert.True(t, + bucket.Status.AtProvider.Backends["s3-backend-1"].ObjectLockConfigurationCondition.Equal(v1.Available()), + "unexpected object lock configuration condition for s3-backend-1") + + assert.True(t, + bucket.Status.AtProvider.Backends["s3-backend-2"].ObjectLockConfigurationCondition.Equal( + v1.Unavailable().WithMessage( + errors.Wrap( + errors.Wrap(someError, "failed to put object lock configuration"), + "failed to handle object lock configuration").Error(), + ), + ), + "unexpected object lock configuration condition for s3-backend-2") + }, + }, + }, + "Single backend updates object lock configuration successfully and is autopaused": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{} + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + autoPauseBucket: true, + initObjects: []client.Object{ + &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + Annotations: map[string]string{ + "test": "test", + }, + }, + }, + }, + }, + args: args{ + mg: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + Annotations: map[string]string{ + "test": "test", + }, + }, + Spec: v1alpha1.BucketSpec{ + Providers: []string{ + "s3-backend-1", + }, + ForProvider: v1alpha1.BucketParameters{ + ObjectLockEnabledForBucket: &enabledTrue, + ObjectLockConfiguration: &v1alpha1.ObjectLockConfiguration{ + ObjectLockEnabled: &lEnabled, + }, + }, + }, + }, + }, + want: want{ + o: managed.ExternalUpdate{}, + specificDiff: func(t *testing.T, mg resource.Managed) { + t.Helper() + bucket, _ := mg.(*v1alpha1.Bucket) + assert.True(t, + bucket.Status.Conditions[0].Equal(v1.Available()), + "unexpected bucket ready condition") + + assert.True(t, + bucket.Status.Conditions[1].Equal(v1.ReconcileSuccess()), + "unexpected bucket synced
condition") + + assert.True(t, + bucket.Status.AtProvider.Backends["s3-backend-1"].ObjectLockConfigurationCondition.Equal(v1.Available()), + "object lock configuration condition on s3-backend-1 is not available") + + assert.Equal(t, + map[string]string{ + meta.AnnotationKeyReconciliationPaused: True, + "provider-ceph.backends.s3-backend-1": True, + }, + bucket.Labels, + "unexpected bucket labels", + ) + }, + }, + }, + } + + bk := &v1alpha1.Bucket{} + s := scheme.Scheme + s.AddKnownTypes(apisv1alpha1.SchemeGroupVersion, bk) + + for name, tc := range cases { + tc := tc + + t.Run(name, func(t *testing.T) { + t.Parallel() + + cl := fake.NewClientBuilder(). + WithObjects(tc.fields.initObjects...). + WithStatusSubresource(tc.fields.initObjects...). + WithScheme(s).Build() + + s3ClientHandler := s3clienthandler.NewHandler( + s3clienthandler.WithAssumeRoleArn(tc.fields.roleArn), + s3clienthandler.WithBackendStore(tc.fields.backendStore), + s3clienthandler.WithKubeClient(cl)) + + e := external{ + kubeClient: cl, + backendStore: tc.fields.backendStore, + s3ClientHandler: s3ClientHandler, + autoPauseBucket: tc.fields.autoPauseBucket, + minReplicas: 1, + log: logging.NewNopLogger(), + subresourceClients: NewSubresourceClients(tc.fields.backendStore, s3ClientHandler, logging.NewNopLogger()), + } + + got, err := e.Update(context.Background(), tc.args.mg) + require.ErrorIs(t, err, tc.want.err, "unexpected err") + assert.Equal(t, got, tc.want.o, "unexpected result") + if tc.want.specificDiff != nil { + tc.want.specificDiff(t, tc.args.mg) + } + }) + } +} diff --git a/internal/controller/bucket/versioningconfiguration.go b/internal/controller/bucket/versioningconfiguration.go index ec9bf4f3..a5f41aff 100644 --- a/internal/controller/bucket/versioningconfiguration.go +++ b/internal/controller/bucket/versioningconfiguration.go @@ -66,7 +66,7 @@ func (l *VersioningConfigurationClient) Observe(ctx context.Context, bucket *v1a return NeedsUpdate, err case observation := <-observationChan: - if observation != Updated { + if observation == NeedsUpdate || observation == NeedsDeletion { return observation, nil } case err := <-errChan: @@ -84,11 +84,11 @@ func (l *VersioningConfigurationClient) observeBackend(ctx context.Context, buck l.log.Info("Observing subresource versioning configuration on backend", consts.KeyBucketName, bucket.Name, consts.KeyBackendName, backendName) if l.backendStore.GetBackendHealthStatus(backendName) == apisv1alpha1.HealthStatusUnhealthy { - // If a backend is marked as unhealthy, we can ignore it for now by returning Updated. + // If a backend is marked as unhealthy, we can ignore it for now by returning NoAction. // The backend may be down for some time and we do not want to block Create/Update/Delete // calls on other backends. By returning NeedsUpdate here, we would never pass the Observe // phase until the backend becomes Healthy or Disabled. - return Updated, nil + return NoAction, nil } s3Client, err := l.s3ClientHandler.GetS3Client(ctx, bucket, backendName) @@ -100,17 +100,19 @@ func (l *VersioningConfigurationClient) observeBackend(ctx context.Context, buck return NeedsUpdate, err } - if bucket.Spec.ForProvider.VersioningConfiguration == nil { - // No versioining config was defined by the user in the Bucket CR Spec. - // This is should result in (a) an unversioned bucket remaining unversioned - // OR (b) a versioned bucket having versioning suspended. 
+ if bucket.Spec.ForProvider.VersioningConfiguration == nil && + (bucket.Spec.ForProvider.ObjectLockEnabledForBucket == nil || !*bucket.Spec.ForProvider.ObjectLockEnabledForBucket) { + // No versioning config was defined by the user in the Bucket CR Spec and + // object lock was not enabled for the bucket. This should result in + // (a) an unversioned bucket remaining unversioned OR (b) a versioned bucket + // having versioning suspended. if response == nil || (response.Status == "" && response.MFADelete == "") { // An empty versioning configuration was returned from the backend, signifying // that versioning was never enabled on this bucket. Therefore versioning is // considered Updated for the bucket and we do nothing. l.log.Info("Versioning is not enabled for bucket on backend - no action required", consts.KeyBucketName, bucket.Name, consts.KeyBackendName, backendName) - return Updated, nil + return NoAction, nil } else { // A non-empty versioning configuration was returned from the backend, signifying // that versioning was previously enabled for this bucket. A bucket cannot be un-versioned, @@ -151,7 +153,14 @@ func (l *VersioningConfigurationClient) Handle(ctx context.Context, b *v1alpha1. } switch observation { + case NoAction: + return nil case Updated: + // The versioning config is updated, so we can consider this + // subresource Available. + available := xpv1.Available() + bb.setVersioningConfigCondition(b.Name, backendName, &available) + return nil case NeedsDeletion: // Versioning Configurations are not deleted, only suspended, which requires an update. @@ -182,17 +191,38 @@ func (l *VersioningConfigurationClient) Handle(ctx context.Context, b *v1alpha1. return nil case NeedsUpdate: - if err := l.createOrUpdate(ctx, b, backendName); err != nil { + bucketCopy := b.DeepCopy() + + // If no versioning configuration was specified, but object lock is enabled + // for the bucket, then versioning should be enabled without MFA delete. + // Create a deep copy of the bucket and give it an enabled versioning config. + // This will be used in the PutBucketVersioning request to enable versioning. + // If objectLockEnabledForBucket was true upon bucket creation, then this + // versioning configuration should already exist. But we perform the operation + // anyway to make sure, as it is idempotent.
+ if b.Spec.ForProvider.VersioningConfiguration == nil && + b.Spec.ForProvider.ObjectLockEnabledForBucket != nil && + *b.Spec.ForProvider.ObjectLockEnabledForBucket { + enabled := v1alpha1.VersioningStatusEnabled + disabled := v1alpha1.MFADeleteDisabled + + bucketCopy.Spec.ForProvider.VersioningConfiguration = &v1alpha1.VersioningConfiguration{ + MFADelete: &disabled, + Status: &enabled, + } + } + + if err := l.createOrUpdate(ctx, bucketCopy, backendName); err != nil { err = errors.Wrap(err, errHandleVersioningConfig) unavailable := xpv1.Unavailable().WithMessage(err.Error()) - bb.setVersioningConfigCondition(b.Name, backendName, &unavailable) + bb.setVersioningConfigCondition(bucketCopy.Name, backendName, &unavailable) traces.SetAndRecordError(span, err) return err } available := xpv1.Available() - bb.setVersioningConfigCondition(b.Name, backendName, &available) + bb.setVersioningConfigCondition(bucketCopy.Name, backendName, &available) } return nil diff --git a/internal/controller/bucket/versioningconfiguration_test.go b/internal/controller/bucket/versioningconfiguration_test.go index 0dff9253..d963f509 100644 --- a/internal/controller/bucket/versioningconfiguration_test.go +++ b/internal/controller/bucket/versioningconfiguration_test.go @@ -39,6 +39,8 @@ import ( var ( mfaDeleteEnabled = v1alpha1.MFADeleteEnabled vStatusEnabled = v1alpha1.VersioningStatusEnabled + enabledTrue = true + enabledFalse = false ) func TestVersioningConfigObserveBackend(t *testing.T) { @@ -93,7 +95,7 @@ func TestVersioningConfigObserveBackend(t *testing.T) { err: errExternal, }, }, - "Attempt to observe versioniong config on unhealthy backend (consider it updated to unblock)": { + "Attempt to observe versioniong config on unhealthy backend (consider it NoAction to unblock)": { fields: fields{ backendStore: func() *backendstore.BackendStore { fake := backendstorefakes.FakeS3Client{} @@ -113,7 +115,7 @@ func TestVersioningConfigObserveBackend(t *testing.T) { backendName: "s3-backend-1", }, want: want{ - status: Updated, + status: NoAction, err: nil, }, }, @@ -173,7 +175,7 @@ func TestVersioningConfigObserveBackend(t *testing.T) { backendName: "s3-backend-1", }, want: want{ - status: Updated, + status: NoAction, err: nil, }, }, @@ -257,6 +259,43 @@ func TestVersioningConfigObserveBackend(t *testing.T) { err: nil, }, }, + "Versioning config not specified in CR but object lock enabled so NeedsUpdate": { + fields: fields{ + backendStore: func() *backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{ + + GetBucketVersioningStub: func(ctx context.Context, lci *s3.GetBucketVersioningInput, f ...func(*s3.Options)) (*s3.GetBucketVersioningOutput, error) { + return &s3.GetBucketVersioningOutput{ + Status: "Enabled", + MFADelete: "Disabled", + }, nil + }, + } + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{ + ObjectLockEnabledForBucket: &enabledTrue, + }, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + status: NeedsUpdate, + err: nil, + }, + }, } for name, tc := range cases { tc := tc @@ -304,6 +343,42 @@ func TestVersioningConfigurationHandle(t *testing.T) { args args want want }{ + "Object lock enabled for bucket but no versioning config so set default enabled versioning": { + fields: fields{ + backendStore: func() 
*backendstore.BackendStore { + fake := backendstorefakes.FakeS3Client{ + + GetBucketVersioningStub: func(ctx context.Context, lci *s3.GetBucketVersioningInput, f ...func(*s3.Options)) (*s3.GetBucketVersioningOutput, error) { + return &s3.GetBucketVersioningOutput{ + MFADelete: s3types.MFADeleteStatusEnabled, + Status: s3types.BucketVersioningStatusEnabled, + }, nil + }, + } + + bs := backendstore.NewBackendStore() + bs.AddOrUpdateBackend("s3-backend-1", &fake, nil, true, apisv1alpha1.HealthStatusHealthy) + + return bs + }(), + }, + args: args{ + bucket: &v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + }, + Spec: v1alpha1.BucketSpec{ + ForProvider: v1alpha1.BucketParameters{ + ObjectLockEnabledForBucket: &enabledTrue, + }, + }, + }, + backendName: "s3-backend-1", + }, + want: want{ + err: nil, + }, + }, "Versioning config suspends successfully": { fields: fields{ backendStore: func() *backendstore.BackendStore { diff --git a/internal/rgw/objectlockconfiguration.go b/internal/rgw/objectlockconfiguration.go new file mode 100644 index 00000000..83c231ef --- /dev/null +++ b/internal/rgw/objectlockconfiguration.go @@ -0,0 +1,48 @@ +package rgw + +import ( + "context" + + awss3 "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/linode/provider-ceph/apis/provider-ceph/v1alpha1" + "github.com/linode/provider-ceph/internal/backendstore" + "github.com/linode/provider-ceph/internal/otel/traces" + "go.opentelemetry.io/otel" +) + +const ( + errGetObjectLockConfiguration = "failed to get object lock configuration" + errPutObjectLockConfiguration = "failed to put object lock configuration" +) + +func PutObjectLockConfiguration(ctx context.Context, s3Backend backendstore.S3Client, b *v1alpha1.Bucket) (*awss3.PutObjectLockConfigurationOutput, error) { + ctx, span := otel.Tracer("").Start(ctx, "PutObjectLockConfiguration") + defer span.End() + + resp, err := s3Backend.PutObjectLockConfiguration(ctx, GeneratePutObjectLockConfigurationInput(b.Name, b.Spec.ForProvider.ObjectLockConfiguration)) + if err != nil { + err := errors.Wrap(err, errPutObjectLockConfiguration) + traces.SetAndRecordError(span, err) + + return resp, err + } + + return resp, nil +} + +func GetObjectLockConfiguration(ctx context.Context, s3Backend backendstore.S3Client, bucketName *string) (*awss3.GetObjectLockConfigurationOutput, error) { + ctx, span := otel.Tracer("").Start(ctx, "GetObjectLockConfiguration") + defer span.End() + + resp, err := s3Backend.GetObjectLockConfiguration(ctx, &awss3.GetObjectLockConfigurationInput{Bucket: bucketName}) + if resource.IgnoreAny(err, ObjectLockConfigurationNotFound, IsBucketNotFound) != nil { + err = errors.Wrap(err, errGetObjectLockConfiguration) + traces.SetAndRecordError(span, err) + + return resp, err + } + + return resp, nil +} diff --git a/internal/rgw/objectlockconfiguration_helpers.go b/internal/rgw/objectlockconfiguration_helpers.go new file mode 100644 index 00000000..0124772c --- /dev/null +++ b/internal/rgw/objectlockconfiguration_helpers.go @@ -0,0 +1,55 @@ +package rgw + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + awss3 "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go" + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/linode/provider-ceph/apis/provider-ceph/v1alpha1" +) + +// GeneratePutObjectLockConfigurationInput creates the PutObjectLockConfiguration 
for the AWS SDK +func GeneratePutObjectLockConfigurationInput(name string, config *v1alpha1.ObjectLockConfiguration) *awss3.PutObjectLockConfigurationInput { + return &awss3.PutObjectLockConfigurationInput{ + Bucket: aws.String(name), + ObjectLockConfiguration: GenerateObjectLockConfiguration(config), + } +} + +func GenerateObjectLockConfiguration(inputConfig *v1alpha1.ObjectLockConfiguration) *types.ObjectLockConfiguration { + if inputConfig == nil { + return nil + } + + outputConfig := &types.ObjectLockConfiguration{} + if inputConfig.ObjectLockEnabled != nil { + outputConfig.ObjectLockEnabled = types.ObjectLockEnabled(*inputConfig.ObjectLockEnabled) + } + //nolint:nestif // Multiple checks required + if inputConfig.Rule != nil { + outputConfig.Rule = &types.ObjectLockRule{} + if inputConfig.Rule.DefaultRetention != nil { + outputConfig.Rule.DefaultRetention = &types.DefaultRetention{} + outputConfig.Rule.DefaultRetention.Mode = types.ObjectLockRetentionMode(inputConfig.Rule.DefaultRetention.Mode) + if inputConfig.Rule.DefaultRetention.Days != nil { + outputConfig.Rule.DefaultRetention.Days = inputConfig.Rule.DefaultRetention.Days + } + if inputConfig.Rule.DefaultRetention.Years != nil { + outputConfig.Rule.DefaultRetention.Years = inputConfig.Rule.DefaultRetention.Years + } + } + } + + return outputConfig +} + +// ObjectLockConfigurationNotFoundErrCode is the error code sent by Ceph when the object lock config does not exist +var ObjectLockConfigurationNotFoundErrCode = "ObjectLockConfigurationNotFoundError" + +// ObjectLockConfigurationNotFound parses the error and reports whether the object lock configuration does not exist +func ObjectLockConfigurationNotFound(err error) bool { + var awsErr smithy.APIError + + return errors.As(err, &awsErr) && awsErr.ErrorCode() == ObjectLockConfigurationNotFoundErrCode +} diff --git a/package/crds/provider-ceph.ceph.crossplane.io_buckets.yaml b/package/crds/provider-ceph.ceph.crossplane.io_buckets.yaml index c8cc6b9b..5403c48a 100644 --- a/package/crds/provider-ceph.ceph.crossplane.io_buckets.yaml +++ b/package/crds/provider-ceph.ceph.crossplane.io_buckets.yaml @@ -455,9 +455,60 @@ spec: locationConstraint: description: Specifies the Region where the bucket will be created. type: string + objectLockConfiguration: + description: ObjectLockConfiguration describes the desired object + lock state of an S3 bucket. + properties: + objectLockEnabled: + description: |- + Indicates whether this bucket has an Object Lock configuration enabled. Enable + ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket. + enum: + - Enabled + type: string + objectLockRule: + description: |- + Specifies the Object Lock rule for the specified object. Enable this rule + when you apply ObjectLockConfiguration to a bucket. Bucket settings require + both a mode and a period. The period can be either Days or Years but you must + select one. You cannot specify Days and Years at the same time. + properties: + defaultRetention: + description: |- + The default Object Lock retention mode and period that you want to apply to new + objects placed in the specified bucket. Bucket settings require both a mode and + a period. The period can be either Days or Years but you must select one. You + cannot specify Days and Years at the same time. + properties: + days: + description: |- + The number of days that you want to specify for the default retention period. + Must be used with Mode.
+ format: int32 + type: integer + mode: + description: |- + The default Object Lock retention mode you want to apply to new objects placed + in the specified bucket. Must be used with either Days or Years. + enum: + - GOVERNANCE + - COMPLIANCE + type: string + years: + description: |- + The number of years that you want to specify for the default retention period. + Must be used with Mode. + format: int32 + type: integer + type: object + type: object + type: object objectLockEnabledForBucket: description: Specifies whether you want S3 Object Lock to be enabled for the new bucket. + enum: + - true + - "null" type: boolean objectOwnership: description: |- @@ -772,6 +823,42 @@ spec: - status - type type: object + objectLockConfigurationCondition: + description: |- + ObjectLockConfigurationCondition is the condition of the object lock + configuration on the S3 backend. Use a pointer to allow nil value when + there is no object lock configuration. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition + from one status to another. + type: string + status: + description: Status of this condition; is it currently + True, False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object versioningConfigurationCondition: description: |- VersioningConfigurationCondition is the condition of the versioning