diff --git a/api/v1alpha2/linodeobjectstoragebucket_types.go b/api/v1alpha2/linodeobjectstoragebucket_types.go
index a7770f013..4a4867aaa 100644
--- a/api/v1alpha2/linodeobjectstoragebucket_types.go
+++ b/api/v1alpha2/linodeobjectstoragebucket_types.go
@@ -29,6 +29,8 @@ const (
ACLPublicRead ObjectStorageACL = "public-read"
ACLAuthenticatedRead ObjectStorageACL = "authenticated-read"
ACLPublicReadWrite ObjectStorageACL = "public-read-write"
+
+ BucketFinalizer = "linodeobjectstoragebucket.infrastructure.cluster.x-k8s.io"
)
// LinodeObjectStorageBucketSpec defines the desired state of LinodeObjectStorageBucket
@@ -55,6 +57,14 @@ type LinodeObjectStorageBucketSpec struct {
// If not supplied then the credentials of the controller will be used.
// +optional
CredentialsRef *corev1.SecretReference `json:"credentialsRef"`
+
+ // AccessKeyRef is a reference to a LinodeObjectStorageKey for the bucket.
+ // +optional
+ AccessKeyRef *corev1.ObjectReference `json:"accessKeyRef"`
+
+ // ForceDeleteBucket enables deletion of the object storage bucket even if it contains objects.
+ // +optional
+ ForceDeleteBucket bool `json:"forceDeleteBucket,omitempty"`
}
// LinodeObjectStorageBucketStatus defines the observed state of LinodeObjectStorageBucket
diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go
index 04bd0dc34..dc42a51bf 100644
--- a/api/v1alpha2/zz_generated.deepcopy.go
+++ b/api/v1alpha2/zz_generated.deepcopy.go
@@ -1109,6 +1109,11 @@ func (in *LinodeObjectStorageBucketSpec) DeepCopyInto(out *LinodeObjectStorageBu
*out = new(v1.SecretReference)
**out = **in
}
+ if in.AccessKeyRef != nil {
+ in, out := &in.AccessKeyRef, &out.AccessKeyRef
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinodeObjectStorageBucketSpec.
diff --git a/clients/clients.go b/clients/clients.go
index 4a4837edd..8e13573df 100644
--- a/clients/clients.go
+++ b/clients/clients.go
@@ -86,6 +86,7 @@ type LinodeObjectStorageClient interface {
GetObjectStorageKey(ctx context.Context, keyID int) (*linodego.ObjectStorageKey, error)
CreateObjectStorageKey(ctx context.Context, opts linodego.ObjectStorageKeyCreateOptions) (*linodego.ObjectStorageKey, error)
DeleteObjectStorageKey(ctx context.Context, keyID int) error
+ DeleteObjectStorageBucket(ctx context.Context, regionID, label string) error
}
// LinodeDNSClient defines the methods that interact with Linode's Domains service.
@@ -128,6 +129,10 @@ type S3Client interface {
DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error)
PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error)
HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error)
+ GetBucketVersioning(ctx context.Context, params *s3.GetBucketVersioningInput, optFns ...func(*s3.Options)) (*s3.GetBucketVersioningOutput, error)
+ DeleteObjects(ctx context.Context, params *s3.DeleteObjectsInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error)
+ ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)
+ ListObjectVersions(ctx context.Context, params *s3.ListObjectVersionsInput, optFns ...func(*s3.Options)) (*s3.ListObjectVersionsOutput, error)
}
type S3PresignClient interface {
diff --git a/cloud/scope/object_storage_bucket.go b/cloud/scope/object_storage_bucket.go
index b8b36d199..b7773bcf7 100644
--- a/cloud/scope/object_storage_bucket.go
+++ b/cloud/scope/object_storage_bucket.go
@@ -8,6 +8,8 @@ import (
"github.com/go-logr/logr"
"sigs.k8s.io/cluster-api/util/patch"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2"
"github.com/linode/cluster-api-provider-linode/clients"
@@ -77,6 +79,16 @@ func NewObjectStorageBucketScope(ctx context.Context, linodeClientConfig ClientC
}, nil
}
+// AddFinalizer adds a finalizer if not present and immediately patches the
+// object to avoid any race conditions.
+func (s *ObjectStorageBucketScope) AddFinalizer(ctx context.Context) error {
+ if controllerutil.AddFinalizer(s.Bucket, infrav1alpha2.BucketFinalizer) {
+ return s.Close(ctx)
+ }
+
+ return nil
+}
+
// PatchObject persists the object storage bucket configuration and status.
func (s *ObjectStorageBucketScope) PatchObject(ctx context.Context) error {
return s.PatchHelper.Patch(ctx, s.Bucket)
@@ -86,3 +98,56 @@ func (s *ObjectStorageBucketScope) PatchObject(ctx context.Context) error {
func (s *ObjectStorageBucketScope) Close(ctx context.Context) error {
return s.PatchObject(ctx)
}
+
+// AddAccessKeyRefFinalizer adds a finalizer to the linodeobjectstoragekey referenced in spec.AccessKeyRef.
+func (s *ObjectStorageBucketScope) AddAccessKeyRefFinalizer(ctx context.Context, finalizer string) error {
+ obj, err := s.getAccessKey(ctx)
+ if err != nil {
+ return err
+ }
+
+ controllerutil.AddFinalizer(obj, finalizer)
+ if err := s.Client.Update(ctx, obj); err != nil {
+ return fmt.Errorf("add linodeobjectstoragekey finalizer %s/%s: %w", s.Bucket.Spec.AccessKeyRef.Namespace, s.Bucket.Spec.AccessKeyRef.Name, err)
+ }
+
+ return nil
+}
+
+// RemoveAccessKeyRefFinalizer removes a finalizer from the linodeobjectstoragekey referenced in spec.AccessKeyRef.
+func (s *ObjectStorageBucketScope) RemoveAccessKeyRefFinalizer(ctx context.Context, finalizer string) error {
+ obj, err := s.getAccessKey(ctx)
+ if err != nil {
+ return err
+ }
+
+ controllerutil.RemoveFinalizer(obj, finalizer)
+ if err := s.Client.Update(ctx, obj); err != nil {
+ return fmt.Errorf("remove linodeobjectstoragekey finalizer %s/%s: %w", s.Bucket.Spec.AccessKeyRef.Namespace, s.Bucket.Spec.AccessKeyRef.Name, err)
+ }
+
+ return nil
+}
+
+func (s *ObjectStorageBucketScope) getAccessKey(ctx context.Context) (*infrav1alpha2.LinodeObjectStorageKey, error) {
+ if s.Bucket.Spec.AccessKeyRef == nil {
+ return nil, fmt.Errorf("accessKeyRef is nil for bucket %s", s.Bucket.Name)
+ }
+
+ objKeyNamespace := s.Bucket.Spec.AccessKeyRef.Namespace
+ if s.Bucket.Spec.AccessKeyRef.Namespace == "" {
+ objKeyNamespace = s.Bucket.Namespace
+ }
+
+ objKey := client.ObjectKey{
+ Name: s.Bucket.Spec.AccessKeyRef.Name,
+ Namespace: objKeyNamespace,
+ }
+
+ objStorageKey := &infrav1alpha2.LinodeObjectStorageKey{}
+ if err := s.Client.Get(ctx, objKey, objStorageKey); err != nil {
+ return nil, fmt.Errorf("get linodeobjectstoragekey %s: %w", s.Bucket.Spec.AccessKeyRef.Name, err)
+ }
+
+ return objStorageKey, nil
+}
diff --git a/cloud/scope/object_storage_bucket_test.go b/cloud/scope/object_storage_bucket_test.go
index 585790373..073392d9f 100644
--- a/cloud/scope/object_storage_bucket_test.go
+++ b/cloud/scope/object_storage_bucket_test.go
@@ -7,11 +7,16 @@ import (
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2"
"github.com/linode/cluster-api-provider-linode/mock"
@@ -232,3 +237,283 @@ func TestNewObjectStorageBucketScope(t *testing.T) {
})
}
}
+
+func TestAddFinalizer(t *testing.T) {
+ t.Parallel()
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ mockK8sClient := mock.NewMockK8sClient(ctrl)
+ mockK8sClient.EXPECT().Scheme().AnyTimes().DoAndReturn(func() *runtime.Scheme {
+ s := runtime.NewScheme()
+ infrav1alpha2.AddToScheme(s)
+ return s
+ })
+ mockK8sClient.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1)
+
+ bucket := &infrav1alpha2.LinodeObjectStorageBucket{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-bucket",
+ Namespace: "test-namespace",
+ },
+ }
+ scope, err := NewObjectStorageBucketScope(t.Context(), ClientConfig{Token: "test-token"}, ObjectStorageBucketScopeParams{
+ Client: mockK8sClient,
+ Bucket: bucket,
+ Logger: &logr.Logger{},
+ })
+ require.NoError(t, err)
+
+ err = scope.AddFinalizer(t.Context())
+ require.NoError(t, err)
+}
+
+func TestAddAccessKeyRefFinalizer(t *testing.T) {
+ t.Parallel()
+ type args struct {
+ apiKey string
+ params ObjectStorageBucketScopeParams
+ }
+ tests := []struct {
+ name string
+ args args
+ expectedErr error
+ expects func(k8s *mock.MockK8sClient)
+ clientBuildFunc func(apiKey string) (LinodeClient, error)
+ }{
+ {
+ name: "Success - no AccessKeyRef",
+ args: args{
+ apiKey: "apikey",
+ params: ObjectStorageBucketScopeParams{
+ Client: nil,
+ Bucket: &infrav1alpha2.LinodeObjectStorageBucket{},
+ Logger: &logr.Logger{},
+ },
+ },
+ expectedErr: nil,
+ expects: func(k8s *mock.MockK8sClient) {
+ k8s.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
+ s := runtime.NewScheme()
+ infrav1alpha2.AddToScheme(s)
+ return s
+ })
+ },
+ },
+ {
+ name: "Success - valid AccessKeyRef",
+ args: args{
+ apiKey: "apikey",
+ params: ObjectStorageBucketScopeParams{
+ Client: nil,
+ Bucket: &infrav1alpha2.LinodeObjectStorageBucket{
+ Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{
+ AccessKeyRef: &corev1.ObjectReference{
+ Name: "example",
+ Namespace: "test",
+ },
+ },
+ },
+ Logger: &logr.Logger{},
+ },
+ },
+ expectedErr: nil,
+ expects: func(k8s *mock.MockK8sClient) {
+ k8s.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
+ s := runtime.NewScheme()
+ infrav1alpha2.AddToScheme(s)
+ return s
+ })
+ k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, name types.NamespacedName, obj *infrav1alpha2.LinodeObjectStorageKey, opts ...client.GetOption) error {
+ cred := infrav1alpha2.LinodeObjectStorageKey{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "example",
+ Namespace: "test",
+ },
+ Spec: infrav1alpha2.LinodeObjectStorageKeySpec{
+ BucketAccess: []infrav1alpha2.BucketAccessRef{{
+ BucketName: "test-bucket",
+ Permissions: "read_write",
+ Region: "region",
+ }},
+ },
+ }
+ *obj = cred
+ return nil
+ })
+ k8s.EXPECT().Update(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
+ // Simulate adding a finalizer
+ controllerutil.AddFinalizer(obj, "test-bucket")
+ return nil
+ })
+ },
+ },
+ {
+ name: "Error - accessKeyRef doesn't exist",
+ args: args{
+ apiKey: "test-key",
+ params: ObjectStorageBucketScopeParams{
+ Client: nil,
+ Bucket: &infrav1alpha2.LinodeObjectStorageBucket{
+ Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{
+ AccessKeyRef: &corev1.ObjectReference{
+ Name: "example",
+ Namespace: "test",
+ },
+ },
+ },
+ Logger: &logr.Logger{},
+ },
+ },
+ expectedErr: fmt.Errorf("not found"),
+ expects: func(k8s *mock.MockK8sClient) {
+ k8s.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
+ s := runtime.NewScheme()
+ infrav1alpha2.AddToScheme(s)
+ return s
+ })
+ k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, ""))
+ },
+ },
+ }
+ for _, tt := range tests {
+ testcase := tt
+ t.Run(testcase.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ mockK8sClient := mock.NewMockK8sClient(ctrl)
+
+ testcase.expects(mockK8sClient)
+
+ testcase.args.params.Client = mockK8sClient
+
+ scope, err := NewObjectStorageBucketScope(t.Context(), ClientConfig{Token: testcase.args.apiKey}, testcase.args.params)
+ require.NoError(t, err)
+
+ err = scope.AddAccessKeyRefFinalizer(t.Context(), tt.args.params.Bucket.Name)
+
+ if testcase.expectedErr != nil {
+ assert.ErrorContains(t, err, testcase.expectedErr.Error())
+ }
+ })
+ }
+}
+
+func TestRemoveAccessKeyRefFinalizer(t *testing.T) {
+ t.Parallel()
+ type args struct {
+ apiKey string
+ params ObjectStorageBucketScopeParams
+ }
+ tests := []struct {
+ name string
+ args args
+ expectedErr error
+ expects func(k8s *mock.MockK8sClient)
+ clientBuildFunc func(apiKey string) (LinodeClient, error)
+ }{
+ {
+ name: "Success - valid AccessKeyRef",
+ args: args{
+ apiKey: "apikey",
+ params: ObjectStorageBucketScopeParams{
+ Client: nil,
+ Bucket: &infrav1alpha2.LinodeObjectStorageBucket{
+ Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{
+ AccessKeyRef: &corev1.ObjectReference{
+ Name: "example",
+ },
+ },
+ },
+ Logger: &logr.Logger{},
+ },
+ },
+ expectedErr: nil,
+ expects: func(k8s *mock.MockK8sClient) {
+ k8s.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
+ s := runtime.NewScheme()
+ infrav1alpha2.AddToScheme(s)
+ return s
+ })
+ k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, name types.NamespacedName, obj *infrav1alpha2.LinodeObjectStorageKey, opts ...client.GetOption) error {
+ cred := infrav1alpha2.LinodeObjectStorageKey{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "example",
+ Namespace: "test",
+ Finalizers: []string{"test-bucket"},
+ },
+ Spec: infrav1alpha2.LinodeObjectStorageKeySpec{
+ BucketAccess: []infrav1alpha2.BucketAccessRef{{
+ BucketName: "test-bucket",
+ Permissions: "read_write",
+ Region: "region",
+ }},
+ },
+ }
+ *obj = cred
+ return nil
+ })
+ k8s.EXPECT().Update(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
+ // Simulate adding a finalizer
+ controllerutil.AddFinalizer(obj, "test-bucket")
+ return nil
+ })
+ },
+ },
+ {
+ name: "Error - accessKeyRef doesn't exist",
+ args: args{
+ apiKey: "test-key",
+ params: ObjectStorageBucketScopeParams{
+ Client: nil,
+ Bucket: &infrav1alpha2.LinodeObjectStorageBucket{
+ Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{
+ AccessKeyRef: &corev1.ObjectReference{
+ Name: "example",
+ Namespace: "test",
+ },
+ },
+ },
+ Logger: &logr.Logger{},
+ },
+ },
+ expectedErr: fmt.Errorf("not found"),
+ expects: func(k8s *mock.MockK8sClient) {
+ k8s.EXPECT().Scheme().DoAndReturn(func() *runtime.Scheme {
+ s := runtime.NewScheme()
+ infrav1alpha2.AddToScheme(s)
+ return s
+ })
+ k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(apierrors.NewNotFound(schema.GroupResource{}, ""))
+ },
+ },
+ }
+ for _, tt := range tests {
+ testcase := tt
+ t.Run(testcase.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ mockK8sClient := mock.NewMockK8sClient(ctrl)
+
+ testcase.expects(mockK8sClient)
+
+ testcase.args.params.Client = mockK8sClient
+
+ scope, err := NewObjectStorageBucketScope(t.Context(), ClientConfig{Token: testcase.args.apiKey}, testcase.args.params)
+ require.NoError(t, err)
+
+ err = scope.RemoveAccessKeyRefFinalizer(t.Context(), tt.args.params.Bucket.Name)
+
+ if testcase.expectedErr != nil {
+ assert.ErrorContains(t, err, testcase.expectedErr.Error())
+ }
+ })
+ }
+}
diff --git a/cloud/services/object_storage_buckets.go b/cloud/services/object_storage_buckets.go
index 528970da7..4f57f519e 100644
--- a/cloud/services/object_storage_buckets.go
+++ b/cloud/services/object_storage_buckets.go
@@ -5,12 +5,19 @@ import (
"fmt"
"net/http"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsconfig "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/linode/linodego"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
"github.com/linode/cluster-api-provider-linode/cloud/scope"
"github.com/linode/cluster-api-provider-linode/util"
)
+// EnsureAndUpdateObjectStorageBucket ensures that the bucket exists and updates its access options if necessary.
func EnsureAndUpdateObjectStorageBucket(ctx context.Context, bScope *scope.ObjectStorageBucketScope) (*linodego.ObjectStorageBucket, error) {
bucket, err := bScope.LinodeClient.GetObjectStorageBucket(
ctx,
@@ -62,3 +69,57 @@ func EnsureAndUpdateObjectStorageBucket(ctx context.Context, bScope *scope.Objec
return bucket, nil
}
+
+// DeleteBucket deletes the bucket and all its objects.
+func DeleteBucket(ctx context.Context, bScope *scope.ObjectStorageBucketScope) error {
+ s3Client, err := createS3ClientWithAccessKey(ctx, bScope)
+ if err != nil {
+ return fmt.Errorf("failed to create S3 client: %w", err)
+ }
+ if err := PurgeAllObjects(ctx, bScope.Bucket.Name, s3Client, true, true); err != nil {
+ return fmt.Errorf("failed to purge all objects: %w", err)
+ }
+ bScope.Logger.Info("Purged all objects", "bucket", bScope.Bucket.Name)
+
+ if err := bScope.LinodeClient.DeleteObjectStorageBucket(ctx, bScope.Bucket.Spec.Region, bScope.Bucket.Name); err != nil {
+ return fmt.Errorf("failed to delete bucket: %w", err)
+ }
+ bScope.Logger.Info("Deleted empty bucket", "bucket", bScope.Bucket.Name)
+
+ return nil
+}
+
+// createS3ClientWithAccessKey creates an S3 client using credentials from the secret generated for the access key referenced in spec.AccessKeyRef.
+func createS3ClientWithAccessKey(ctx context.Context, bScope *scope.ObjectStorageBucketScope) (*s3.Client, error) {
+ if bScope.Bucket.Spec.AccessKeyRef == nil {
+ return nil, fmt.Errorf("accessKeyRef is nil")
+ }
+ objSecret := &corev1.Secret{}
+ if bScope.Bucket.Spec.AccessKeyRef.Namespace == "" {
+ bScope.Bucket.Spec.AccessKeyRef.Namespace = bScope.Bucket.Namespace
+ }
+ if err := bScope.Client.Get(ctx, types.NamespacedName{Name: bScope.Bucket.Spec.AccessKeyRef.Name + "-obj-key", Namespace: bScope.Bucket.Spec.AccessKeyRef.Namespace}, objSecret); err != nil {
+ return nil, fmt.Errorf("failed to get bucket secret: %w", err)
+ }
+
+ awsConfig, err := awsconfig.LoadDefaultConfig(
+ ctx,
+ awsconfig.WithCredentialsProvider(
+ credentials.NewStaticCredentialsProvider(
+ string(objSecret.Data["access"]),
+ string(objSecret.Data["secret"]),
+ ""),
+ ),
+ awsconfig.WithRegion("auto"),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create aws config: %w", err)
+ }
+
+ s3Client := s3.NewFromConfig(awsConfig, func(opts *s3.Options) {
+ opts.BaseEndpoint = aws.String(string(objSecret.Data["endpoint"]))
+ opts.DisableLogOutputChecksumValidationSkipped = true
+ })
+
+ return s3Client, nil
+}
diff --git a/cloud/services/object_storage_buckets_test.go b/cloud/services/object_storage_buckets_test.go
index 680608de9..24015c1f3 100644
--- a/cloud/services/object_storage_buckets_test.go
+++ b/cloud/services/object_storage_buckets_test.go
@@ -1,13 +1,23 @@
package services
import (
+ "context"
"fmt"
"testing"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
"github.com/linode/linodego"
"github.com/stretchr/testify/assert"
"go.uber.org/mock/gomock"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2"
"github.com/linode/cluster-api-provider-linode/cloud/scope"
@@ -204,3 +214,266 @@ func TestEnsureObjectStorageBucket(t *testing.T) {
})
}
}
+
+func TestCreateS3ClientWithAccessKey(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ bScope *scope.ObjectStorageBucketScope
+ expectedError error
+ expects func(client *mock.MockK8sClient)
+ }{
+ {
+ name: "Success - Successfully create client",
+ bScope: &scope.ObjectStorageBucketScope{
+ Bucket: &infrav1alpha2.LinodeObjectStorageBucket{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-bucket",
+ },
+ Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{
+ Region: "test-region",
+ AccessKeyRef: &v1.ObjectReference{
+ Name: "test",
+ },
+ },
+ },
+ },
+ expects: func(k8s *mock.MockK8sClient) {
+ k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, name types.NamespacedName, obj *v1.Secret, opts ...client.GetOption) error {
+ secret := v1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-bucket-obj-key",
+ },
+ Data: map[string][]byte{
+ "access": []byte("test-access-key"),
+ "secret": []byte("test-secret-key"),
+ "bucket": []byte("test-bucket"),
+ },
+ }
+ *obj = secret
+ return nil
+ })
+ },
+ },
+ {
+ name: "Error - failed to get access key",
+ bScope: &scope.ObjectStorageBucketScope{
+ Bucket: &infrav1alpha2.LinodeObjectStorageBucket{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-bucket",
+ },
+ Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{
+ Region: "test-region",
+ AccessKeyRef: &v1.ObjectReference{
+ Name: "test",
+ },
+ },
+ },
+ },
+ expects: func(k8s *mock.MockK8sClient) {
+ k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.NewNotFound(schema.GroupResource{}, ""))
+ },
+ expectedError: fmt.Errorf("failed to get bucket secret"),
+ },
+ {
+ name: "Error - access key is nil",
+ bScope: &scope.ObjectStorageBucketScope{
+ Bucket: &infrav1alpha2.LinodeObjectStorageBucket{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-bucket",
+ },
+ Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{
+ Region: "test-region",
+ },
+ },
+ },
+ expects: func(k8s *mock.MockK8sClient) {},
+ expectedError: fmt.Errorf("accessKeyRef is nil"),
+ },
+ }
+ for _, tt := range tests {
+ testcase := tt
+ t.Run(testcase.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ mockClient := mock.NewMockK8sClient(ctrl)
+
+ testcase.bScope.Client = mockClient
+
+ testcase.expects(mockClient)
+
+ s3Client, err := createS3ClientWithAccessKey(t.Context(), testcase.bScope)
+ if testcase.expectedError != nil {
+ assert.ErrorContains(t, err, testcase.expectedError.Error())
+ } else {
+ assert.NotNil(t, s3Client)
+ }
+ })
+ }
+}
+
+func TestDeleteBucket(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ bScope *scope.ObjectStorageBucketScope
+ expectedError error
+ expects func(k8s *mock.MockK8sClient, lc *mock.MockLinodeClient)
+ }{
+ {
+ name: "Error - failed to purge all objects",
+ bScope: &scope.ObjectStorageBucketScope{
+ Bucket: &infrav1alpha2.LinodeObjectStorageBucket{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-bucket",
+ },
+ Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{
+ Region: "test-region",
+ AccessKeyRef: &v1.ObjectReference{
+ Name: "test-bucket",
+ },
+ },
+ },
+ },
+ expects: func(k8s *mock.MockK8sClient, lc *mock.MockLinodeClient) {
+ k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, name types.NamespacedName, obj *v1.Secret, opts ...client.GetOption) error {
+ secret := v1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-bucket-obj-key",
+ },
+ Data: map[string][]byte{
+ "access": []byte("test-access-key"),
+ "secret": []byte("test-secret-key"),
+ "bucket": []byte("test-bucket"),
+ },
+ }
+ *obj = secret
+ return nil
+ })
+ },
+ expectedError: fmt.Errorf("failed to purge all objects"),
+ },
+ {
+ name: "Error - failed to create S3 client",
+ bScope: &scope.ObjectStorageBucketScope{
+ Bucket: &infrav1alpha2.LinodeObjectStorageBucket{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-bucket",
+ },
+ Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{
+ Region: "test-region",
+ AccessKeyRef: &v1.ObjectReference{
+ Name: "test",
+ },
+ },
+ },
+ },
+ expects: func(k8s *mock.MockK8sClient, lc *mock.MockLinodeClient) {
+ k8s.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.NewNotFound(schema.GroupResource{}, ""))
+ },
+ expectedError: fmt.Errorf("failed to create S3 client"),
+ },
+ }
+ for _, tt := range tests {
+ testcase := tt
+ t.Run(testcase.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ mockK8s := mock.NewMockK8sClient(ctrl)
+ testcase.bScope.Client = mockK8s
+ mockClient := mock.NewMockLinodeClient(ctrl)
+ testcase.bScope.LinodeClient = mockClient
+
+ testcase.expects(mockK8s, mockClient)
+
+ err := DeleteBucket(t.Context(), testcase.bScope)
+ if testcase.expectedError != nil {
+ assert.ErrorContains(t, err, testcase.expectedError.Error())
+ }
+ })
+ }
+}
+
+func TestPurgeAllObjects(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ bucketName string
+ s3Client *mock.MockS3Client
+ forceDelete bool
+ expectedError error
+ expects func(s3mock *mock.MockS3Client)
+ }{
+ {
+ name: "Success - Successfully purge all objects (versioning enabled)",
+ bucketName: "test-bucket",
+ forceDelete: true,
+ expects: func(s3mock *mock.MockS3Client) {
+ s3mock.EXPECT().GetBucketVersioning(gomock.Any(), gomock.Any()).Return(&s3.GetBucketVersioningOutput{
+ Status: s3types.BucketVersioningStatusEnabled,
+ ResultMetadata: middleware.Metadata{},
+ }, nil)
+ s3mock.EXPECT().ListObjectVersions(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.ListObjectVersionsOutput{
+ Versions: []s3types.ObjectVersion{
+ {Key: aws.String("object1"), VersionId: aws.String("version1")},
+ {Key: aws.String("object2"), VersionId: aws.String("version2")},
+ },
+ }, nil)
+ s3mock.EXPECT().DeleteObjects(gomock.Any(), gomock.Any()).Return(&s3.DeleteObjectsOutput{}, nil)
+ },
+ },
+ {
+ name: "Success - Successfully purge all objects",
+ bucketName: "test-bucket",
+ forceDelete: true,
+ expects: func(s3mock *mock.MockS3Client) {
+ s3mock.EXPECT().GetBucketVersioning(gomock.Any(), gomock.Any()).Return(&s3.GetBucketVersioningOutput{}, nil)
+ s3mock.EXPECT().ListObjectsV2(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.ListObjectsV2Output{
+ Contents: []s3types.Object{
+ {Key: aws.String("object1")},
+ {Key: aws.String("object2")},
+ },
+ }, nil)
+ s3mock.EXPECT().DeleteObjects(gomock.Any(), gomock.Any()).Return(&s3.DeleteObjectsOutput{}, nil)
+ },
+ },
+ {
+ name: "Error - Failed to list objects",
+ bucketName: "test-bucket",
+ forceDelete: true,
+ expectedError: fmt.Errorf("failed to list objects"),
+ expects: func(s3mock *mock.MockS3Client) {
+ s3mock.EXPECT().GetBucketVersioning(gomock.Any(), gomock.Any()).Return(&s3.GetBucketVersioningOutput{}, nil)
+ s3mock.EXPECT().ListObjectsV2(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("failed to list objects"))
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ testcase := tt
+ t.Run(testcase.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ mockS3Client := mock.NewMockS3Client(ctrl)
+ testcase.s3Client = mockS3Client
+ testcase.expects(mockS3Client)
+
+ err := PurgeAllObjects(t.Context(), testcase.bucketName, testcase.s3Client, testcase.forceDelete, false)
+ if testcase.expectedError != nil {
+ assert.ErrorContains(t, err, testcase.expectedError.Error())
+ }
+ })
+ }
+}
diff --git a/cloud/services/object_storage_objects.go b/cloud/services/object_storage_objects.go
index a2c292bb4..39f294a82 100644
--- a/cloud/services/object_storage_objects.go
+++ b/cloud/services/object_storage_objects.go
@@ -9,9 +9,10 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
- "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
+ "github.com/linode/cluster-api-provider-linode/clients"
"github.com/linode/cluster-api-provider-linode/cloud/scope"
)
@@ -104,9 +105,9 @@ func DeleteObject(ctx context.Context, mscope *scope.MachineScope) error {
if err != nil {
var (
ae smithy.APIError
- bne *types.NoSuchBucket
- kne *types.NoSuchKey
- nf *types.NotFound
+ bne *s3types.NoSuchBucket
+ kne *s3types.NoSuchKey
+ nf *s3types.NotFound
)
switch {
// In the case that the IAM policy does not have sufficient permissions to get the object, we will attempt to
@@ -137,3 +138,141 @@ func DeleteObject(ctx context.Context, mscope *scope.MachineScope) error {
return nil
}
+
+// PurgeAllObjects deletes every object in the bucket. For versioned buckets it purges all object versions and delete markers; otherwise it deletes the objects directly.
+func PurgeAllObjects(
+ ctx context.Context,
+ bucket string,
+ s3client clients.S3Client,
+ bypassRetention,
+ ignoreNotFound bool,
+) error {
+ versioning, err := s3client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
+ Bucket: aws.String(bucket),
+ })
+ if err != nil {
+ return err
+ }
+
+ if versioning.Status == s3types.BucketVersioningStatusEnabled {
+ err = DeleteAllObjectVersionsAndDeleteMarkers(
+ ctx,
+ s3client,
+ bucket,
+ "",
+ bypassRetention,
+ ignoreNotFound,
+ )
+ } else {
+ err = DeleteAllObjects(ctx, s3client, bucket, bypassRetention)
+ }
+ return err
+}
+
+// DeleteAllObjects sends delete requests for every object in the bucket.
+// In a versioned bucket, objects receive a delete marker instead of being fully purged.
+func DeleteAllObjects(
+ ctx context.Context,
+ s3client clients.S3Client,
+ bucketName string,
+ bypassRetention bool,
+) error {
+ objPaginator := s3.NewListObjectsV2Paginator(s3client, &s3.ListObjectsV2Input{
+ Bucket: aws.String(bucketName),
+ })
+
+ var objectsToDelete []s3types.ObjectIdentifier
+ for objPaginator.HasMorePages() {
+ page, err := objPaginator.NextPage(ctx)
+ if err != nil {
+ return err
+ }
+
+ for _, obj := range page.Contents {
+ objectsToDelete = append(objectsToDelete, s3types.ObjectIdentifier{
+ Key: obj.Key,
+ })
+ }
+ }
+
+ if len(objectsToDelete) == 0 {
+ return nil
+ }
+
+ _, err := s3client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
+ Bucket: aws.String(bucketName),
+ Delete: &s3types.Delete{Objects: objectsToDelete},
+ BypassGovernanceRetention: &bypassRetention,
+ })
+
+ return err
+}
+
+// DeleteAllObjectVersionsAndDeleteMarkers deletes all object versions and delete markers under the given prefix.
+func DeleteAllObjectVersionsAndDeleteMarkers(ctx context.Context, client clients.S3Client, bucket, prefix string, bypassRetention, ignoreNotFound bool) error {
+ paginator := s3.NewListObjectVersionsPaginator(client, &s3.ListObjectVersionsInput{
+ Bucket: aws.String(bucket),
+ Prefix: aws.String(prefix),
+ })
+
+ var objectsToDelete []s3types.ObjectIdentifier
+ for paginator.HasMorePages() {
+ page, err := paginator.NextPage(ctx)
+ if err != nil {
+ if !IsObjNotFoundErr(err) || !ignoreNotFound {
+ return err
+ }
+ }
+ if page == nil {
+ continue
+ }
+
+ for _, version := range page.Versions {
+ objectsToDelete = append(
+ objectsToDelete,
+ s3types.ObjectIdentifier{
+ Key: version.Key,
+ VersionId: version.VersionId,
+ },
+ )
+ }
+ for _, marker := range page.DeleteMarkers {
+ objectsToDelete = append(
+ objectsToDelete,
+ s3types.ObjectIdentifier{
+ Key: marker.Key,
+ VersionId: marker.VersionId,
+ },
+ )
+ }
+ }
+
+ if len(objectsToDelete) == 0 {
+ return nil
+ }
+
+ _, err := client.DeleteObjects(
+ ctx,
+ &s3.DeleteObjectsInput{
+ Bucket: aws.String(bucket),
+ Delete: &s3types.Delete{Objects: objectsToDelete},
+ BypassGovernanceRetention: &bypassRetention,
+ },
+ )
+ if err != nil {
+ if !IsObjNotFoundErr(err) || !ignoreNotFound {
+ return err
+ }
+ }
+ return nil
+}
+
+// IsObjNotFoundErr checks if the error is a NotFound or Forbidden error from the S3 API.
+func IsObjNotFoundErr(err error) bool {
+ var apiErr smithy.APIError
+ // Error code is 'Forbidden' when the bucket has been removed
+ if errors.As(err, &apiErr) {
+ return apiErr.ErrorCode() == "NotFound" || apiErr.ErrorCode() == "Forbidden"
+ }
+ return false
+}
diff --git a/cloud/services/object_storage_objects_test.go b/cloud/services/object_storage_objects_test.go
index ebf494828..a8ddeb6eb 100644
--- a/cloud/services/object_storage_objects_test.go
+++ b/cloud/services/object_storage_objects_test.go
@@ -5,14 +5,17 @@ import (
"errors"
"testing"
+ "github.com/aws/aws-sdk-go-v2/aws"
awssigner "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
infrav1alpha2 "github.com/linode/cluster-api-provider-linode/api/v1alpha2"
@@ -413,3 +416,114 @@ func TestDeleteObject(t *testing.T) {
),
)
}
+
+func TestDeleteAllObjects(t *testing.T) {
+ t.Parallel()
+
+ NewSuite(t, mock.MockK8sClient{}, mock.MockS3Client{}).Run(
+ OneOf(
+ Path(
+ Call("fail to list objects", func(ctx context.Context, mck Mock) {
+ mck.S3Client.EXPECT().ListObjectsV2(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("fail"))
+ }),
+ Result("error", func(ctx context.Context, mck Mock) {
+ err := DeleteAllObjects(ctx, mck.S3Client, "test", true)
+ assert.Error(t, err)
+ }),
+ ),
+ Path(
+ Call("no objects", func(ctx context.Context, mck Mock) {
+ mck.S3Client.EXPECT().ListObjectsV2(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.ListObjectsV2Output{}, nil)
+ }),
+ Result("success", func(ctx context.Context, mck Mock) {
+ err := DeleteAllObjects(ctx, mck.S3Client, "test", true)
+ assert.NoError(t, err)
+ }),
+ ),
+ ),
+ )
+}
+
+func TestDeleteAllObjectVersionsAndDeleteMarkers(t *testing.T) {
+ t.Parallel()
+
+ NewSuite(t, mock.MockK8sClient{}, mock.MockS3Client{}).Run(
+ OneOf(
+ Path(
+ Call("fail to list object versions", func(ctx context.Context, mck Mock) {
+ mck.S3Client.EXPECT().ListObjectVersions(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("fail"))
+ }),
+ Result("error", func(ctx context.Context, mck Mock) {
+ err := DeleteAllObjectVersionsAndDeleteMarkers(ctx, mck.S3Client, "test", "", true, false)
+ assert.Error(t, err)
+ }),
+ ),
+ Path(
+ Call("no objects", func(ctx context.Context, mck Mock) {
+ mck.S3Client.EXPECT().ListObjectVersions(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.ListObjectVersionsOutput{}, nil)
+ }),
+ Result("error", func(ctx context.Context, mck Mock) {
+ err := DeleteAllObjectVersionsAndDeleteMarkers(ctx, mck.S3Client, "test", "", true, false)
+ assert.NoError(t, err)
+ }),
+ ),
+ Path(
+ Call("with an object", func(ctx context.Context, mck Mock) {
+ mck.S3Client.EXPECT().ListObjectVersions(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.ListObjectVersionsOutput{
+ Name: ptr.To("test"),
+ Versions: []types.ObjectVersion{
+ {
+ IsLatest: aws.Bool(true),
+ Key: aws.String("test"),
+ VersionId: aws.String("version2"),
+ },
+ },
+ ResultMetadata: middleware.Metadata{},
+ }, nil)
+ mck.S3Client.EXPECT().DeleteObjects(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.DeleteObjectsOutput{}, nil)
+ }),
+ Result("success", func(ctx context.Context, mck Mock) {
+ err := DeleteAllObjectVersionsAndDeleteMarkers(ctx, mck.S3Client, "test", "", true, false)
+ assert.NoError(t, err)
+ }),
+ ),
+ Path(
+ Call("with versions and delete markers", func(ctx context.Context, mck Mock) {
+ mck.S3Client.EXPECT().ListObjectVersions(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.ListObjectVersionsOutput{
+ Name: ptr.To("test"),
+ Versions: []types.ObjectVersion{
+ {
+ IsLatest: aws.Bool(false),
+ Key: aws.String("test"),
+ VersionId: aws.String("version1"),
+ },
+ {
+ IsLatest: aws.Bool(true),
+ Key: aws.String("test"),
+ VersionId: aws.String("version2"),
+ },
+ },
+ ResultMetadata: middleware.Metadata{},
+ DeleteMarkers: []types.DeleteMarkerEntry{
+ {
+ IsLatest: aws.Bool(false),
+ Key: aws.String("test"),
+ VersionId: aws.String("version1"),
+ },
+ {
+ IsLatest: aws.Bool(true),
+ Key: aws.String("test"),
+ VersionId: aws.String("version2"),
+ },
+ },
+ }, nil)
+ mck.S3Client.EXPECT().DeleteObjects(gomock.Any(), gomock.Any(), gomock.Any()).Return(&s3.DeleteObjectsOutput{}, nil)
+ }),
+ Result("success", func(ctx context.Context, mck Mock) {
+ err := DeleteAllObjectVersionsAndDeleteMarkers(ctx, mck.S3Client, "test", "", true, false)
+ assert.NoError(t, err)
+ }),
+ ),
+ ),
+ )
+}
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml
index 1791be346..9b78ed051 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_linodeobjectstoragebuckets.yaml
@@ -60,6 +60,50 @@ spec:
description: LinodeObjectStorageBucketSpec defines the desired state of
LinodeObjectStorageBucket
properties:
+ accessKeyRef:
+ description: AccessKeyRef is a reference to a LinodeObjectStorageKey
+ for the bucket.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
acl:
default: private
description: Acl sets the Access Control Level of the bucket using
@@ -90,6 +134,10 @@ spec:
type: string
type: object
x-kubernetes-map-type: atomic
+ forceDeleteBucket:
+ description: ForceDeleteBucket enables deletion of the object storage
+ bucket even if it contains objects.
+ type: boolean
region:
description: Region is the ID of the Object Storage region for the
bucket.
diff --git a/docs/src/reference/out.md b/docs/src/reference/out.md
index d7d7a63a8..92093b880 100644
--- a/docs/src/reference/out.md
+++ b/docs/src/reference/out.md
@@ -794,6 +794,8 @@ _Appears in:_
| `acl` _[ObjectStorageACL](#objectstorageacl)_ | Acl sets the Access Control Level of the bucket using a canned ACL string | private | Enum: [private public-read authenticated-read public-read-write]
|
| `corsEnabled` _boolean_ | corsEnabled enables for all origins in the bucket .If set to false, CORS is disabled for all origins in the bucket | true | |
| `credentialsRef` _[SecretReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretreference-v1-core)_ | CredentialsRef is a reference to a Secret that contains the credentials to use for provisioning the bucket.
If not supplied then the credentials of the controller will be used. | | |
+| `accessKeyRef` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectreference-v1-core)_ | AccessKeyRef is a reference to a LinodeObjectStorageKey for the bucket. | | |
+| `forceDeleteBucket` _boolean_ | ForceDeleteBucket enables deletion of the object storage bucket even if it contains objects. | | |
#### LinodeObjectStorageBucketStatus
diff --git a/docs/src/topics/backups.md b/docs/src/topics/backups.md
index c4f6cdd54..28e1bd70e 100644
--- a/docs/src/topics/backups.md
+++ b/docs/src/topics/backups.md
@@ -162,4 +162,6 @@ spec:
### Resource Deletion
-When deleting a `LinodeObjectStorageKey` resource, CAPL will deprovision the access key and delete the managed secret. However, when deleting a `LinodeObjectStorageBucket` resource, CAPL will retain the underlying bucket to avoid unintended data loss.
+When deleting a `LinodeObjectStorageKey` resource, CAPL will deprovision the access key and delete the managed secret. However, when deleting a `LinodeObjectStorageBucket` resource, CAPL retains the underlying bucket by default to avoid unintended data loss. To delete the bucket and all of its objects, set `.spec.forceDeleteBucket` to `true` (it defaults to `false`) on the `LinodeObjectStorageBucket` resource.
+
+When using etcd backups, the bucket can be cleaned up on cluster deletion by setting `FORCE_DELETE_OBJ_BUCKETS` to `true` (defaults to `false` to avoid unintended data loss).
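+
+For example, a bucket configured for force deletion, together with the `LinodeObjectStorageKey` it references for cleanup credentials, might look like the following sketch (the names and region here are placeholders):
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
+kind: LinodeObjectStorageBucket
+metadata:
+  name: example-bucket
+spec:
+  region: us-sea
+  forceDeleteBucket: true
+  accessKeyRef:
+    name: example-bucket-key
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
+kind: LinodeObjectStorageKey
+metadata:
+  name: example-bucket-key
+spec:
+  bucketAccess:
+    - bucketName: example-bucket
+      permissions: read_write
+      region: us-sea
+  generatedSecret:
+    format:
+      access: '{{ .AccessKey }}'
+      secret: '{{ .SecretKey }}'
+      endpoint: '{{ .S3Endpoint }}'
+      bucket: '{{ .BucketName }}'
+    type: Opaque
+```
+
+The `generatedSecret` format above exposes the `access`, `secret`, and `endpoint` keys that the controller reads when it purges the bucket's objects during deletion.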
diff --git a/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/assert-capi-resources.yaml b/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/assert-capi-resources.yaml
new file mode 100644
index 000000000..8a5b8dbab
--- /dev/null
+++ b/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/assert-capi-resources.yaml
@@ -0,0 +1,15 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: capi-controller-manager
+ namespace: capi-system
+status:
+ availableReplicas: 1
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: capl-controller-manager
+ namespace: capl-system
+status:
+ availableReplicas: 1
diff --git a/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/chainsaw-test.yaml b/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/chainsaw-test.yaml
new file mode 100755
index 000000000..e19d1f7cc
--- /dev/null
+++ b/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/chainsaw-test.yaml
@@ -0,0 +1,102 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+ name: force-delete-linodeobjectstoragebucket
+ # Label to trigger the test on every PR
+ labels:
+ all:
+ quick:
+ linodeobj:
+spec:
+ bindings:
+ # A short identifier for the E2E test run
+ - name: run
+ value: (join('-', ['e2e', 'force-delete-obj', env('GIT_REF')]))
+ - name: bucket
+ # Format the bucket name into a valid Kubernetes object name
+ # TODO: This is over-truncated to account for the Kubernetes access key Secret
+ value: (trim((truncate(($run), `52`)), '-'))
+ - name: access_secret
+ value: (join('-', [($bucket), 'obj-key']))
+ template: true
+ steps:
+ - name: Check if CAPI provider resources exist
+ try:
+ - assert:
+ file: assert-capi-resources.yaml
+ - name: Create LinodeObjectStorageBucket and Key
+ try:
+ - apply:
+ file: create-bucket-and-key.yaml
+ # AFAICT this is the only way to not have chainsaw immediately bomb out because resources don't exist
+ # or hang on assertion even after they do
+ - name: Wait for LinodeObjectStorageBucket and Key to be marked ready
+ try:
+ - script:
+ env:
+ - name: BUCKET
+ value: ($bucket)
+ content: |
+ set -e
+ kubectl -n $NAMESPACE wait --for=jsonpath='{.status.ready}'=true lobjkey $BUCKET --timeout=30s
+ kubectl -n $NAMESPACE wait --for=jsonpath='{.status.ready}'=true lobj $BUCKET --timeout=30s
+ - name: Check if the bucket was created
+ try:
+ - script:
+ env:
+ - name: BUCKET
+ value: ($bucket)
+ content: |
+ set -e
+ curl -s \
+ -H "Authorization: Bearer $LINODE_TOKEN" \
+ -H "Content-Type: application/json" \
+ "https://api.linode.com/v4/object-storage/buckets/us-sea-1/$BUCKET"
+ check:
+ ($error): ~
+ (json_parse($stdout)):
+ label: ($bucket)
+ - name: Ensure the access key was created
+ try:
+ - script:
+ env:
+ - name: URI
+ value: object-storage/keys
+ - name: OBJ_KEY
+ value: ($access_secret)
+ content: |
+ set -e
+
+ export KEY_ID=$(kubectl -n $NAMESPACE get lobjkey $OBJ_KEY -ojson | jq '.status.accessKeyRef')
+
+ curl -s \
+ -H "Authorization: Bearer $LINODE_TOKEN" \
+ -H "Content-Type: application/json" \
+ "https://api.linode.com/v4/$URI/$KEY_ID"
+ check:
+ ($error): ~
+ - name: Delete LinodeObjectStorageBucket
+ try:
+ - delete:
+ ref:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
+ kind: LinodeObjectStorageBucket
+ name: ($bucket)
+ - error:
+ file: check-lobj-deletion.yaml
+ - name: Check if the bucket was deleted
+ try:
+ - script:
+ env:
+ - name: BUCKET
+ value: ($bucket)
+ content: |
+ set -e
+ curl -s \
+ -H "Authorization: Bearer $LINODE_TOKEN" \
+ -H "Content-Type: application/json" \
+ "https://api.linode.com/v4/object-storage/buckets/us-sea-1/$BUCKET"
+ check:
+ ($stdout): |-
+ {"errors": [{"reason": "Not found"}]}
diff --git a/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/check-lobj-deletion.yaml b/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/check-lobj-deletion.yaml
new file mode 100644
index 000000000..d5c53e336
--- /dev/null
+++ b/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/check-lobj-deletion.yaml
@@ -0,0 +1,4 @@
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
+kind: LinodeObjectStorageBucket
+metadata:
+ name: ($bucket)
diff --git a/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/create-bucket-and-key.yaml b/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/create-bucket-and-key.yaml
new file mode 100644
index 000000000..f83c55bbf
--- /dev/null
+++ b/e2e/linodeobjectstoragebucket-controller/force-delete-linodeobjectstoragebucket/create-bucket-and-key.yaml
@@ -0,0 +1,27 @@
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
+kind: LinodeObjectStorageBucket
+metadata:
+ name: ($bucket)
+spec:
+ accessKeyRef:
+ name: ($bucket)
+ forceDeleteBucket: true
+ region: us-sea
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
+kind: LinodeObjectStorageKey
+metadata:
+ name: ($bucket)
+spec:
+ bucketAccess:
+ - bucketName: ($bucket)
+ permissions: read_write
+ region: us-sea
+ generatedSecret:
+ format:
+ access: '{{ .AccessKey }}'
+ bucket: '{{ .BucketName }}'
+ endpoint: '{{ .S3Endpoint }}'
+ secret: '{{ .SecretKey }}'
+ type: Opaque
diff --git a/internal/controller/linodemachine_controller_test.go b/internal/controller/linodemachine_controller_test.go
index 8facb92d3..475ff412a 100644
--- a/internal/controller/linodemachine_controller_test.go
+++ b/internal/controller/linodemachine_controller_test.go
@@ -1800,6 +1800,7 @@ var _ = Describe("machine-delete", Ordered, Label("machine", "machine-delete"),
})),
Path(Result("delete requeues", func(ctx context.Context, mck Mock) {
+ linodeMachine.DeletionTimestamp = &metav1.Time{Time: time.Now()}
mck.LinodeClient.EXPECT().DeleteInstance(gomock.Any(), gomock.Any()).
Return(&linodego.Error{Code: http.StatusBadGateway})
res, err := reconciler.reconcileDelete(ctx, mck.Logger(), mScope)
diff --git a/internal/controller/linodeobjectstoragebucket_controller.go b/internal/controller/linodeobjectstoragebucket_controller.go
index 52ee95492..d2fe3737c 100644
--- a/internal/controller/linodeobjectstoragebucket_controller.go
+++ b/internal/controller/linodeobjectstoragebucket_controller.go
@@ -27,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
+ "k8s.io/client-go/util/retry"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kutil "sigs.k8s.io/cluster-api/util"
conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
@@ -35,6 +36,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
crcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
@@ -60,9 +62,10 @@ type LinodeObjectStorageBucketReconciler struct {
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeobjectstoragebuckets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeobjectstoragebuckets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeobjectstoragebuckets/finalizers,verbs=update
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=linodeobjectstoragekeys/finalizers,verbs=update
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
-// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch;create;update;patch
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -99,7 +102,9 @@ func (r *LinodeObjectStorageBucketReconciler) Reconcile(ctx context.Context, req
}
// It will handle the case where the cluster is not found
- if err := util.SetOwnerReferenceToLinodeCluster(ctx, r.TracedClient(), cluster, objectStorageBucket, r.Scheme()); err != nil {
+ if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+ return util.SetOwnerReferenceToLinodeCluster(ctx, r.TracedClient(), cluster, objectStorageBucket, r.Scheme())
+ }); err != nil {
logger.Error(err, "Failed to set owner reference to LinodeCluster")
return ctrl.Result{}, err
}
@@ -133,11 +138,15 @@ func (r *LinodeObjectStorageBucketReconciler) reconcile(ctx context.Context, bSc
}
}()
- if err := r.reconcileApply(ctx, bScope); err != nil {
- return res, err
+ // Handle deleted buckets
+ if !bScope.Bucket.DeletionTimestamp.IsZero() {
+ if err := r.reconcileDelete(ctx, bScope); err != nil {
+ return retryIfTransient(err, bScope.Logger)
+ }
+ return res, nil
}
- return res, nil
+ return r.reconcileApply(ctx, bScope)
}
func (r *LinodeObjectStorageBucketReconciler) setFailure(bScope *scope.ObjectStorageBucketScope, err error) {
@@ -151,18 +160,31 @@ func (r *LinodeObjectStorageBucketReconciler) setFailure(bScope *scope.ObjectSto
})
}
-func (r *LinodeObjectStorageBucketReconciler) reconcileApply(ctx context.Context, bScope *scope.ObjectStorageBucketScope) error {
+func (r *LinodeObjectStorageBucketReconciler) reconcileApply(ctx context.Context, bScope *scope.ObjectStorageBucketScope) (ctrl.Result, error) {
bScope.Logger.Info("Reconciling apply")
bScope.Bucket.Status.Ready = false
bScope.Bucket.Status.FailureMessage = nil
+ if bScope.Bucket.Spec.AccessKeyRef != nil {
+ // Only add finalizers if the access key reference is set; without one, the bucket can be deleted immediately.
+ if err := bScope.AddFinalizer(ctx); err != nil {
+ bScope.Logger.Error(err, "failed to update bucket finalizer, requeuing")
+ return ctrl.Result{RequeueAfter: reconciler.DefaultClusterControllerReconcileDelay}, nil
+ }
+ // It's critical to add the access key finalizer, or else the access key could be deleted before the associated bucket and its contents are deleted.
+ if err := bScope.AddAccessKeyRefFinalizer(ctx, bScope.Bucket.Name); err != nil {
+ bScope.Logger.Error(err, "failed to update access key finalizer, requeuing")
+ return ctrl.Result{RequeueAfter: reconciler.DefaultClusterControllerReconcileDelay}, nil
+ }
+ }
+
bucket, err := services.EnsureAndUpdateObjectStorageBucket(ctx, bScope)
if err != nil {
bScope.Logger.Error(err, "Failed to ensure bucket or update bucket")
r.setFailure(bScope, err)
- return err
+ return ctrl.Result{}, err
}
bScope.Bucket.Status.Hostname = util.Pointer(bucket.Hostname)
@@ -176,6 +198,33 @@ func (r *LinodeObjectStorageBucketReconciler) reconcileApply(ctx context.Context
Reason: "ObjectStorageBucketReady", // We have to set the reason to not fail object patching
})
+ return ctrl.Result{}, nil
+}
+
+func (r *LinodeObjectStorageBucketReconciler) reconcileDelete(ctx context.Context, bScope *scope.ObjectStorageBucketScope) error {
+ // Delete the bucket if force deletion is enabled
+ if bScope.Bucket.Spec.ForceDeleteBucket {
+ if err := services.DeleteBucket(ctx, bScope); err != nil {
+ bScope.Logger.Error(err, "failed to delete bucket")
+ r.setFailure(bScope, err)
+ return err
+ }
+ }
+ // Don't delete the bucket if ForceDeleteBucket is false, since it may contain objects that would cause deletion to fail.
+
+ if bScope.Bucket.Spec.AccessKeyRef != nil {
+ // Retry on conflict to handle the case where the access key is being updated concurrently.
+ if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+ return bScope.RemoveAccessKeyRefFinalizer(ctx, bScope.Bucket.Name)
+ }); err != nil {
+ bScope.Logger.Error(err, "failed to remove access key finalizer")
+ r.setFailure(bScope, err)
+ return err
+ }
+
+ controllerutil.RemoveFinalizer(bScope.Bucket, infrav1alpha2.BucketFinalizer)
+ }
+
return nil
}
diff --git a/internal/controller/linodeobjectstoragebucket_controller_test.go b/internal/controller/linodeobjectstoragebucket_controller_test.go
index 0bb4c3838..820cff398 100644
--- a/internal/controller/linodeobjectstoragebucket_controller_test.go
+++ b/internal/controller/linodeobjectstoragebucket_controller_test.go
@@ -23,6 +23,7 @@ import (
"github.com/linode/linodego"
"go.uber.org/mock/gomock"
+ corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -49,9 +50,42 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() {
ObjectMeta: metav1.ObjectMeta{
Name: "lifecycle",
Namespace: "default",
+ Finalizers: []string{
+ infrav1alpha2.BucketFinalizer,
+ },
},
Spec: infrav1alpha2.LinodeObjectStorageBucketSpec{
Region: "region",
+ AccessKeyRef: &corev1.ObjectReference{
+ Name: "lifecycle-mgmt",
+ },
+ },
+ }
+
+ key := infrav1alpha2.LinodeObjectStorageKey{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "lifecycle-mgmt",
+ Namespace: "default",
+ },
+ Spec: infrav1alpha2.LinodeObjectStorageKeySpec{
+ BucketAccess: []infrav1alpha2.BucketAccessRef{{
+ BucketName: "lifecycle",
+ Permissions: "read_write",
+ Region: "region",
+ }},
+ },
+ }
+
+ secret := corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "lifecycle-mgmt-obj-key",
+ Namespace: "default",
+ },
+ Data: map[string][]byte{
+ "access": []byte("access-key"),
+ "secret": []byte("secret-key"),
+ "endpoint": []byte("https://region-1.linodeobjects.com"),
+ "bucket": []byte("lifecycle"),
},
}
@@ -63,7 +97,9 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() {
BeforeAll(func(ctx SpecContext) {
bScope.Client = k8sClient
+ Expect(k8sClient.Create(ctx, &key)).To(Succeed())
Expect(k8sClient.Create(ctx, &obj)).To(Succeed())
+ Expect(k8sClient.Create(ctx, &secret)).To(Succeed())
})
suite.BeforeEach(func(ctx context.Context, mck Mock) {
@@ -73,6 +109,12 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() {
objectKey := client.ObjectKey{Name: "lifecycle", Namespace: "default"}
Expect(k8sClient.Get(ctx, objectKey, &obj)).To(Succeed())
+ accessKey := client.ObjectKey{Name: "lifecycle-mgmt", Namespace: "default"}
+ Expect(k8sClient.Get(ctx, accessKey, &key)).To(Succeed())
+
+ secretKey := client.ObjectKey{Name: "lifecycle-mgmt-obj-key", Namespace: "default"}
+ Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed())
+
// Create patch helper with latest state of resource.
// This is only needed when relying on envtest's k8sClient.
patchHelper, err := patch.NewHelper(&obj, k8sClient)
@@ -97,6 +139,8 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() {
Result("resource status is updated", func(ctx context.Context, mck Mock) {
objectKey := client.ObjectKeyFromObject(&obj)
bScope.LinodeClient = mck.LinodeClient
+ tmpClient := bScope.Client
+ bScope.Client = k8sClient
_, err := reconciler.reconcile(ctx, &bScope)
Expect(err).NotTo(HaveOccurred())
@@ -115,6 +159,8 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() {
logOutput := mck.Logs()
Expect(logOutput).To(ContainSubstring("Reconciling apply"))
+
+ bScope.Client = tmpClient
}),
),
Path(
@@ -210,15 +256,37 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() {
}),
),
),
- Call("resource is deleted", func(ctx context.Context, _ Mock) {
- Expect(k8sClient.Delete(ctx, &obj)).To(Succeed())
- }),
- Result("success", func(ctx context.Context, mck Mock) {
- objectKey := client.ObjectKeyFromObject(&obj)
- k8sClient.Get(ctx, objectKey, &obj)
- bScope.LinodeClient = mck.LinodeClient
- Expect(apierrors.IsNotFound(k8sClient.Get(ctx, objectKey, &obj))).To(BeTrue())
- }),
+ OneOf(
+ Path(
+ Call("unable to delete", func(ctx context.Context, mck Mock) {
+ obj.Spec.ForceDeleteBucket = true
+ obj.Spec.AccessKeyRef = &corev1.ObjectReference{Name: "lifecycle-mgmt", Namespace: "default"}
+ obj.DeletionTimestamp = &metav1.Time{Time: time.Now()}
+ Expect(k8sClient.Delete(ctx, &obj)).To(Succeed())
+ }),
+ OneOf(
+ Path(Result("cannot purge bucket", func(ctx context.Context, mck Mock) {
+ _, err := reconciler.reconcile(ctx, &bScope)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(mck.Logs()).To(ContainSubstring("failed to purge all objects"))
+ })),
+ ),
+ ),
+ Path(
+ Call("able to delete", func(ctx context.Context, _ Mock) {
+ obj.Spec.ForceDeleteBucket = false
+ obj.Spec.AccessKeyRef = nil
+ obj.DeletionTimestamp = &metav1.Time{Time: time.Now()}
+ Expect(k8sClient.Delete(ctx, &obj)).To(Succeed())
+ }),
+ // TODO: Mock smithy operations so we can test bucket deletion
+ Result("success for preserving bucket", func(ctx context.Context, mck Mock) {
+ res, err := reconciler.reconcile(ctx, &bScope)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(res.RequeueAfter).To(Equal(time.Duration(0)))
+ }),
+ ),
+ ),
)
})
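
For context on what the "failed to purge all objects" path above exercises: when spec.forceDeleteBucket is set, the bucket's contents have to be removed before the bucket itself can be deleted. Below is a minimal sketch, not the controller's actual implementation, of how the newly mocked S3 operations (ListObjectsV2 and DeleteObjects; GetBucketVersioning/ListObjectVersions would additionally be needed for versioned buckets) could be combined into such a purge. The purgeClient interface and purgeBucket helper are hypothetical names introduced only for illustration.

package purge

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// purgeClient is the subset of the S3Client interface this sketch needs.
type purgeClient interface {
	ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)
	DeleteObjects(ctx context.Context, params *s3.DeleteObjectsInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error)
}

// purgeBucket deletes every current object in the bucket, one page at a time.
// Versioned objects and delete markers would additionally require
// ListObjectVersions; they are omitted to keep the sketch short.
func purgeBucket(ctx context.Context, c purgeClient, bucket string) error {
	var token *string
	for {
		page, err := c.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
			Bucket:            aws.String(bucket),
			ContinuationToken: token,
		})
		if err != nil {
			return err
		}
		if len(page.Contents) > 0 {
			ids := make([]types.ObjectIdentifier, 0, len(page.Contents))
			for _, obj := range page.Contents {
				ids = append(ids, types.ObjectIdentifier{Key: obj.Key})
			}
			if _, err := c.DeleteObjects(ctx, &s3.DeleteObjectsInput{
				Bucket: aws.String(bucket),
				Delete: &types.Delete{Objects: ids},
			}); err != nil {
				return err
			}
		}
		// The continuation token is only set when more pages remain.
		if page.NextContinuationToken == nil {
			return nil
		}
		token = page.NextContinuationToken
	}
}
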
diff --git a/mock/client.go b/mock/client.go
index b8b22c0a7..dffd97f6b 100644
--- a/mock/client.go
+++ b/mock/client.go
@@ -326,6 +326,20 @@ func (mr *MockLinodeClientMockRecorder) DeleteNodeBalancerNode(ctx, nodebalancer
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodeBalancerNode", reflect.TypeOf((*MockLinodeClient)(nil).DeleteNodeBalancerNode), ctx, nodebalancerID, configID, nodeID)
}
+// DeleteObjectStorageBucket mocks base method.
+func (m *MockLinodeClient) DeleteObjectStorageBucket(ctx context.Context, regionID, label string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteObjectStorageBucket", ctx, regionID, label)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DeleteObjectStorageBucket indicates an expected call of DeleteObjectStorageBucket.
+func (mr *MockLinodeClientMockRecorder) DeleteObjectStorageBucket(ctx, regionID, label any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectStorageBucket", reflect.TypeOf((*MockLinodeClient)(nil).DeleteObjectStorageBucket), ctx, regionID, label)
+}
+
// DeleteObjectStorageKey mocks base method.
func (m *MockLinodeClient) DeleteObjectStorageKey(ctx context.Context, keyID int) error {
m.ctrl.T.Helper()
@@ -1583,6 +1597,20 @@ func (mr *MockLinodeObjectStorageClientMockRecorder) CreateObjectStorageKey(ctx,
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateObjectStorageKey", reflect.TypeOf((*MockLinodeObjectStorageClient)(nil).CreateObjectStorageKey), ctx, opts)
}
+// DeleteObjectStorageBucket mocks base method.
+func (m *MockLinodeObjectStorageClient) DeleteObjectStorageBucket(ctx context.Context, regionID, label string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteObjectStorageBucket", ctx, regionID, label)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DeleteObjectStorageBucket indicates an expected call of DeleteObjectStorageBucket.
+func (mr *MockLinodeObjectStorageClientMockRecorder) DeleteObjectStorageBucket(ctx, regionID, label any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectStorageBucket", reflect.TypeOf((*MockLinodeObjectStorageClient)(nil).DeleteObjectStorageBucket), ctx, regionID, label)
+}
+
// DeleteObjectStorageKey mocks base method.
func (m *MockLinodeObjectStorageClient) DeleteObjectStorageKey(ctx context.Context, keyID int) error {
m.ctrl.T.Helper()
@@ -2306,6 +2334,46 @@ func (mr *MockS3ClientMockRecorder) DeleteObject(ctx, params any, optFns ...any)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockS3Client)(nil).DeleteObject), varargs...)
}
+// DeleteObjects mocks base method.
+func (m *MockS3Client) DeleteObjects(ctx context.Context, params *s3.DeleteObjectsInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{ctx, params}
+ for _, a := range optFns {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "DeleteObjects", varargs...)
+ ret0, _ := ret[0].(*s3.DeleteObjectsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeleteObjects indicates an expected call of DeleteObjects.
+func (mr *MockS3ClientMockRecorder) DeleteObjects(ctx, params any, optFns ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{ctx, params}, optFns...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjects", reflect.TypeOf((*MockS3Client)(nil).DeleteObjects), varargs...)
+}
+
+// GetBucketVersioning mocks base method.
+func (m *MockS3Client) GetBucketVersioning(ctx context.Context, params *s3.GetBucketVersioningInput, optFns ...func(*s3.Options)) (*s3.GetBucketVersioningOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{ctx, params}
+ for _, a := range optFns {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "GetBucketVersioning", varargs...)
+ ret0, _ := ret[0].(*s3.GetBucketVersioningOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetBucketVersioning indicates an expected call of GetBucketVersioning.
+func (mr *MockS3ClientMockRecorder) GetBucketVersioning(ctx, params any, optFns ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{ctx, params}, optFns...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioning", reflect.TypeOf((*MockS3Client)(nil).GetBucketVersioning), varargs...)
+}
+
// HeadObject mocks base method.
func (m *MockS3Client) HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error) {
m.ctrl.T.Helper()
@@ -2326,6 +2394,46 @@ func (mr *MockS3ClientMockRecorder) HeadObject(ctx, params any, optFns ...any) *
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObject", reflect.TypeOf((*MockS3Client)(nil).HeadObject), varargs...)
}
+// ListObjectVersions mocks base method.
+func (m *MockS3Client) ListObjectVersions(ctx context.Context, params *s3.ListObjectVersionsInput, f ...func(*s3.Options)) (*s3.ListObjectVersionsOutput, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{ctx, params}
+ for _, a := range f {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListObjectVersions", varargs...)
+ ret0, _ := ret[0].(*s3.ListObjectVersionsOutput)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListObjectVersions indicates an expected call of ListObjectVersions.
+func (mr *MockS3ClientMockRecorder) ListObjectVersions(ctx, params any, f ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{ctx, params}, f...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersions", reflect.TypeOf((*MockS3Client)(nil).ListObjectVersions), varargs...)
+}
+
+// ListObjectsV2 mocks base method.
+func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{ctx, params}
+ for _, a := range optFns {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "ListObjectsV2", varargs...)
+ ret0, _ := ret[0].(*s3.ListObjectsV2Output)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ListObjectsV2 indicates an expected call of ListObjectsV2.
+func (mr *MockS3ClientMockRecorder) ListObjectsV2(ctx, params any, optFns ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{ctx, params}, optFns...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2", reflect.TypeOf((*MockS3Client)(nil).ListObjectsV2), varargs...)
+}
+
// PutObject mocks base method.
func (m *MockS3Client) PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) {
m.ctrl.T.Helper()
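
As a quick reference, here is a hedged sketch of wiring an expectation against the generated mocks above in a plain gomock test, outside the suite's Mock harness. The mock package import path is assumed from the module layout, and the region/label values are placeholders.

package example_test

import (
	"context"
	"testing"

	"go.uber.org/mock/gomock"

	"github.com/linode/cluster-api-provider-linode/mock"
)

func TestDeleteObjectStorageBucketExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)

	// Expect a single bucket deletion for the given region and label.
	objClient := mock.NewMockLinodeObjectStorageClient(ctrl)
	objClient.EXPECT().
		DeleteObjectStorageBucket(gomock.Any(), "region", "lifecycle").
		Return(nil)

	// In a real test the reconciler would make this call; it is invoked
	// directly here only to show the expectation being satisfied.
	if err := objClient.DeleteObjectStorageBucket(context.Background(), "region", "lifecycle"); err != nil {
		t.Fatal(err)
	}
}
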
diff --git a/observability/wrappers/linodeclient/linodeclient.gen.go b/observability/wrappers/linodeclient/linodeclient.gen.go
index 4e8ae34c7..35d744377 100644
--- a/observability/wrappers/linodeclient/linodeclient.gen.go
+++ b/observability/wrappers/linodeclient/linodeclient.gen.go
@@ -514,6 +514,31 @@ func (_d LinodeClientWithTracing) DeleteNodeBalancerNode(ctx context.Context, no
return _d.LinodeClient.DeleteNodeBalancerNode(ctx, nodebalancerID, configID, nodeID)
}
+// DeleteObjectStorageBucket implements _sourceClients.LinodeClient
+func (_d LinodeClientWithTracing) DeleteObjectStorageBucket(ctx context.Context, regionID string, label string) (err error) {
+ ctx, _span := tracing.Start(ctx, "_sourceClients.LinodeClient.DeleteObjectStorageBucket")
+ defer func() {
+ if _d._spanDecorator != nil {
+ _d._spanDecorator(_span, map[string]interface{}{
+ "ctx": ctx,
+ "regionID": regionID,
+ "label": label}, map[string]interface{}{
+ "err": err})
+ }
+
+ if err != nil {
+ _span.RecordError(err)
+ _span.SetAttributes(
+ attribute.String("event", "error"),
+ attribute.String("message", err.Error()),
+ )
+ }
+
+ _span.End()
+ }()
+ return _d.LinodeClient.DeleteObjectStorageBucket(ctx, regionID, label)
+}
+
// DeleteObjectStorageKey implements _sourceClients.LinodeClient
func (_d LinodeClientWithTracing) DeleteObjectStorageKey(ctx context.Context, keyID int) (err error) {
ctx, _span := tracing.Start(ctx, "_sourceClients.LinodeClient.DeleteObjectStorageKey")
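
The generated DeleteObjectStorageBucket wrapper above follows the same pattern as the other traced methods. For readers unfamiliar with it, the following is a standalone sketch of that span/error-recording pattern written out by hand using OpenTelemetry directly; the tracer name is arbitrary, and the clients import path is assumed from the module layout, so this is illustrative rather than the package's own tracing helper.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"

	"github.com/linode/cluster-api-provider-linode/clients"
)

// deleteBucketTraced mirrors the generated wrapper: start a span, record any
// error on it with the same attributes, and always end the span.
func deleteBucketTraced(ctx context.Context, c clients.LinodeClient, region, label string) error {
	ctx, span := otel.Tracer("capl-example").Start(ctx, "LinodeClient.DeleteObjectStorageBucket")
	defer span.End()

	if err := c.DeleteObjectStorageBucket(ctx, region, label); err != nil {
		span.RecordError(err)
		span.SetAttributes(
			attribute.String("event", "error"),
			attribute.String("message", err.Error()),
		)
		return err
	}
	return nil
}
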
diff --git a/templates/addons/cluster-object-store/cluster-object-store.yaml b/templates/addons/cluster-object-store/cluster-object-store.yaml
index 855c4e28a..e58c30f7c 100644
--- a/templates/addons/cluster-object-store/cluster-object-store.yaml
+++ b/templates/addons/cluster-object-store/cluster-object-store.yaml
@@ -14,6 +14,7 @@ spec:
credentialsRef:
name: ${CLUSTER_NAME}-credentials
region: ${OBJ_BUCKET_REGION:=${LINODE_REGION}}
+ forceDeleteBucket: ${FORCE_DELETE_OBJ_BUCKETS:=false}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LinodeObjectStorageKey
diff --git a/templates/addons/etcd-backup-restore/linode-obj.yaml b/templates/addons/etcd-backup-restore/linode-obj.yaml
index 07026d501..cf60fe3f2 100644
--- a/templates/addons/etcd-backup-restore/linode-obj.yaml
+++ b/templates/addons/etcd-backup-restore/linode-obj.yaml
@@ -13,10 +13,40 @@ metadata:
spec:
credentialsRef:
name: ${CLUSTER_NAME}-credentials
+ accessKeyRef:
+ name: ${CLUSTER_NAME}-etcd-backup-mgmt
+ forceDeleteBucket: ${FORCE_DELETE_OBJ_BUCKETS:=false}
region: ${OBJ_BUCKET_REGION:=${LINODE_REGION}}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LinodeObjectStorageKey
+metadata:
+ labels:
+ app.kubernetes.io/created-by: cluster-api-provider-linode
+ app.kubernetes.io/instance: ${CLUSTER_NAME}-etcd-backup
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: linodeobjectstoragekey
+ app.kubernetes.io/part-of: cluster-api-provider-linode
+ cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
+ name: ${CLUSTER_NAME}-etcd-backup-mgmt
+ namespace: default
+spec:
+ bucketAccess:
+ - bucketName: ${CLUSTER_NAME}-etcd-backup
+ permissions: read_write
+ region: ${OBJ_BUCKET_REGION:=${LINODE_REGION}}
+ credentialsRef:
+ name: ${CLUSTER_NAME}-credentials
+ generatedSecret:
+ format:
+ access: '{{ .AccessKey }}'
+ bucket: '{{ .BucketName }}'
+ endpoint: '{{ .S3Endpoint }}'
+ secret: '{{ .SecretKey }}'
+ type: Opaque
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
+kind: LinodeObjectStorageKey
metadata:
labels:
app.kubernetes.io/name: linodeobjectstoragekey