Skip to content

Commit

Permalink
Release backup schedule and policy finalizers
Browse files Browse the repository at this point in the history
Signed-off-by: Jose Vazquez <jose.vazquez@mongodb.com>
  • Loading branch information
josvazg committed Aug 16, 2023
1 parent 3f4ae65 commit 99b4d18
Show file tree
Hide file tree
Showing 6 changed files with 388 additions and 92 deletions.
5 changes: 2 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -160,9 +160,8 @@ lint:
golangci-lint run

$(TIMESTAMPS_DIR)/fmt: $(GO_SOURCES)
go fmt ./...
find . -name "*.go" -not -path "./vendor/*" -exec gofmt -w "{}" \;
find . -name "*.go" -not -path "./vendor/*" -exec goimports -local github.com/mongodb/mongodb-atlas-kubernetes -l -w "{}" \;
@echo "goimports -local github.com/mongodb/mongodb-atlas-kubernetes -l -w \$$(GO_SOURCES)"
@goimports -local github.com/mongodb/mongodb-atlas-kubernetes -l -w $(GO_SOURCES)
@mkdir -p $(TIMESTAMPS_DIR) && touch $@

.PHONY: fmt
Expand Down
2 changes: 1 addition & 1 deletion pkg/controller/atlasdeployment/advanced_deployment_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ func TestAdvancedDeploymentsEqual(t *testing.T) {

logger, _ := zap.NewProduction()
areEqual, _ := AdvancedDeploymentsEqual(logger.Sugar(), merged, atlas)
assert.True(t, areEqual, "Deploymnts should be equal")
assert.True(t, areEqual, "Deployments should be equal")
})
}

Expand Down
21 changes: 16 additions & 5 deletions pkg/controller/atlasdeployment/atlasdeployment_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -225,19 +225,19 @@ func (r *AtlasDeploymentReconciler) checkDeploymentIsManaged(
project *mdbv1.AtlasProject,
deployment *mdbv1.AtlasDeployment,
) workflow.Result {
dply := deployment
advancedDeployment := deployment
if deployment.IsLegacyDeployment() {
dply = deployment.DeepCopy()
if err := ConvertLegacyDeployment(&dply.Spec); err != nil {
advancedDeployment = deployment.DeepCopy()
if err := ConvertLegacyDeployment(&advancedDeployment.Spec); err != nil {
result := workflow.Terminate(workflow.Internal, err.Error())
log.Errorw("failed to temporary convert legacy deployment", "error", err)
return result
}
dply.Spec.DeploymentSpec = nil
advancedDeployment.Spec.DeploymentSpec = nil
}

owner, err := customresource.IsOwner(
dply,
advancedDeployment,
r.ObjectDeletionProtection,
customresource.IsResourceManagedByOperator,
managedByAtlas(context, workflowCtx.Client, project.ID(), log),
Expand Down Expand Up @@ -288,6 +288,11 @@ func (r *AtlasDeploymentReconciler) handleDeletion(

if !deployment.GetDeletionTimestamp().IsZero() {
if customresource.HaveFinalizer(deployment, customresource.FinalizerLabel) {
if err := r.cleanupBindings(context, deployment); err != nil {
result := workflow.Terminate(workflow.Internal, err.Error())
log.Errorw("failed to cleanup deployment bindings (backups)", "error", err)
return true, result
}
isProtected := customresource.IsResourceProtected(deployment, r.ObjectDeletionProtection)
if isProtected {
log.Info("Not removing Atlas deployment from Atlas as per configuration")
Expand Down Expand Up @@ -315,6 +320,12 @@ func (r *AtlasDeploymentReconciler) handleDeletion(
return false, workflow.OK()
}

// cleanupBindings releases resources bound to this deployment before the
// deployment itself is removed. Currently that means garbage-collecting
// backup resources (schedule/policy finalizers) keyed by the deployment's
// Atlas name, via garbageCollectBackupResource.
//
// Idiom fix: the context parameter was named `context`, shadowing the
// standard-library `context` package inside the function body; renamed to
// the conventional `ctx` (no caller-visible change in Go).
func (r *AtlasDeploymentReconciler) cleanupBindings(ctx context.Context, deployment *mdbv1.AtlasDeployment) error {
	r.Log.Debug("Cleaning up deployment bindings (backup)")

	return r.garbageCollectBackupResource(ctx, deployment.GetDeploymentName())
}

func modifyProviderSettings(pSettings *mdbv1.ProviderSettingsSpec, deploymentType string) {
if pSettings == nil || string(pSettings.ProviderName) == deploymentType {
return
Expand Down
203 changes: 202 additions & 1 deletion pkg/controller/atlasdeployment/atlasdeployment_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ package atlasdeployment

import (
"context"
"fmt"
"log"
"regexp"
"testing"
Expand All @@ -31,14 +32,18 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

v1 "github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1"
"github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1/common"
"github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1/status"
"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/atlas"
"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/customresource"
"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/watch"
"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/workflow"
"github.com/mongodb/mongodb-atlas-kubernetes/pkg/util/kube"
)

const (
Expand Down Expand Up @@ -411,6 +416,143 @@ func TestDeleteAnnotatedDeploymentGetRemoved(t *testing.T) {
}
}

// TestCleanupBindings exercises AtlasDeploymentReconciler.cleanupBindings:
// releasing backup-schedule and backup-policy finalizers when the last
// deployment referencing them goes away, while leaving finalizers in place
// when another deployment (or schedule) still references them.
func TestCleanupBindings(t *testing.T) {
	t.Run("without backup references, nothing happens on cleanup", func(t *testing.T) {
		r := &AtlasDeploymentReconciler{
			Log:    testLog(t),
			Client: testK8sClient(),
		}
		d := &v1.AtlasDeployment{} // dummy deployment

		// test cleanup: no backups exist, so this must be a no-op with no error.
		assert.NoError(t, r.cleanupBindings(context.Background(), d))
	})

	t.Run("with unreferenced backups, still nothing happens on cleanup", func(t *testing.T) {
		r := &AtlasDeploymentReconciler{
			Log:    testLog(t),
			Client: testK8sClient(),
		}
		dn := testDeploymentName("") // deployment, schedule, policy (NOT connected)
		// Deployment deliberately has no BackupScheduleRef, so the schedule and
		// policy below are not reachable from it.
		deployment := &v1.AtlasDeployment{
			ObjectMeta: metav1.ObjectMeta{Name: dn.Name, Namespace: dn.Namespace},
		}
		require.NoError(t, r.Client.Create(context.Background(), deployment))
		policy := testBackupPolicy()
		require.NoError(t, r.Client.Create(context.Background(), policy))
		schedule := testBackupSchedule("", policy)
		require.NoError(t, r.Client.Create(context.Background(), schedule))

		// test cleanup
		require.NoError(t, r.cleanupBindings(context.Background(), deployment))

		// Unrelated backup resources must keep their pre-set finalizers.
		endPolicy := &v1.AtlasBackupPolicy{}
		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(policy), endPolicy))
		assert.Equal(t, []string{customresource.FinalizerLabel}, endPolicy.Finalizers)
		endSchedule := &v1.AtlasBackupSchedule{}
		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(schedule), endSchedule))
		assert.Equal(t, []string{customresource.FinalizerLabel}, endSchedule.Finalizers)
	})

	t.Run("last deployment's referenced backups finalizers are cleaned up", func(t *testing.T) {
		r := &AtlasDeploymentReconciler{
			Log:    testLog(t),
			Client: testK8sClient(),
		}
		policy := testBackupPolicy() // deployment -> schedule -> policy
		require.NoError(t, r.Client.Create(context.Background(), policy))
		schedule := testBackupSchedule("", policy)
		deployment := testDeployment("", schedule)
		require.NoError(t, r.Client.Create(context.Background(), deployment))
		// Status is set before Create; the fake client persists the object as
		// given (no status subresource configured), so the stored schedule
		// carries this DeploymentIDs list.
		schedule.Status.DeploymentIDs = []string{deployment.Spec.AdvancedDeploymentSpec.Name}
		require.NoError(t, r.Client.Create(context.Background(), schedule))

		// test ensureBackupPolicy and cleanup: ensureBackupPolicy wires the
		// policy<->schedule bookkeeping before cleanup runs.
		_, err := r.ensureBackupPolicy(context.Background(), &workflow.Context{}, schedule, &[]watch.WatchedObject{})
		require.NoError(t, err)
		require.NoError(t, r.cleanupBindings(context.Background(), deployment))

		// The only referencing deployment is gone -> both finalizers released.
		endPolicy := &v1.AtlasBackupPolicy{}
		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(policy), endPolicy))
		assert.Empty(t, endPolicy.Finalizers, "policy should end up with no finalizer")
		endSchedule := &v1.AtlasBackupSchedule{}
		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(schedule), endSchedule))
		assert.Empty(t, endSchedule.Finalizers, "schedule should end up with no finalizer")
	})

	t.Run("referenced backups finalizers are NOT cleaned up if reachable by other deployment", func(t *testing.T) {
		r := &AtlasDeploymentReconciler{
			Log:    testLog(t),
			Client: testK8sClient(),
		}
		policy := testBackupPolicy() // deployment + deployment2 -> schedule -> policy
		require.NoError(t, r.Client.Create(context.Background(), policy))
		schedule := testBackupSchedule("", policy)
		deployment := testDeployment("", schedule)
		require.NoError(t, r.Client.Create(context.Background(), deployment))
		deployment2 := testDeployment("2", schedule)
		require.NoError(t, r.Client.Create(context.Background(), deployment2))
		// Two deployments reference the same schedule.
		schedule.Status.DeploymentIDs = []string{
			deployment.Spec.AdvancedDeploymentSpec.Name,
			deployment2.Spec.AdvancedDeploymentSpec.Name,
		}
		require.NoError(t, r.Client.Create(context.Background(), schedule))

		// test cleanup
		_, err := r.ensureBackupPolicy(context.Background(), &workflow.Context{}, schedule, &[]watch.WatchedObject{})
		require.NoError(t, err)
		require.NoError(t, r.cleanupBindings(context.Background(), deployment))

		// deployment2 still references the schedule -> nothing is released.
		endPolicy := &v1.AtlasBackupPolicy{}
		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(policy), endPolicy))
		assert.NotEmpty(t, endPolicy.Finalizers, "policy should keep the finalizer")
		endSchedule := &v1.AtlasBackupSchedule{}
		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(schedule), endSchedule))
		assert.NotEmpty(t, endSchedule.Finalizers, "schedule should keep the finalizer")
	})

	t.Run("policy finalizer stays if still referenced", func(t *testing.T) {
		r := &AtlasDeploymentReconciler{
			Log:    testLog(t),
			Client: testK8sClient(),
		}
		policy := testBackupPolicy() // deployment -> schedule + schedule2 -> policy
		require.NoError(t, r.Client.Create(context.Background(), policy))
		schedule := testBackupSchedule("", policy)
		schedule2 := testBackupSchedule("2", policy)
		deployment := testDeployment("", schedule)
		require.NoError(t, r.Client.Create(context.Background(), deployment))
		deployment2 := testDeployment("2", schedule2)
		require.NoError(t, r.Client.Create(context.Background(), deployment2))
		schedule.Status.DeploymentIDs = []string{
			deployment.Spec.AdvancedDeploymentSpec.Name,
		}
		require.NoError(t, r.Client.Create(context.Background(), schedule))
		schedule2.Status.DeploymentIDs = []string{
			deployment2.Spec.AdvancedDeploymentSpec.Name,
		}
		require.NoError(t, r.Client.Create(context.Background(), schedule2))
		// NOTE(review): this status is set on the local object AFTER the policy
		// was Created and is never persisted via Update here — presumably
		// ensureBackupPolicy below records the schedule IDs on the stored
		// policy; confirm this assignment is actually needed.
		policy.Status.BackupScheduleIDs = []string{
			fmt.Sprintf("%s/%s", schedule.Namespace, schedule.Name),
			fmt.Sprintf("%s/%s", schedule2.Namespace, schedule2.Name),
		}

		// test cleanup
		_, err := r.ensureBackupPolicy(context.Background(), &workflow.Context{}, schedule, &[]watch.WatchedObject{})
		require.NoError(t, err)
		_, err = r.ensureBackupPolicy(context.Background(), &workflow.Context{}, schedule2, &[]watch.WatchedObject{})
		require.NoError(t, err)
		require.NoError(t, r.cleanupBindings(context.Background(), deployment))

		// schedule2 still references the policy -> policy keeps its finalizer,
		// but schedule (only referenced by the removed deployment) is released.
		endPolicy := &v1.AtlasBackupPolicy{}
		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKey(policy.Namespace, policy.Name), endPolicy))
		assert.NotEmpty(t, endPolicy.Finalizers, "policy should keep the finalizer")
		endSchedule := &v1.AtlasBackupSchedule{}
		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKey(schedule.Namespace, schedule.Name), endSchedule))
		assert.Empty(t, endSchedule.Finalizers, "schedule should end up with no finalizer")
	})
}

func differentAdvancedDeployment(ns string) *mongodbatlas.AdvancedCluster {
project := testProject(ns)
deployment := v1.NewDeployment(project.Namespace, fakeDeployment, fakeDeployment)
Expand Down Expand Up @@ -477,8 +619,11 @@ func newTestDeploymentEnv(t *testing.T,

// testK8sClient returns a fake controller-runtime client whose scheme has
// the operator CRD types (and core Secret list) these tests operate on.
func testK8sClient() client.Client {
	scheme := runtime.NewScheme()
	scheme.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.SecretList{})
	scheme.AddKnownTypes(v1.GroupVersion,
		&v1.AtlasDeployment{},
		&v1.AtlasBackupSchedule{},
		&v1.AtlasBackupScheduleList{},
		&v1.AtlasBackupPolicy{},
	)
	return fake.NewClientBuilder().WithScheme(scheme).Build()
}

Expand Down Expand Up @@ -535,3 +680,59 @@ func intoServerlessAtlasCluster(serverlessSpec *v1.ServerlessSpec) *mongodbatlas
}
return ac
}

// testDeploymentName returns the namespaced name for a test deployment,
// disambiguated by the given suffix.
func testDeploymentName(suffix string) types.NamespacedName {
	return types.NamespacedName{
		Namespace: "test-namespace",
		Name:      "test-deployment" + suffix,
	}
}

// testDeployment builds an AtlasDeployment fixture (named via suffix) whose
// BackupScheduleRef points at the given schedule.
func testDeployment(suffix string, schedule *v1.AtlasBackupSchedule) *v1.AtlasDeployment {
	key := testDeploymentName(suffix)
	deployment := &v1.AtlasDeployment{
		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
	}
	deployment.Spec.AdvancedDeploymentSpec = &v1.AdvancedDeploymentSpec{
		Name: "atlas-" + key.Name,
	}
	deployment.Spec.BackupScheduleRef = common.ResourceRefNamespaced{
		Name:      schedule.Name,
		Namespace: schedule.Namespace,
	}
	return deployment
}

// testBackupSchedule builds an AtlasBackupSchedule fixture (named via suffix)
// that references the given policy and already carries the operator finalizer.
func testBackupSchedule(suffix string, policy *v1.AtlasBackupPolicy) *v1.AtlasBackupSchedule {
	schedule := &v1.AtlasBackupSchedule{}
	schedule.Name = "test-backup-schedule" + suffix
	schedule.Namespace = "test-namespace"
	schedule.Finalizers = []string{customresource.FinalizerLabel}
	schedule.Spec = v1.AtlasBackupScheduleSpec{
		PolicyRef: common.ResourceRefNamespaced{Name: policy.Name, Namespace: policy.Namespace},
	}
	return schedule
}

// testBackupPolicy builds a fixed single-item weekly AtlasBackupPolicy
// fixture that already carries the operator finalizer.
func testBackupPolicy() *v1.AtlasBackupPolicy {
	weeklyItem := v1.AtlasBackupPolicyItem{
		FrequencyType:     "weekly",
		FrequencyInterval: 1,
		RetentionUnit:     "days",
		RetentionValue:    7,
	}
	return &v1.AtlasBackupPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  "test-namespace",
			Name:       "test-backup-policy",
			Finalizers: []string{customresource.FinalizerLabel},
		},
		Spec: v1.AtlasBackupPolicySpec{
			Items: []v1.AtlasBackupPolicyItem{weeklyItem},
		},
	}
}
Loading

0 comments on commit 99b4d18

Please sign in to comment.