diff --git a/Makefile b/Makefile
index ae40316383..23594db0fb 100644
--- a/Makefile
+++ b/Makefile
@@ -155,12 +155,11 @@ manifests: fmt controller-gen $(TIMESTAMPS_DIR)/manifests ## Generate manifests
 
 .PHONY: lint
 lint:
-	golangci-lint run
+	golangci-lint run --fast
 
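+# Formatting is tracked via a timestamp file, so goimports reruns only when GO_SOURCES change.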
 $(TIMESTAMPS_DIR)/fmt: $(GO_SOURCES)
-	go fmt ./...
-	find . -name "*.go" -not -path "./vendor/*" -exec gofmt -w "{}" \;
-	find . -name "*.go" -not -path "./vendor/*" -exec goimports -local github.com/mongodb/mongodb-atlas-kubernetes -l -w "{}" \;
+	@echo "goimports -local github.com/mongodb/mongodb-atlas-kubernetes -l -w \$$(GO_SOURCES)"
+	@goimports -local github.com/mongodb/mongodb-atlas-kubernetes -l -w $(GO_SOURCES)
 	@mkdir -p $(TIMESTAMPS_DIR) && touch $@
 
 .PHONY: fmt
diff --git a/pkg/controller/atlasdeployment/advanced_deployment_test.go b/pkg/controller/atlasdeployment/advanced_deployment_test.go
index 60845cf639..4239fc95b1 100644
--- a/pkg/controller/atlasdeployment/advanced_deployment_test.go
+++ b/pkg/controller/atlasdeployment/advanced_deployment_test.go
@@ -63,7 +63,7 @@ func TestAdvancedDeploymentsEqual(t *testing.T) {
 
 		logger, _ := zap.NewProduction()
 		areEqual, _ := AdvancedDeploymentsEqual(logger.Sugar(), merged, atlas)
-		assert.True(t, areEqual, "Deploymnts should be equal")
+		assert.True(t, areEqual, "Deployments should be equal")
 	})
 }
 
diff --git a/pkg/controller/atlasdeployment/atlasdeployment_controller.go b/pkg/controller/atlasdeployment/atlasdeployment_controller.go
index 5aa18431b5..82f8fbe81a 100644
--- a/pkg/controller/atlasdeployment/atlasdeployment_controller.go
+++ b/pkg/controller/atlasdeployment/atlasdeployment_controller.go
@@ -225,19 +225,19 @@ func (r *AtlasDeploymentReconciler) checkDeploymentIsManaged(
 	project *mdbv1.AtlasProject,
 	deployment *mdbv1.AtlasDeployment,
 ) workflow.Result {
-	dply := deployment
+	advancedDeployment := deployment
 	if deployment.IsLegacyDeployment() {
-		dply = deployment.DeepCopy()
-		if err := ConvertLegacyDeployment(&dply.Spec); err != nil {
+		advancedDeployment = deployment.DeepCopy()
+		if err := ConvertLegacyDeployment(&advancedDeployment.Spec); err != nil {
 			result := workflow.Terminate(workflow.Internal, err.Error())
 			log.Errorw("failed to temporary convert legacy deployment", "error", err)
 			return result
 		}
-		dply.Spec.DeploymentSpec = nil
+		advancedDeployment.Spec.DeploymentSpec = nil
 	}
 
 	owner, err := customresource.IsOwner(
-		dply,
+		advancedDeployment,
 		r.ObjectDeletionProtection,
 		customresource.IsResourceManagedByOperator,
 		managedByAtlas(context, workflowCtx.Client, project.ID(), log),
@@ -310,11 +310,22 @@ func (r *AtlasDeploymentReconciler) handleDeletion(
 				return true, result
 			}
 		}
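+		// drop backup schedule/policy bindings held by this deployment before finishing deletion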
+		if err := r.cleanupBindings(context, deployment); err != nil {
+			result := workflow.Terminate(workflow.Internal, err.Error())
+			log.Errorw("failed to clean up deployment bindings (backups)", "error", err)
+			return true, result
+		}
 		return true, prevResult
 	}
 	return false, workflow.OK()
 }
 
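+// cleanupBindings garbage-collects the backup schedules and policies bound to a deleted
+// deployment, removing their finalizers once no deployment references them.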
+func (r *AtlasDeploymentReconciler) cleanupBindings(context context.Context, deployment *mdbv1.AtlasDeployment) error {
+	r.Log.Debug("Cleaning up deployment bindings (backups)")
+
+	return r.garbageCollectBackupResource(context, deployment.GetDeploymentName())
+}
+
 func modifyProviderSettings(pSettings *mdbv1.ProviderSettingsSpec, deploymentType string) {
 	if pSettings == nil || string(pSettings.ProviderName) == deploymentType {
 		return
diff --git a/pkg/controller/atlasdeployment/atlasdeployment_controller_test.go b/pkg/controller/atlasdeployment/atlasdeployment_controller_test.go
index 1123c8dd4a..01f43d5f1f 100644
--- a/pkg/controller/atlasdeployment/atlasdeployment_controller_test.go
+++ b/pkg/controller/atlasdeployment/atlasdeployment_controller_test.go
@@ -18,6 +18,7 @@ package atlasdeployment
 
 import (
 	"context"
+	"fmt"
 	"log"
 	"regexp"
 	"testing"
@@ -31,14 +32,18 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	v1 "github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1"
+	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1/common"
 	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1/status"
 	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/atlas"
 	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/customresource"
+	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/watch"
 	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/workflow"
+	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/util/kube"
 )
 
 const (
@@ -411,6 +416,143 @@ func TestDeleteAnnotatedDeploymentGetRemoved(t *testing.T) {
 	}
 }
 
+func TestCleanupBindings(t *testing.T) {
+	t.Run("without backup references, nothing happens on cleanup", func(t *testing.T) {
+		r := &AtlasDeploymentReconciler{
+			Log:    testLog(t),
+			Client: testK8sClient(),
+		}
+		d := &v1.AtlasDeployment{} // dummy deployment
+
+		// test cleanup
+		assert.NoError(t, r.cleanupBindings(context.Background(), d))
+	})
+
+	t.Run("with unreferenced backups, still nothing happens on cleanup", func(t *testing.T) {
+		r := &AtlasDeploymentReconciler{
+			Log:    testLog(t),
+			Client: testK8sClient(),
+		}
+		dn := testDeploymentName("") // deployment, schedule, policy (NOT connected)
+		deployment := &v1.AtlasDeployment{
+			ObjectMeta: metav1.ObjectMeta{Name: dn.Name, Namespace: dn.Namespace},
+		}
+		require.NoError(t, r.Client.Create(context.Background(), deployment))
+		policy := testBackupPolicy()
+		require.NoError(t, r.Client.Create(context.Background(), policy))
+		schedule := testBackupSchedule("", policy)
+		require.NoError(t, r.Client.Create(context.Background(), schedule))
+
+		// test cleanup
+		require.NoError(t, r.cleanupBindings(context.Background(), deployment))
+
+		endPolicy := &v1.AtlasBackupPolicy{}
+		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(policy), endPolicy))
+		assert.Equal(t, []string{customresource.FinalizerLabel}, endPolicy.Finalizers)
+		endSchedule := &v1.AtlasBackupSchedule{}
+		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(schedule), endSchedule))
+		assert.Equal(t, []string{customresource.FinalizerLabel}, endSchedule.Finalizers)
+	})
+
+	t.Run("last deployment's referenced backup finalizers are cleaned up", func(t *testing.T) {
+		r := &AtlasDeploymentReconciler{
+			Log:    testLog(t),
+			Client: testK8sClient(),
+		}
+		policy := testBackupPolicy() // deployment -> schedule -> policy
+		require.NoError(t, r.Client.Create(context.Background(), policy))
+		schedule := testBackupSchedule("", policy)
+		deployment := testDeployment("", schedule)
+		require.NoError(t, r.Client.Create(context.Background(), deployment))
+		schedule.Status.DeploymentIDs = []string{deployment.Spec.AdvancedDeploymentSpec.Name}
+		require.NoError(t, r.Client.Create(context.Background(), schedule))
+
+		// test ensureBackupPolicy and cleanup
+		_, err := r.ensureBackupPolicy(context.Background(), &workflow.Context{}, schedule, &[]watch.WatchedObject{})
+		require.NoError(t, err)
+		require.NoError(t, r.cleanupBindings(context.Background(), deployment))
+
+		endPolicy := &v1.AtlasBackupPolicy{}
+		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(policy), endPolicy))
+		assert.Empty(t, endPolicy.Finalizers, "policy should end up with no finalizer")
+		endSchedule := &v1.AtlasBackupSchedule{}
+		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(schedule), endSchedule))
+		assert.Empty(t, endSchedule.Finalizers, "schedule should end up with no finalizer")
+	})
+
+	t.Run("referenced backup finalizers are NOT cleaned up if reachable by another deployment", func(t *testing.T) {
+		r := &AtlasDeploymentReconciler{
+			Log:    testLog(t),
+			Client: testK8sClient(),
+		}
+		policy := testBackupPolicy() // deployment + deployment2 -> schedule -> policy
+		require.NoError(t, r.Client.Create(context.Background(), policy))
+		schedule := testBackupSchedule("", policy)
+		deployment := testDeployment("", schedule)
+		require.NoError(t, r.Client.Create(context.Background(), deployment))
+		deployment2 := testDeployment("2", schedule)
+		require.NoError(t, r.Client.Create(context.Background(), deployment2))
+		schedule.Status.DeploymentIDs = []string{
+			deployment.Spec.AdvancedDeploymentSpec.Name,
+			deployment2.Spec.AdvancedDeploymentSpec.Name,
+		}
+		require.NoError(t, r.Client.Create(context.Background(), schedule))
+
+		// test cleanup
+		_, err := r.ensureBackupPolicy(context.Background(), &workflow.Context{}, schedule, &[]watch.WatchedObject{})
+		require.NoError(t, err)
+		require.NoError(t, r.cleanupBindings(context.Background(), deployment))
+
+		endPolicy := &v1.AtlasBackupPolicy{}
+		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(policy), endPolicy))
+		assert.NotEmpty(t, endPolicy.Finalizers, "policy should keep the finalizer")
+		endSchedule := &v1.AtlasBackupSchedule{}
+		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKeyFromObject(schedule), endSchedule))
+		assert.NotEmpty(t, endSchedule.Finalizers, "schedule should keep the finalizer")
+	})
+
+	t.Run("policy finalizer stays if still referenced", func(t *testing.T) {
+		r := &AtlasDeploymentReconciler{
+			Log:    testLog(t),
+			Client: testK8sClient(),
+		}
+		policy := testBackupPolicy() // deployment -> schedule + schedule2 -> policy
+		require.NoError(t, r.Client.Create(context.Background(), policy))
+		schedule := testBackupSchedule("", policy)
+		schedule2 := testBackupSchedule("2", policy)
+		deployment := testDeployment("", schedule)
+		require.NoError(t, r.Client.Create(context.Background(), deployment))
+		deployment2 := testDeployment("2", schedule2)
+		require.NoError(t, r.Client.Create(context.Background(), deployment2))
+		schedule.Status.DeploymentIDs = []string{
+			deployment.Spec.AdvancedDeploymentSpec.Name,
+		}
+		require.NoError(t, r.Client.Create(context.Background(), schedule))
+		schedule2.Status.DeploymentIDs = []string{
+			deployment2.Spec.AdvancedDeploymentSpec.Name,
+		}
+		require.NoError(t, r.Client.Create(context.Background(), schedule2))
+		policy.Status.BackupScheduleIDs = []string{
+			fmt.Sprintf("%s/%s", schedule.Namespace, schedule.Name),
+			fmt.Sprintf("%s/%s", schedule2.Namespace, schedule2.Name),
+		}
+
+		// test cleanup
+		_, err := r.ensureBackupPolicy(context.Background(), &workflow.Context{}, schedule, &[]watch.WatchedObject{})
+		require.NoError(t, err)
+		_, err = r.ensureBackupPolicy(context.Background(), &workflow.Context{}, schedule2, &[]watch.WatchedObject{})
+		require.NoError(t, err)
+		require.NoError(t, r.cleanupBindings(context.Background(), deployment))
+
+		endPolicy := &v1.AtlasBackupPolicy{}
+		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKey(policy.Namespace, policy.Name), endPolicy))
+		assert.NotEmpty(t, endPolicy.Finalizers, "policy should keep the finalizer")
+		endSchedule := &v1.AtlasBackupSchedule{}
+		require.NoError(t, r.Client.Get(context.Background(), kube.ObjectKey(schedule.Namespace, schedule.Name), endSchedule))
+		assert.Empty(t, endSchedule.Finalizers, "schedule should end up with no finalizer")
+	})
+}
+
 func differentAdvancedDeployment(ns string) *mongodbatlas.AdvancedCluster {
 	project := testProject(ns)
 	deployment := v1.NewDeployment(project.Namespace, fakeDeployment, fakeDeployment)
@@ -477,8 +619,11 @@ func newTestDeploymentEnv(t *testing.T,
 
 func testK8sClient() client.Client {
 	sch := runtime.NewScheme()
-	sch.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.SecretList{})
 	sch.AddKnownTypes(v1.GroupVersion, &v1.AtlasDeployment{})
+	sch.AddKnownTypes(v1.GroupVersion, &v1.AtlasBackupSchedule{})
+	sch.AddKnownTypes(v1.GroupVersion, &v1.AtlasBackupScheduleList{})
+	sch.AddKnownTypes(v1.GroupVersion, &v1.AtlasBackupPolicy{})
+	sch.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.SecretList{})
 	return fake.NewClientBuilder().WithScheme(sch).Build()
 }
 
@@ -535,3 +680,59 @@ func intoServerlessAtlasCluster(serverlessSpec *v1.ServerlessSpec) *mongodbatlas
 	}
 	return ac
 }
+
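+// testDeploymentName returns the namespaced name used for test deployments, distinguished by an optional suffix.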
+func testDeploymentName(suffix string) types.NamespacedName {
+	return types.NamespacedName{
+		Name:      fmt.Sprintf("test-deployment%s", suffix),
+		Namespace: "test-namespace",
+	}
+}
+
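+// testDeployment builds a minimal AtlasDeployment that references the given backup schedule.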
+func testDeployment(suffix string, schedule *v1.AtlasBackupSchedule) *v1.AtlasDeployment {
+	dn := testDeploymentName(suffix)
+	return &v1.AtlasDeployment{
+		ObjectMeta: metav1.ObjectMeta{Name: dn.Name, Namespace: dn.Namespace},
+		Spec: v1.AtlasDeploymentSpec{
+			AdvancedDeploymentSpec: &v1.AdvancedDeploymentSpec{
+				Name: fmt.Sprintf("atlas-%s", dn.Name),
+			},
+			BackupScheduleRef: common.ResourceRefNamespaced{
+				Name:      schedule.Name,
+				Namespace: schedule.Namespace,
+			},
+		},
+	}
+}
+
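+// testBackupSchedule builds an AtlasBackupSchedule pointing at the given policy, pre-populated with the operator finalizer.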
+func testBackupSchedule(suffix string, policy *v1.AtlasBackupPolicy) *v1.AtlasBackupSchedule {
+	return &v1.AtlasBackupSchedule{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       fmt.Sprintf("test-backup-schedule%s", suffix),
+			Namespace:  "test-namespace",
+			Finalizers: []string{customresource.FinalizerLabel},
+		},
+		Spec: v1.AtlasBackupScheduleSpec{
+			PolicyRef: common.ResourceRefNamespaced{Name: policy.Name, Namespace: policy.Namespace},
+		},
+	}
+}
+
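+// testBackupPolicy builds a weekly AtlasBackupPolicy pre-populated with the operator finalizer.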
+func testBackupPolicy() *v1.AtlasBackupPolicy {
+	return &v1.AtlasBackupPolicy{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "test-backup-policy",
+			Namespace:  "test-namespace",
+			Finalizers: []string{customresource.FinalizerLabel},
+		},
+		Spec: v1.AtlasBackupPolicySpec{
+			Items: []v1.AtlasBackupPolicyItem{
+				{
+					FrequencyType:     "weekly",
+					FrequencyInterval: 1,
+					RetentionUnit:     "days",
+					RetentionValue:    7,
+				},
+			},
+		},
+	}
+}
diff --git a/pkg/controller/atlasdeployment/backup.go b/pkg/controller/atlasdeployment/backup.go
index ba5530de22..ab8187035e 100644
--- a/pkg/controller/atlasdeployment/backup.go
+++ b/pkg/controller/atlasdeployment/backup.go
@@ -16,12 +16,11 @@ import (
 	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/watch"
 	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/workflow"
 	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/util/compat"
+	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/util/kube"
 
 	"go.mongodb.org/atlas/mongodbatlas"
 	"golang.org/x/sync/errgroup"
 
-	"k8s.io/apimachinery/pkg/types"
-
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	mdbv1 "github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1"
@@ -60,7 +59,7 @@ func (r *AtlasDeploymentReconciler) ensureBackupScheduleAndPolicy(
 		return err
 	}
 
-	bPolicy, err := r.ensureBackupPolicy(ctx, service, *bSchedule.Spec.PolicyRef.GetObject(bSchedule.Namespace), &resourcesToWatch)
+	bPolicy, err := r.ensureBackupPolicy(ctx, service, bSchedule, &resourcesToWatch)
 	if err != nil {
 		return err
 	}
@@ -78,7 +77,7 @@ func (r *AtlasDeploymentReconciler) ensureBackupSchedule(
 	bSchedule := &mdbv1.AtlasBackupSchedule{}
 	err := r.Client.Get(ctx, *backupScheduleRef, bSchedule)
 	if err != nil {
-		return nil, fmt.Errorf("%v backupschedule resource is not found. e: %w", *backupScheduleRef, err)
+		return nil, fmt.Errorf("%v backup schedule resource is not found. e: %w", *backupScheduleRef, err)
 	}
 
 	resourceVersionIsValid := customresource.ValidateResourceVersion(service, bSchedule, r.Log)
@@ -126,13 +125,14 @@ func (r *AtlasDeploymentReconciler) ensureBackupSchedule(
 func (r *AtlasDeploymentReconciler) ensureBackupPolicy(
 	ctx context.Context,
 	service *workflow.Context,
-	bPolicyRef types.NamespacedName,
+	bSchedule *mdbv1.AtlasBackupSchedule,
 	resourcesToWatch *[]watch.WatchedObject,
 ) (*mdbv1.AtlasBackupPolicy, error) {
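+	// deriving the policy ref from the schedule lets the policy status record which schedule references it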
+	bPolicyRef := *bSchedule.Spec.PolicyRef.GetObject(bSchedule.Namespace)
 	bPolicy := &mdbv1.AtlasBackupPolicy{}
 	err := r.Client.Get(ctx, bPolicyRef, bPolicy)
 	if err != nil {
-		return nil, fmt.Errorf("unable to get backuppolicy resource %s. e: %w", bPolicyRef.String(), err)
+		return nil, fmt.Errorf("unable to get backup policy resource %s. e: %w", bPolicyRef.String(), err)
 	}
 
 	resourceVersionIsValid := customresource.ValidateResourceVersion(service, bPolicy, r.Log)
@@ -142,7 +142,8 @@ func (r *AtlasDeploymentReconciler) ensureBackupPolicy(
 		return nil, errors.New(errText)
 	}
 
-	bPolicy.UpdateStatus([]status.Condition{}, status.AtlasBackupPolicySetScheduleID(bPolicyRef.String()))
+	scheduleRef := kube.ObjectKeyFromObject(bSchedule).String()
+	bPolicy.UpdateStatus([]status.Condition{}, status.AtlasBackupPolicySetScheduleID(scheduleRef))
 
 	if err = r.Client.Status().Update(ctx, bPolicy); err != nil {
 		r.Log.Errorw("failed to update BackupPolicy status", "error", err)
@@ -251,13 +252,13 @@ func (r *AtlasDeploymentReconciler) updateBackupScheduleAndPolicy(
 	}
 
 	if equal {
-		r.Log.Debug("backupschedules are equal, nothing to change")
+		r.Log.Debug("backup schedules are equal, nothing to change")
 		return nil
 	}
 
 	r.Log.Debugf("applying backup configuration: %v", *bSchedule)
 	if _, _, err := service.Client.CloudProviderSnapshotBackupPolicies.Update(ctx, projectID, clusterName, apiScheduleReq); err != nil {
-		return fmt.Errorf("unable to create backupschedule %s. e: %w", client.ObjectKeyFromObject(bSchedule).String(), err)
+		return fmt.Errorf("unable to create backup schedule %s. e: %w", client.ObjectKeyFromObject(bSchedule).String(), err)
 	}
 	r.Log.Infof("successfully updated backup configuration for deployment %v", clusterName)
 	return nil
@@ -313,9 +314,11 @@ func (r *AtlasDeploymentReconciler) garbageCollectBackupResource(ctx context.Con
 
 				backupSchedule.UpdateStatus([]status.Condition{}, status.AtlasBackupScheduleUnsetDeploymentID(clusterName))
 
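+				// track whether this schedule just lost its last deployment reference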
+				lastScheduleRef := false
 				if len(backupSchedule.Status.DeploymentIDs) == 0 &&
 					customresource.HaveFinalizer(&backupSchedule, customresource.FinalizerLabel) {
 					customresource.UnsetFinalizer(&backupSchedule, customresource.FinalizerLabel)
+					lastScheduleRef = true
 				}
 
 				if err = r.Client.Update(ctx, &backupSchedule); err != nil {
@@ -323,7 +326,7 @@ func (r *AtlasDeploymentReconciler) garbageCollectBackupResource(ctx context.Con
 					return err
 				}
 
-				if backupSchedule.DeletionTimestamp.IsZero() {
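+				// only move on to policy cleanup when the schedule's last deployment reference was just removed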
+				if !lastScheduleRef {
 					continue
 				}
 
@@ -334,7 +337,8 @@ func (r *AtlasDeploymentReconciler) garbageCollectBackupResource(ctx context.Con
 					return fmt.Errorf("failed to retrieve list of backup schedules: %w", err)
 				}
 
-				bPolicy.UpdateStatus([]status.Condition{}, status.AtlasBackupPolicyUnsetScheduleID(bPolicyRef.String()))
+				scheduleRef := kube.ObjectKeyFromObject(&backupSchedule).String()
+				bPolicy.UpdateStatus([]status.Condition{}, status.AtlasBackupPolicyUnsetScheduleID(scheduleRef))
 
 				if len(bPolicy.Status.BackupScheduleIDs) == 0 &&
 					customresource.HaveFinalizer(bPolicy, customresource.FinalizerLabel) {
diff --git a/test/int/deployment_test.go b/test/int/deployment_test.go
index 024b0943fd..77153f327f 100644
--- a/test/int/deployment_test.go
+++ b/test/int/deployment_test.go
@@ -8,6 +8,7 @@ import (
 	"strconv"
 	"time"
 
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 
 	"github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/connectionsecret"
@@ -46,14 +47,14 @@ const (
 	PublicAPIKey            = "publicApiKey"
 )
 
-var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
-	const (
-		interval      = PollingInterval
-		intervalShort = time.Second * 2
-	)
+const (
+	interval      = PollingInterval
+	intervalShort = time.Second * 2
+)
 
+var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment", "deployment-non-backups"), func() {
 	var (
-		connectionSecret  corev1.Secret
+		connectionSecret  *corev1.Secret
 		createdProject    *mdbv1.AtlasProject
 		createdDeployment *mdbv1.AtlasDeployment
 		lastGeneration    int64
@@ -68,29 +69,8 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 		lastGeneration = 0
 		manualDeletion = false
 
-		connectionSecret = corev1.Secret{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      ConnectionSecretName,
-				Namespace: namespace.Name,
-				Labels: map[string]string{
-					connectionsecret.TypeLabelKey: connectionsecret.CredLabelVal,
-				},
-			},
-			StringData: map[string]string{OrgID: connection.OrgID, PublicAPIKey: connection.PublicKey, PrivateAPIKey: connection.PrivateKey},
-		}
-		By(fmt.Sprintf("Creating the Secret %s", kube.ObjectKeyFromObject(&connectionSecret)))
-		Expect(k8sClient.Create(context.Background(), &connectionSecret)).To(Succeed())
-
-		createdProject = mdbv1.DefaultProject(namespace.Name, connectionSecret.Name).WithIPAccessList(project.NewIPAccessList().WithCIDR("0.0.0.0/0"))
-		if DeploymentDevMode {
-			// While developing tests we need to reuse the same project
-			createdProject.Spec.Name = "dev-test atlas-project"
-		}
-		By("Creating the project " + createdProject.Name)
-		Expect(k8sClient.Create(context.Background(), createdProject)).To(Succeed())
-		Eventually(func() bool {
-			return testutil.CheckCondition(k8sClient, createdProject, status.TrueCondition(status.ReadyType))
-		}).WithTimeout(30 * time.Minute).WithPolling(interval).Should(BeTrue())
+		connectionSecret = createConnectionSecret()
+		createdProject = createProject(connectionSecret)
 	})
 
 	AfterEach(func() {
@@ -99,7 +79,7 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 		}
 		if manualDeletion && createdProject != nil {
 			By("Deleting the deployment in Atlas manually", func() {
-				// We need to remove the deployment in Atlas manually to let project get removed
+				// We need to remove the deployment in Atlas to let the project be removed
 				_, err := atlasClient.AdvancedClusters.Delete(context.Background(), createdProject.ID(), createdDeployment.GetDeploymentName(), nil)
 				Expect(err).NotTo(HaveOccurred())
 				Eventually(checkAtlasDeploymentRemoved(createdProject.Status.ID, createdDeployment.GetDeploymentName()), 600, interval).Should(BeTrue())
@@ -108,19 +88,10 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 		}
 		if createdProject != nil && createdProject.Status.ID != "" {
 			if createdDeployment != nil {
-				By("Removing Atlas Deployment " + createdDeployment.Name)
-				Expect(k8sClient.Delete(context.Background(), createdDeployment)).To(Succeed())
-				deploymentName := createdDeployment.GetDeploymentName()
-				if customresource.ResourceShouldBeLeftInAtlas(createdDeployment) || customresource.ReconciliationShouldBeSkipped(createdDeployment) {
-					By("Removing Atlas Deployment " + createdDeployment.Name + " from Atlas manually")
-					Expect(deleteAtlasDeployment(createdProject.Status.ID, deploymentName)).To(Succeed())
-				}
-				Eventually(checkAtlasDeploymentRemoved(createdProject.Status.ID, deploymentName), 600, interval).Should(BeTrue())
+				deleteDeploymentFromKubernetes(createdProject, createdDeployment)
 			}
 
-			By("Removing Atlas Project " + createdProject.Status.ID)
-			Expect(k8sClient.Delete(context.Background(), createdProject)).To(Succeed())
-			Eventually(checkAtlasProjectRemoved(createdProject.Status.ID), 60, interval).Should(BeTrue())
+			deleteProjectFromKubernetes(createdProject)
 		}
 		removeControllersAndNamespace()
 	})
@@ -265,7 +236,7 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 
 			By("Filling token secret with invalid data", func() {
 				secret := &corev1.Secret{}
-				Expect(k8sClient.Get(context.Background(), kube.ObjectKeyFromObject(&connectionSecret), secret)).To(Succeed())
+				Expect(k8sClient.Get(context.Background(), kube.ObjectKeyFromObject(connectionSecret), secret)).To(Succeed())
 				secret.StringData = map[string]string{
 					OrgID: "fake", PrivateAPIKey: "fake", PublicAPIKey: "fake",
 				}
@@ -865,7 +836,7 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 				lastGeneration++
 			})
 
-			By(fmt.Sprintf("Update Instance Size Margins with AutoScaling for Deployemnt %s", kube.ObjectKeyFromObject(createdDeployment)), func() {
+			By(fmt.Sprintf("Update Instance Size Margins with AutoScaling for Deployment %s", kube.ObjectKeyFromObject(createdDeployment)), func() {
 				regionConfig := createdDeployment.Spec.AdvancedDeploymentSpec.ReplicationSpecs[0].RegionConfigs[0]
 				regionConfig.AutoScaling.Compute.MinInstanceSize = "M20"
 				regionConfig.ElectableSpecs.InstanceSize = "M20"
@@ -1028,10 +999,32 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 			})
 		})
 	})
+})
 
-	Describe("Create default deployment with backups enabled", func() {
-		It("Should succeed", func() {
-			backupPolicyDefault := &mdbv1.AtlasBackupPolicy{
+var _ = Describe("AtlasDeployment", Ordered, Label("int", "AtlasDeployment", "deployment-backups"), func() {
+	var (
+		connectionSecret  *corev1.Secret
+		createdProject    *mdbv1.AtlasProject
+		createdDeployment *mdbv1.AtlasDeployment
+
+		backupPolicyDefault   *mdbv1.AtlasBackupPolicy
+		backupScheduleDefault *mdbv1.AtlasBackupSchedule
+	)
+
+	BeforeAll(func() {
+		prepareControllers(false)
+		connectionSecret = createConnectionSecret()
+		createdProject = createProject(connectionSecret)
+	})
+
+	AfterAll(func() {
+		deleteProjectFromKubernetes(createdProject)
+		removeControllersAndNamespace()
+	})
+
+	Describe("Create default deployment with backups enabled", Label("basic-backups"), func() {
+		BeforeEach(func() {
+			backupPolicyDefault = &mdbv1.AtlasBackupPolicy{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "policy-1",
 					Namespace: namespace.Name,
@@ -1049,7 +1042,7 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 				Status: status.BackupPolicyStatus{},
 			}
 
-			backupScheduleDefault := &mdbv1.AtlasBackupSchedule{
+			backupScheduleDefault = &mdbv1.AtlasBackupSchedule{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "schedule-1",
 					Namespace: namespace.Name,
@@ -1074,7 +1067,15 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 				Name:      backupScheduleDefault.Name,
 				Namespace: backupScheduleDefault.Namespace,
 			})
+		})
 
+		AfterEach(func() {
+			deleteDeploymentFromKubernetes(createdProject, createdDeployment)
+			deleteBackupDefsFromKubernetes(backupScheduleDefault, backupPolicyDefault)
+		})
+
+		It("Should succeed", func() {
 			By(fmt.Sprintf("Creating deployment with backups enabled: %s", kube.ObjectKeyFromObject(createdDeployment)), func() {
 				Expect(k8sClient.Create(context.Background(), createdDeployment)).NotTo(HaveOccurred())
 
@@ -1121,8 +1122,8 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 		})
 	})
 
-	Describe("Create deployment with backups enabled and snapshot distribution", func() {
-		It("Should succeed", func() {
+	Describe("Create deployment with backups enabled and snapshot distribution", Label("snapshot-distribution"), func() {
+		BeforeEach(func() {
 			By("Creating deployment with backups enabled", func() {
 				createdDeployment = mdbv1.DefaultAwsAdvancedDeployment(namespace.Name, createdProject.Name)
 				createdDeployment.Spec.AdvancedDeploymentSpec.BackupEnabled = toptr.MakePtr(true)
@@ -1151,7 +1152,7 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 
 				replicaSetID := createdDeployment.Status.ReplicaSets[0].ID
 
-				backupPolicyDefault := &mdbv1.AtlasBackupPolicy{
+				backupPolicyDefault = &mdbv1.AtlasBackupPolicy{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      "policy-1",
 						Namespace: namespace.Name,
@@ -1168,7 +1169,7 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 					},
 					Status: status.BackupPolicyStatus{},
 				}
-				backupScheduleDefault := &mdbv1.AtlasBackupSchedule{
+				backupScheduleDefault = &mdbv1.AtlasBackupSchedule{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      bScheduleName,
 						Namespace: namespace.Name,
@@ -1202,28 +1203,36 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() {
 					Namespace: namespace.Name,
 				}
 				Expect(k8sClient.Update(context.Background(), createdDeployment)).NotTo(HaveOccurred())
+			})
+		})
 
-				Eventually(func(g Gomega) {
-					atlasCluster, _, err := atlasClient.AdvancedClusters.Get(context.Background(), createdProject.ID(), createdDeployment.Spec.AdvancedDeploymentSpec.Name)
-					g.Expect(err).Should(BeNil())
-					g.Expect(atlasCluster.StateName).Should(Equal("IDLE"))
-					g.Expect(*atlasCluster.BackupEnabled).Should(BeTrue())
+		AfterEach(func() {
+			deleteDeploymentFromKubernetes(createdProject, createdDeployment)
+			deleteBackupDefsFromKubernetes(backupScheduleDefault, backupPolicyDefault)
+		})
 
-					atlasBSchedule, _, err := atlasClient.CloudProviderSnapshotBackupPolicies.Get(context.Background(), createdProject.ID(), createdDeployment.Spec.AdvancedDeploymentSpec.Name)
-					g.Expect(err).Should(BeNil())
-					g.Expect(len(atlasBSchedule.CopySettings)).ShouldNot(Equal(0))
-					g.Expect(atlasBSchedule.CopySettings[0]).
-						Should(Equal(
-							mongodbatlas.CopySetting{
-								CloudProvider:     toptr.MakePtr("AWS"),
-								RegionName:        toptr.MakePtr("US_WEST_1"),
-								ReplicationSpecID: toptr.MakePtr(replicaSetID),
-								ShouldCopyOplogs:  toptr.MakePtr(false),
-								Frequencies:       []string{"MONTHLY"},
-							},
-						))
-				}).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Not(HaveOccurred()))
-			})
+		It("Should succeed", func() {
+			replicaSetID := createdDeployment.Status.ReplicaSets[0].ID
+			Eventually(func(g Gomega) {
+				atlasCluster, _, err := atlasClient.AdvancedClusters.Get(context.Background(), createdProject.ID(), createdDeployment.Spec.AdvancedDeploymentSpec.Name)
+				g.Expect(err).Should(BeNil())
+				g.Expect(atlasCluster.StateName).Should(Equal("IDLE"))
+				g.Expect(*atlasCluster.BackupEnabled).Should(BeTrue())
+
+				atlasBSchedule, _, err := atlasClient.CloudProviderSnapshotBackupPolicies.Get(context.Background(), createdProject.ID(), createdDeployment.Spec.AdvancedDeploymentSpec.Name)
+				g.Expect(err).Should(BeNil())
+				g.Expect(len(atlasBSchedule.CopySettings)).ShouldNot(Equal(0))
+				g.Expect(atlasBSchedule.CopySettings[0]).
+					Should(Equal(
+						mongodbatlas.CopySetting{
+							CloudProvider:     toptr.MakePtr("AWS"),
+							RegionName:        toptr.MakePtr("US_WEST_1"),
+							ReplicationSpecID: toptr.MakePtr(replicaSetID),
+							ShouldCopyOplogs:  toptr.MakePtr(false),
+							Frequencies:       []string{"MONTHLY"},
+						},
+					))
+			}).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Not(HaveOccurred()))
 		})
 	})
 })
@@ -1329,3 +1338,74 @@ func float64ptr(f float64) *float64 {
 func boolptr(b bool) *bool {
 	return &b
 }
+
+func createConnectionSecret() *corev1.Secret {
+	connectionSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      ConnectionSecretName,
+			Namespace: namespace.Name,
+			Labels: map[string]string{
+				connectionsecret.TypeLabelKey: connectionsecret.CredLabelVal,
+			},
+		},
+		StringData: map[string]string{OrgID: connection.OrgID, PublicAPIKey: connection.PublicKey, PrivateAPIKey: connection.PrivateKey},
+	}
+	// construct the secret before logging so the By message shows its actual object key
+	By(fmt.Sprintf("Creating the Secret %s", kube.ObjectKeyFromObject(connectionSecret)), func() {
+		Expect(k8sClient.Create(context.Background(), connectionSecret)).To(Succeed())
+	})
+	return connectionSecret
+}
+
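+// createProject creates a default AtlasProject using the given connection secret and waits for it to become ready.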
+func createProject(connectionSecret *corev1.Secret) *mdbv1.AtlasProject {
+	createdProject := mdbv1.DefaultProject(namespace.Name, connectionSecret.Name).WithIPAccessList(project.NewIPAccessList().WithCIDR("0.0.0.0/0"))
+	By("Creating the project "+createdProject.Name, func() {
+		if DeploymentDevMode {
+			// While developing tests we need to reuse the same project
+			createdProject.Spec.Name = "dev-test atlas-project"
+		}
+		Expect(k8sClient.Create(context.Background(), createdProject)).To(Succeed())
+		Eventually(func() bool {
+			return testutil.CheckCondition(k8sClient, createdProject, status.TrueCondition(status.ReadyType))
+		}).WithTimeout(30 * time.Minute).WithPolling(interval).Should(BeTrue())
+	})
+	return createdProject
+}
+
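+// deleteBackupDefsFromKubernetes deletes the backup schedule and policy and waits until both
+// are gone, which requires their finalizers to have been removed already.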
+func deleteBackupDefsFromKubernetes(schedule *mdbv1.AtlasBackupSchedule, policy *mdbv1.AtlasBackupPolicy) {
+	By("Deleting the schedule and policy in Kubernetes (should have no finalizers by now)", func() {
+		Expect(k8sClient.Delete(context.Background(), schedule)).NotTo(HaveOccurred())
+		Expect(k8sClient.Delete(context.Background(), policy)).NotTo(HaveOccurred())
+
+		policyRef := kube.ObjectKey(policy.Namespace, policy.Name)
+		Eventually(func() bool {
+			p := &mdbv1.AtlasBackupPolicy{}
+			return k8serrors.IsNotFound(k8sClient.Get(context.Background(), policyRef, p))
+		}).WithTimeout(30 * time.Second).WithPolling(PollingInterval).Should(BeTrue())
+
+		scheduleRef := kube.ObjectKey(schedule.Namespace, schedule.Name)
+		Eventually(func() bool {
+			s := &mdbv1.AtlasBackupSchedule{}
+			return k8serrors.IsNotFound(k8sClient.Get(context.Background(), scheduleRef, s))
+		}).WithTimeout(30 * time.Second).WithPolling(PollingInterval).Should(BeTrue())
+	})
+}
+
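+// deleteDeploymentFromKubernetes deletes the deployment resource and waits until the deployment is removed from Atlas.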
+func deleteDeploymentFromKubernetes(project *mdbv1.AtlasProject, deployment *mdbv1.AtlasDeployment) {
+	By(fmt.Sprintf("Removing Atlas Deployment %q", deployment.Name), func() {
+		Expect(k8sClient.Delete(context.Background(), deployment)).To(Succeed())
+		deploymentName := deployment.GetDeploymentName()
+		if customresource.ResourceShouldBeLeftInAtlas(deployment) || customresource.ReconciliationShouldBeSkipped(deployment) {
+			By("Removing Atlas Deployment " + deployment.Name + " from Atlas manually")
+			Expect(deleteAtlasDeployment(project.Status.ID, deploymentName)).To(Succeed())
+		}
+		Eventually(checkAtlasDeploymentRemoved(project.Status.ID, deploymentName), 600, interval).Should(BeTrue())
+	})
+}
+
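+// deleteProjectFromKubernetes deletes the project resource and waits until the project is removed from Atlas.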
+func deleteProjectFromKubernetes(project *mdbv1.AtlasProject) {
+	By(fmt.Sprintf("Removing Atlas Project %s", project.Status.ID), func() {
+		Expect(k8sClient.Delete(context.Background(), project)).To(Succeed())
+		Eventually(checkAtlasProjectRemoved(project.Status.ID), 60, interval).Should(BeTrue())
+	})
+}