From 6ab815a26070dd5c294a462b29504d382b1da00d Mon Sep 17 00:00:00 2001 From: Annaraya Narasagond Date: Thu, 2 Jan 2025 23:31:59 +0530 Subject: [PATCH 01/24] e2e: adding testcases with recipe Signed-off-by: Annaraya Narasagond --- e2e/deployers/discoveredapp.go | 225 ++++++++++++++++++++++++++++++++- e2e/dractions/discovered.go | 11 ++ e2e/exhaustive_suite_test.go | 15 ++- e2e/go.mod | 13 +- e2e/go.sum | 48 +++---- e2e/util/context.go | 5 + 6 files changed, 283 insertions(+), 34 deletions(-) diff --git a/e2e/deployers/discoveredapp.go b/e2e/deployers/discoveredapp.go index 369d6eb46..c87e1162f 100644 --- a/e2e/deployers/discoveredapp.go +++ b/e2e/deployers/discoveredapp.go @@ -4,17 +4,36 @@ package deployers import ( + "context" "fmt" "os" "os/exec" "github.com/ramendr/ramen/e2e/types" "github.com/ramendr/ramen/e2e/util" + recipe "github.com/ramendr/recipe/api/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8stypes "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) -type DiscoveredApp struct{} +const timeout = 300 + +type DiscoveredApp struct { + IncludeRecipe bool + IncludeHooks bool +} func (d DiscoveredApp) GetName() string { + if d.IncludeRecipe { + if d.IncludeHooks { + return "disapp-recipe-hooks" + } + + return "disapp-recipe" + } + return "disapp" } @@ -68,6 +87,16 @@ func (d DiscoveredApp) Deploy(ctx types.Context) error { log.Info("Workload deployed") + // recipe needs to be created based on flags + if d.IncludeRecipe { + recipeName := ctx.Name() + "-recipe" + if err := createRecipe(recipeName, appNamespace, d.IncludeHooks); err != nil { + log.Info("recipe creation failed") + } + + log.Info("recipe created on both dr clusters") + } + return nil } @@ -96,6 +125,22 @@ func (d DiscoveredApp) Undeploy(ctx types.Context) error { return err } + if d.IncludeRecipe { + recipeName := ctx.Name() + "-recipe" + + log.Infof("Deleting recipe on cluster %q", drpolicy.Spec.DRClusters[0]) + + if err := deleteRecipe(util.Ctx.C1.Client, recipeName, appNamespace); err != nil { + return err + } + + log.Infof("Deleting recipe on cluster %q", drpolicy.Spec.DRClusters[1]) + + if err := deleteRecipe(util.Ctx.C2.Client, recipeName, appNamespace); err != nil { + return err + } + } + log.Infof("Deleting namespace %q on cluster %q", appNamespace, drpolicy.Spec.DRClusters[0]) // delete namespace on both clusters @@ -117,3 +162,181 @@ func (d DiscoveredApp) Undeploy(ctx types.Context) error { func (d DiscoveredApp) IsDiscovered() bool { return true } + +func createRecipe(name, namespace string, includeHooks bool) error { + var recipe *recipe.Recipe + if includeHooks { + recipe = getRecipeWithHooks(name, namespace) + } else { + recipe = getRecipeWithoutHooks(name, namespace) + } + + err := util.Ctx.C1.Client.Create(context.Background(), recipe) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("recipe " + name + " already exists" + " in the cluster " + "C1") + } + + err = util.Ctx.C2.Client.Create(context.Background(), recipe) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("recipe " + name + " already exists" + " in the cluster " + "C2") + } + + return nil +} + +func getRecipeWithoutHooks(name, namespace string) *recipe.Recipe { + return &recipe.Recipe{ + TypeMeta: metav1.TypeMeta{ + Kind: "Recipe", + APIVersion: "ramendr.openshift.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: 
recipe.RecipeSpec{ + AppType: "busybox", + Groups: []*recipe.Group{ + { + Name: "rg1", + Type: "resource", + BackupRef: "rg1", + IncludedNamespaces: []string{ + namespace, + }, + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "appname", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"busybox"}, + }, + }, + }, + }, + }, + Workflows: []*recipe.Workflow{ + { + Name: "backup", + Sequence: []map[string]string{ + { + "group": "rg1", + }, + }, + }, + { + Name: "restore", + Sequence: []map[string]string{ + { + "group": "rg1", + }, + }, + }, + }, + }, + } +} + +func getRecipeWithHooks(name, namespace string) *recipe.Recipe { + return &recipe.Recipe{ + TypeMeta: metav1.TypeMeta{ + Kind: "Recipe", + APIVersion: "ramendr.openshift.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: recipe.RecipeSpec{ + AppType: "busybox", + Groups: []*recipe.Group{ + { + Name: "rg1", + Type: "resource", + BackupRef: "rg1", + IncludedNamespaces: []string{ + namespace, + }, + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "appname", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"busybox"}, + }, + }, + }, + }, + }, + Hooks: []*recipe.Hook{ + getHookSpec(namespace, "backup"), + getHookSpec(namespace, "restore"), + }, + Workflows: []*recipe.Workflow{ + { + Name: "backup", + Sequence: []map[string]string{ + { + "hook": "backup/check-replicas", + }, + { + "group": "rg1", + }, + }, + }, + { + Name: "restore", + Sequence: []map[string]string{ + { + "group": "rg1", + }, + { + "hook": "restore/check-replicas", + }, + }, + }, + }, + }, + } +} + +func getHookSpec(namespace, hookType string) *recipe.Hook { + return &recipe.Hook{ + Name: hookType, + Type: "check", + Namespace: namespace, + NameSelector: "busybox", + SelectResource: "deployment", + Timeout: timeout, + Chks: []*recipe.Check{ + { + Name: "check-replicas", + Condition: "{$.spec.replicas} == {$.status.readyReplicas}", + }, + }, + } +} + +func deleteRecipe(client client.Client, name, namespace string) error { + r := &recipe.Recipe{} + key := k8stypes.NamespacedName{Namespace: namespace, Name: name} + + err := client.Get(context.Background(), key, r) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + return nil + } + + return client.Delete(context.Background(), r) +} diff --git a/e2e/dractions/discovered.go b/e2e/dractions/discovered.go index 84b5ab854..b365e2d87 100644 --- a/e2e/dractions/discovered.go +++ b/e2e/dractions/discovered.go @@ -47,6 +47,17 @@ func EnableProtectionDiscoveredApps(ctx types.Context) error { drpc := generateDRPCDiscoveredApps( name, managementNamespace, clusterName, drPolicyName, placementName, appname, appNamespace) + + if v, ok := ctx.Deployer().(deployers.DiscoveredApp); ok { + if v.IncludeRecipe { + recipeName := name + "-recipe" + drpc.Spec.KubeObjectProtection.RecipeRef = &ramen.RecipeRef{ + Namespace: appNamespace, + Name: recipeName, + } + } + } + if err = createDRPC(util.Ctx.Hub.Client, drpc); err != nil { return err } diff --git a/e2e/exhaustive_suite_test.go b/e2e/exhaustive_suite_test.go index cd794f56d..675fd9b2b 100644 --- a/e2e/exhaustive_suite_test.go +++ b/e2e/exhaustive_suite_test.go @@ -25,11 +25,16 @@ const ( ) var ( - Workloads = []types.Workload{} - subscription = &deployers.Subscription{} - appset = &deployers.ApplicationSet{} - discoveredApps = &deployers.DiscoveredApp{} - Deployers = []types.Deployer{subscription, appset, 
discoveredApps} + Workloads = []types.Workload{} + subscription = &deployers.Subscription{} + appset = &deployers.ApplicationSet{} + discoveredApps = &deployers.DiscoveredApp{} + discoveredAppsWithoutHook = &deployers.DiscoveredApp{IncludeRecipe: true, IncludeHooks: false} + discoveredAppsWithHook = &deployers.DiscoveredApp{IncludeRecipe: true, IncludeHooks: true} + Deployers = []types.Deployer{ + subscription, appset, discoveredApps, discoveredAppsWithoutHook, + discoveredAppsWithHook, + } ) func generateWorkloads([]types.Workload) { diff --git a/e2e/go.mod b/e2e/go.mod index 76229bff2..1f6e35e47 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -6,6 +6,7 @@ toolchain go1.22.7 require ( github.com/ramendr/ramen/api v0.0.0-00010101000000-000000000000 + github.com/ramendr/recipe v0.0.0-20241009174526-5cecfd571447 github.com/spf13/viper v1.19.0 go.uber.org/zap v1.27.0 k8s.io/api v0.31.1 @@ -21,7 +22,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.12.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect @@ -57,13 +58,13 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.5.0 // indirect + golang.org/x/time v0.6.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect @@ -71,8 +72,8 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/component-base v0.31.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240411171206-dc4e619f62f3 // indirect - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/e2e/go.sum b/e2e/go.sum index f5a24ed73..a1225992c 100644 --- a/e2e/go.sum +++ b/e2e/go.sum @@ -6,8 +6,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= -github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod 
h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -45,8 +45,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= -github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -59,6 +59,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -88,14 +90,16 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= +github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/ramendr/recipe 
v0.0.0-20241009174526-5cecfd571447 h1:RSb0XKjpxH0qln4a8Ebm5TTtrW2E3uLhdJs6FSMf8ik= +github.com/ramendr/recipe v0.0.0-20241009174526-5cecfd571447/go.mod h1:dGXrk743fq6VG8u6lflEce7ITM7d/9xSBeAbI2RXl9s= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= @@ -137,8 +141,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= -golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -147,8 +151,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -163,14 +167,14 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -197,8 +201,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= -k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= -k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= +k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= @@ -207,12 +211,12 @@ k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240411171206-dc4e619f62f3 h1:SbdLaI6mM6ffDSJCadEaD4IkuPzepLDGlkd2xV0t1uA= -k8s.io/kube-openapi v0.0.0-20240411171206-dc4e619f62f3/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= k8s.io/kubectl v0.31.1 h1:ih4JQJHxsEggFqDJEHSOdJ69ZxZftgeZvYo7M/cpp24= k8s.io/kubectl v0.31.1/go.mod h1:aNuQoR43W6MLAtXQ/Bu4GDmoHlbhHKuyD49lmTC8eJM= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/api v0.15.0 h1:lRee1KOlGHZb2scTA7ff9E9Fxt2hJc7jpkHnaCbvkOU= open-cluster-management.io/api v0.15.0/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM= open-cluster-management.io/multicloud-operators-channel v0.15.0 h1:5DjxcZrhQhS/+A+zYx0/tUqKqE8m4mped3Gb9aRt6CE= diff --git a/e2e/util/context.go b/e2e/util/context.go index 80c08af25..6f18ff34f 100644 --- a/e2e/util/context.go +++ b/e2e/util/context.go @@ -20,6 +20,7 @@ import 
( ramen "github.com/ramendr/ramen/api/v1alpha1" argocdv1alpha1hack "github.com/ramendr/ramen/e2e/argocd" + recipe "github.com/ramendr/recipe/api/v1alpha1" subscription "open-cluster-management.io/multicloud-operators-subscription/pkg/apis" placementrule "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1" ) @@ -60,6 +61,10 @@ func addToScheme(scheme *runtime.Scheme) error { return err } + if err := recipe.AddToScheme(scheme); err != nil { + return err + } + return ramen.AddToScheme(scheme) } From 59eb075321902fb693a1474f8a55894da0811d49 Mon Sep 17 00:00:00 2001 From: Annaraya Narasagond Date: Wed, 8 Jan 2025 19:40:21 +0530 Subject: [PATCH 02/24] e2e: adding volumes section to recipe Signed-off-by: Annaraya Narasagond --- e2e/deployers/discoveredapp.go | 177 ++++++++++++++++++++++++++++++--- e2e/exhaustive_suite_test.go | 18 ++-- 2 files changed, 175 insertions(+), 20 deletions(-) diff --git a/e2e/deployers/discoveredapp.go b/e2e/deployers/discoveredapp.go index c87e1162f..2cb359cf1 100644 --- a/e2e/deployers/discoveredapp.go +++ b/e2e/deployers/discoveredapp.go @@ -12,7 +12,10 @@ import ( "github.com/ramendr/ramen/e2e/types" "github.com/ramendr/ramen/e2e/util" recipe "github.com/ramendr/recipe/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8stypes "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -21,13 +24,18 @@ import ( const timeout = 300 type DiscoveredApp struct { - IncludeRecipe bool - IncludeHooks bool + IncludeRecipe bool + IncludeHooks bool + IncludeVolumes bool } func (d DiscoveredApp) GetName() string { if d.IncludeRecipe { if d.IncludeHooks { + if d.IncludeVolumes { + return "disapp-rhv" + } + return "disapp-recipe-hooks" } @@ -90,16 +98,132 @@ func (d DiscoveredApp) Deploy(ctx types.Context) error { // recipe needs to be created based on flags if d.IncludeRecipe { recipeName := ctx.Name() + "-recipe" - if err := createRecipe(recipeName, appNamespace, d.IncludeHooks); err != nil { + if err := d.createRecipe(recipeName, appNamespace); err != nil { log.Info("recipe creation failed") } log.Info("recipe created on both dr clusters") } + if d.IncludeHooks && d.IncludeRecipe && d.IncludeVolumes { + deployment := getDeployment(appNamespace) + err := util.Ctx.C1.Client.Create(context.Background(), deployment) + if err != nil { + log.Error("error during creation of deployment") + } + + pvc := getPvc(appNamespace) + err = util.Ctx.C1.Client.Create(context.Background(), pvc) + if err != nil { + log.Error("error during creation of pvc") + } + } + return nil } +func getPvc(ns string) *corev1.PersistentVolumeClaim { + scName := "rook-ceph-block" + return &corev1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "PersistentVolumeClaim", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox-pvc-vol", + Namespace: ns, + Labels: map[string]string{ + "appname": "busybox-vol", + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + StorageClassName: &scName, + }, + } +} + +func getDeployment(ns string) *appsv1.Deployment { + var i int32 = 1 + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: 
"Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "appname": "busybox-vol", + }, + Name: "busybox-vol", + Namespace: ns, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &i, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "appname": "busybox-vol", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "appname": "busybox-vol", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Command: []string{ + "sh", + "-c", + `emit() { + echo "$(date) $1" | tee -a /var/log/ramen.log + sync + } + trap "emit STOP; exit" TERM + emit START + while true; do + sleep 10 & wait + emit UPDATE + done`, + }, + Image: "quay.io/nirsof/busybox:stable", + ImagePullPolicy: "IfNotPresent", + Name: "logger", + TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: "File", + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/var/log", + Name: "varlog", + }, + }, + }, + }, + DNSPolicy: corev1.DNSClusterFirst, + Volumes: []corev1.Volume{ + { + Name: "varlog", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "busybox-pvc-vol", + }, + }, + }, + }, + }, + }, + }, + } +} + // Undeploy deletes the workload from the managed clusters. func (d DiscoveredApp) Undeploy(ctx types.Context) error { log := ctx.Logger() @@ -163,15 +287,25 @@ func (d DiscoveredApp) IsDiscovered() bool { return true } -func createRecipe(name, namespace string, includeHooks bool) error { - var recipe *recipe.Recipe - if includeHooks { +func (d DiscoveredApp) getRecipe(name, namespace string) *recipe.Recipe { + var recipe recipe.Recipe + if d.IncludeHooks { recipe = getRecipeWithHooks(name, namespace) + if d.IncludeVolumes { + volumes := getVolumes(namespace) + + recipe.Spec.Volumes = volumes + // along with these changes another namespace or within the same ns, + // pod and pvc should be created which recipe volumes will refer to + } } else { recipe = getRecipeWithoutHooks(name, namespace) } + return &recipe +} - err := util.Ctx.C1.Client.Create(context.Background(), recipe) +func (d DiscoveredApp) createRecipe(name, namespace string) error { + err := util.Ctx.C1.Client.Create(context.Background(), d.getRecipe(name, namespace)) if err != nil { if !errors.IsAlreadyExists(err) { return err @@ -180,7 +314,7 @@ func createRecipe(name, namespace string, includeHooks bool) error { util.Ctx.Log.Info("recipe " + name + " already exists" + " in the cluster " + "C1") } - err = util.Ctx.C2.Client.Create(context.Background(), recipe) + err = util.Ctx.C2.Client.Create(context.Background(), d.getRecipe(name, namespace)) if err != nil { if !errors.IsAlreadyExists(err) { return err @@ -192,8 +326,27 @@ func createRecipe(name, namespace string, includeHooks bool) error { return nil } -func getRecipeWithoutHooks(name, namespace string) *recipe.Recipe { - return &recipe.Recipe{ +func getVolumes(ns string) *recipe.Group { + return &recipe.Group{ + IncludedNamespaces: []string{ + ns, + }, + Name: "volumes-test", + Type: "volume", + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "appname", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"busybox-vol"}, + }, + }, + }, + } +} + +func getRecipeWithoutHooks(name, namespace string) recipe.Recipe { + return recipe.Recipe{ TypeMeta: metav1.TypeMeta{ Kind: "Recipe", APIVersion: "ramendr.openshift.io/v1alpha1", @@ -245,8 +398,8 @@ func 
getRecipeWithoutHooks(name, namespace string) *recipe.Recipe { } } -func getRecipeWithHooks(name, namespace string) *recipe.Recipe { - return &recipe.Recipe{ +func getRecipeWithHooks(name, namespace string) recipe.Recipe { + return recipe.Recipe{ TypeMeta: metav1.TypeMeta{ Kind: "Recipe", APIVersion: "ramendr.openshift.io/v1alpha1", diff --git a/e2e/exhaustive_suite_test.go b/e2e/exhaustive_suite_test.go index 675fd9b2b..6011924a6 100644 --- a/e2e/exhaustive_suite_test.go +++ b/e2e/exhaustive_suite_test.go @@ -25,15 +25,17 @@ const ( ) var ( - Workloads = []types.Workload{} - subscription = &deployers.Subscription{} - appset = &deployers.ApplicationSet{} - discoveredApps = &deployers.DiscoveredApp{} - discoveredAppsWithoutHook = &deployers.DiscoveredApp{IncludeRecipe: true, IncludeHooks: false} - discoveredAppsWithHook = &deployers.DiscoveredApp{IncludeRecipe: true, IncludeHooks: true} - Deployers = []types.Deployer{ + Workloads = []types.Workload{} + subscription = &deployers.Subscription{} + appset = &deployers.ApplicationSet{} + discoveredApps = &deployers.DiscoveredApp{} + discoveredAppsWithoutHook = &deployers.DiscoveredApp{IncludeRecipe: true, IncludeHooks: false} + discoveredAppsWithHook = &deployers.DiscoveredApp{IncludeRecipe: true, IncludeHooks: true} + discoveredAppsWithHookAndVol = &deployers.DiscoveredApp{IncludeRecipe: true, IncludeHooks: true, + IncludeVolumes: true} + Deployers = []types.Deployer{ subscription, appset, discoveredApps, discoveredAppsWithoutHook, - discoveredAppsWithHook, + discoveredAppsWithHook, discoveredAppsWithHookAndVol, } ) From 637e9a81274c50aed64b0d3a211158fd498ab9e2 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Fri, 3 Jan 2025 02:11:57 -0500 Subject: [PATCH 03/24] e2e: add a sample config file as config.yaml.sample and remove the config.yaml from the git repo. This way users don't have to revert their changes to the config file before commiting changes. Signed-off-by: Raghavendra Talur --- .github/workflows/e2e.yaml | 1 + .gitignore | 3 +++ e2e/{config.yaml => config.yaml.sample} | 0 3 files changed, 4 insertions(+) rename e2e/{config.yaml => config.yaml.sample} (100%) diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 8e26bed02..1f6a483ae 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -72,6 +72,7 @@ jobs: - name: Run e2e tests run: | + cat e2e/config.yaml.sample >> e2e/config.yaml cat ~/.config/drenv/rdr/config.yaml >> e2e/config.yaml make e2e-rdr diff --git a/.gitignore b/.gitignore index f4de2acb8..36813ec25 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,9 @@ /bin /testbin/* +# config files +/e2e/config.yaml + # Test binary, build with `go test -c` *.test diff --git a/e2e/config.yaml b/e2e/config.yaml.sample similarity index 100% rename from e2e/config.yaml rename to e2e/config.yaml.sample From 5d4f75ca7458e03ccf08573eefc1db4de41e08e0 Mon Sep 17 00:00:00 2001 From: Benamar Mekhissi Date: Tue, 17 Dec 2024 03:54:07 -0500 Subject: [PATCH 04/24] Enable VolSync protection for any PVC type via DRPC annotation VolSync protection can now be enabled for any PVC type by adding a specific annotation to the DRPC. 
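
As an illustration only (not part of this change), opting a workload in
might look like the Go sketch below. The annotation key is the value of
rmnutil.UseVolSyncAnnotation referenced by this series; the drpc object
and the expected annotation value are assumptions here:

    // Hypothetical usage: annotate a DRPC so its VRG is reconciled with
    // VolSync regardless of the PVC's storage provisioner.
    annotations := drpc.GetAnnotations()
    if annotations == nil {
            annotations = map[string]string{}
    }
    annotations[rmnutil.UseVolSyncAnnotation] = "true" // value assumed
    drpc.SetAnnotations(annotations)
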
Signed-off-by: Benamar Mekhissi --- internal/controller/drplacementcontrol.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/controller/drplacementcontrol.go b/internal/controller/drplacementcontrol.go index fea58b024..b9cd51199 100644 --- a/internal/controller/drplacementcontrol.go +++ b/internal/controller/drplacementcontrol.go @@ -1711,6 +1711,7 @@ func (d *DRPCInstance) updateVRGOptionalFields(vrg, vrgFromView *rmn.VolumeRepli DoNotDeletePVCAnnotation: d.instance.GetAnnotations()[DoNotDeletePVCAnnotation], DRPCUIDAnnotation: string(d.instance.UID), rmnutil.IsCGEnabledAnnotation: d.instance.GetAnnotations()[rmnutil.IsCGEnabledAnnotation], + rmnutil.UseVolSyncAnnotation: d.instance.GetAnnotations()[rmnutil.UseVolSyncAnnotation], } vrg.Spec.ProtectedNamespaces = d.instance.Spec.ProtectedNamespaces From d173ef8f6fcca4a436b0486b73d12e0455190b88 Mon Sep 17 00:00:00 2001 From: Benamar Mekhissi Date: Tue, 17 Dec 2024 03:55:43 -0500 Subject: [PATCH 05/24] Restrict ReadOnlyMany accessMode to CephFS PVCs only Previously, temporary PVCs for all storage types were being created with the ReadOnlyMany access mode. This commit ensures that only CephFS temporary PVCs will use the ReadOnlyMany access mode. Additionally, the need for a special storage class tagged for backingSnapshot has been removed. Instead, the same storage class used for ReadWriteMany will now be used to create temporary ReadOnlyMany PVCs, simplifying the storage configuration. Signed-off-by: Benamar Mekhissi --- internal/controller/volsync/vshandler.go | 88 ++++--------------- internal/controller/volsync/vshandler_test.go | 10 ++- 2 files changed, 24 insertions(+), 74 deletions(-) diff --git a/internal/controller/volsync/vshandler.go b/internal/controller/volsync/vshandler.go index 03b1f9b1e..e67e94912 100644 --- a/internal/controller/volsync/vshandler.go +++ b/internal/controller/volsync/vshandler.go @@ -408,13 +408,9 @@ func (v *VSHandler) createOrUpdateRS(rsSpec ramendrv1alpha1.VolSyncReplicationSo return nil, err } - volumeSnapshotClassName, err := v.getVolumeSnapshotClassFromPVCStorageClass(storageClass) - if err != nil { - return nil, err - } + v.ModifyRSSpecForCephFS(&rsSpec, storageClass) - // Fix for CephFS (replication source only) - may need different storageclass and access modes - err = v.ModifyRSSpecForCephFS(&rsSpec, storageClass) + volumeSnapshotClassName, err := v.getVolumeSnapshotClassFromPVCStorageClass(storageClass) if err != nil { return nil, err } @@ -1338,60 +1334,13 @@ func (v *VSHandler) getRsyncServiceType() *corev1.ServiceType { // For CephFS only, there is a problem where restoring a PVC from snapshot can be very slow when there are a lot of // files - on every replication cycle we need to create a PVC from snapshot in order to get a point-in-time copy of // the source PVC to sync with the replicationdestination. -// This workaround follows the instructions here: -// https://github.com/ceph/ceph-csi/blob/devel/docs/cephfs-snapshot-backed-volumes.md -// -// Steps: -// 1. If the storageclass detected is cephfs, create a new storageclass with backingSnapshot: "true" parameter -// (or reuse if it already exists). If not cephfs, return and do not modify rsSpec. -// 2. Modify rsSpec to use the new storageclass and also update AccessModes to 'ReadOnlyMany' as per the instructions -// above. +// If CephFS PVC, modify rsSpec AccessModes to use 'ReadOnlyMany'. 
func (v *VSHandler) ModifyRSSpecForCephFS(rsSpec *ramendrv1alpha1.VolSyncReplicationSourceSpec, storageClass *storagev1.StorageClass, -) error { - if storageClass.Provisioner != v.defaultCephFSCSIDriverName { - return nil // No workaround required - } - - v.log.Info("CephFS storageclass detected on source PVC, creating replicationsource with read-only "+ - " PVC from snapshot", "storageClassName", storageClass.GetName()) - - // Create/update readOnlyPVCStorageClass - readOnlyPVCStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: storageClass.GetName() + "-vrg", - }, - } - - op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, readOnlyPVCStorageClass, func() error { - // Do not update the storageclass if it already exists - Provisioner and Parameters are immutable anyway - if readOnlyPVCStorageClass.CreationTimestamp.IsZero() { - readOnlyPVCStorageClass.Provisioner = storageClass.Provisioner - - // Copy other parameters from the original storage class - // Note - not copying volumebindingmode or reclaim policy from the source storageclass will leave defaults - readOnlyPVCStorageClass.Parameters = map[string]string{} - for k, v := range storageClass.Parameters { - readOnlyPVCStorageClass.Parameters[k] = v - } - - // Set backingSnapshot parameter to true - readOnlyPVCStorageClass.Parameters["backingSnapshot"] = "true" - } - - return nil - }) - if err != nil { - return fmt.Errorf("%w", err) +) { + if storageClass.Provisioner == v.defaultCephFSCSIDriverName { + rsSpec.ProtectedPVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} } - - v.log.Info("StorageClass for readonly cephfs PVC createOrUpdate Complete", "op", op) - - // Update the rsSpec with access modes and the special storageclass - rsSpec.ProtectedPVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} - rsSpec.ProtectedPVC.StorageClassName = &readOnlyPVCStorageClass.Name - - return nil } func (v *VSHandler) GetVolumeSnapshotClassFromPVCStorageClass(storageClassName *string) (string, error) { @@ -1821,21 +1770,10 @@ func (v *VSHandler) reconcileLocalRS(rd *volsyncv1alpha1.ReplicationDestination, ) { v.log.Info("Reconciling localRS", "RD", rd.GetName()) - storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName) - if err != nil { - return nil, err - } - rsSpec := &ramendrv1alpha1.VolSyncReplicationSourceSpec{ ProtectedPVC: rdSpec.ProtectedPVC, } - // Fix for CephFS (replication source only) - may need different storageclass and access modes - err = v.ModifyRSSpecForCephFS(rsSpec, storageClass) - if err != nil { - return nil, err - } - pvc, err := v.setupLocalRS(rd, rdSpec, snapshotRef) if err != nil { return nil, err @@ -1983,10 +1921,10 @@ func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination, } // In all other cases, we have to create a RO PVC. 
- return v.createReadOnlyPVCFromSnapshot(rd, rdSpec, snapshotRef, restoreSize) + return v.createPVCFromSnapshot(rd, rdSpec, snapshotRef, restoreSize) } -func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestination, +func (v *VSHandler) createPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestination, rdSpec *ramendrv1alpha1.VolSyncReplicationDestinationSpec, snapshotRef *corev1.TypedLocalObjectReference, snapRestoreSize *resource.Quantity, @@ -1994,6 +1932,11 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef, "snapRestoreSize", snapRestoreSize) + storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName) + if err != nil { + return nil, err + } + pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: snapshotRef.Name, @@ -2017,6 +1960,11 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio accessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} + // Use the protectedPVC accessModes when csi driver is not the default (openshift-storage.cephfs.csi.ceph.com) + if storageClass.Provisioner != v.defaultCephFSCSIDriverName { + accessModes = rdSpec.ProtectedPVC.AccessModes + } + if pvc.CreationTimestamp.IsZero() { // set immutable fields pvc.Spec.AccessModes = accessModes pvc.Spec.StorageClassName = rd.Spec.RsyncTLS.StorageClassName diff --git a/internal/controller/volsync/vshandler_test.go b/internal/controller/volsync/vshandler_test.go index 67f0dc8be..92b6087af 100644 --- a/internal/controller/volsync/vshandler_test.go +++ b/internal/controller/volsync/vshandler_test.go @@ -252,10 +252,12 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { }, storageClassForTest)).To(Succeed()) - // - // Call ModifyRSSpecForCephFS - // - Expect(vsHandler.ModifyRSSpecForCephFS(&testRsSpec, storageClassForTest)).To(Succeed()) + vsHandler.ModifyRSSpecForCephFS(&testRsSpec, storageClassForTest) + + Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal( + []corev1.PersistentVolumeAccessMode{ + corev1.ReadOnlyMany, + })) }) Context("When the source PVC is not using a cephfs storageclass", func() { From 0b89d361e2160f1c4b236af972840698ed4e4684 Mon Sep 17 00:00:00 2001 From: Benamar Mekhissi Date: Wed, 18 Dec 2024 02:24:56 -0500 Subject: [PATCH 06/24] Support original accessModes for non-CephFS source PVCs For non-CephFS PVCs, temporary PVCs now inherit the accessModes of the source PVC rather than using ReadOnlyMany. 
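
The selection logic, as a minimal sketch matching the hunk below in
volumegroupsourcehandler.go:

    // CephFS restores keep ReadOnlyMany; any other provisioner inherits
    // the access modes of the source PVC being protected.
    restoreAccessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}
    if storageClass.Provisioner != h.DefaultCephFSCSIDriverName {
            restoreAccessModes = pvc.Spec.AccessModes
    }
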
Signed-off-by: Benamar Mekhissi --- internal/controller/cephfscg/utils.go | 51 ------------------- .../cephfscg/volumegroupsourcehandler.go | 19 ++++--- 2 files changed, 11 insertions(+), 59 deletions(-) diff --git a/internal/controller/cephfscg/utils.go b/internal/controller/cephfscg/utils.go index 9790ab32f..070232df9 100644 --- a/internal/controller/cephfscg/utils.go +++ b/internal/controller/cephfscg/utils.go @@ -11,10 +11,8 @@ import ( "github.com/ramendr/ramen/internal/controller/volsync" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) // ------------- [Begin] Copied from existing code in Ramen ---- @@ -54,55 +52,6 @@ func getLocalServiceNameForRD(rdName string) string { return fmt.Sprintf("volsync-rsync-tls-dst-%s", rdName) } -// ------------- [End] Copied from existing code in Ramen ---- - -// ------------- [Begin] Edited from existing code in Ramen ---- - -// Copied from func (v *VSHandler) ModifyRSSpecForCephFS -func GetRestoreStorageClass( - ctx context.Context, k8sClient client.Client, storageClassName string, - defaultCephFSCSIDriverName string, -) (*storagev1.StorageClass, error) { - storageClass, err := GetStorageClass(ctx, k8sClient, &storageClassName) - if err != nil { - return nil, err - } - - if storageClass.Provisioner != defaultCephFSCSIDriverName { - return storageClass, nil // No workaround required - } - - // Create/update readOnlyPVCStorageClass - readOnlyPVCStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: storageClass.GetName() + "-vrg", - }, - } - - _, err = ctrlutil.CreateOrUpdate(ctx, k8sClient, readOnlyPVCStorageClass, func() error { - // Do not update the storageclass if it already exists - Provisioner and Parameters are immutable anyway - if readOnlyPVCStorageClass.CreationTimestamp.IsZero() { - readOnlyPVCStorageClass.Provisioner = storageClass.Provisioner - - // Copy other parameters from the original storage class - readOnlyPVCStorageClass.Parameters = map[string]string{} - for k, v := range storageClass.Parameters { - readOnlyPVCStorageClass.Parameters[k] = v - } - - // Set backingSnapshot parameter to true - readOnlyPVCStorageClass.Parameters["backingSnapshot"] = "true" - } - - return nil - }) - if err != nil { - return nil, fmt.Errorf("%w", err) - } - - return readOnlyPVCStorageClass, nil -} - // Copied from func (v *VSHandler) getStorageClass( func GetStorageClass( ctx context.Context, k8sClient client.Client, storageClassName *string, diff --git a/internal/controller/cephfscg/volumegroupsourcehandler.go b/internal/controller/cephfscg/volumegroupsourcehandler.go index 8e962451b..0daa9195a 100644 --- a/internal/controller/cephfscg/volumegroupsourcehandler.go +++ b/internal/controller/cephfscg/volumegroupsourcehandler.go @@ -246,10 +246,14 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( volumeGroupSnapshot.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err) } - restoreStorageClass, err := GetRestoreStorageClass(ctx, h.Client, - *pvc.Spec.StorageClassName, h.DefaultCephFSCSIDriverName) + storageClass, err := GetStorageClass(ctx, h.Client, pvc.Spec.StorageClassName) if err != nil { - return nil, fmt.Errorf("failed to get Restore Storage Class from PVC %s: %w", pvc.Name+"/"+pvc.Namespace, err) + return nil, err + } + + restoreAccessModes := 
[]corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} + if storageClass.Provisioner != h.DefaultCephFSCSIDriverName { + restoreAccessModes = pvc.Spec.AccessModes } RestoredPVCNamespacedName := types.NamespacedName{ @@ -258,7 +262,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( } if err := h.RestoreVolumesFromSnapshot( ctx, pvcVSRef.VolumeSnapshotRef.Name, pvc, RestoredPVCNamespacedName, - restoreStorageClass.GetName(), owner); err != nil { + restoreAccessModes, owner); err != nil { return nil, fmt.Errorf("failed to restore volumes from snapshot %s: %w", pvcVSRef.VolumeSnapshotRef.Name+"/"+pvc.Namespace, err) } @@ -286,7 +290,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot( vsName string, pvc *corev1.PersistentVolumeClaim, restoredPVCNamespacedname types.NamespacedName, - restoreStorageClassName string, + restoreAccessModes []corev1.PersistentVolumeAccessMode, owner metav1.Object, ) error { logger := h.Logger.WithName("RestoreVolumesFromSnapshot"). @@ -351,8 +355,8 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot( } if restoredPVC.CreationTimestamp.IsZero() { // set immutable fields - restoredPVC.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} - restoredPVC.Spec.StorageClassName = &restoreStorageClassName + restoredPVC.Spec.AccessModes = restoreAccessModes + restoredPVC.Spec.StorageClassName = pvc.Spec.StorageClassName restoredPVC.Spec.DataSource = &snapshotRef } @@ -424,7 +428,6 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateReplicationSourceForRestoredPVC } replicationSource.Spec.RsyncTLS = &volsyncv1alpha1.ReplicationSourceRsyncTLSSpec{ ReplicationSourceVolumeOptions: volsyncv1alpha1.ReplicationSourceVolumeOptions{ - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}, CopyMethod: volsyncv1alpha1.CopyMethodDirect, }, From ab2c18f83194d018e2585b44bcb51b52beb1be62 Mon Sep 17 00:00:00 2001 From: Benamar Mekhissi Date: Wed, 18 Dec 2024 02:42:36 -0500 Subject: [PATCH 07/24] Delay temporary PVC creation until scheduled time on fresh deploy For consistency groups, temporary PVCs will now be created at the scheduled time rather than immediately upon fresh deployment. Signed-off-by: Benamar Mekhissi --- .../cephfscg/replicationgroupsource.go | 18 +++++++++--------- .../cephfscg/volumegroupsourcehandler.go | 4 ++-- internal/controller/vrg_volrep.go | 4 ++-- internal/controller/vrg_volsync.go | 6 ++++++ 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/internal/controller/cephfscg/replicationgroupsource.go b/internal/controller/cephfscg/replicationgroupsource.go index 477d3fbc3..d217d0e1d 100644 --- a/internal/controller/cephfscg/replicationgroupsource.go +++ b/internal/controller/cephfscg/replicationgroupsource.go @@ -113,15 +113,6 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover. 
return mover.InProgress(), err } - m.Logger.Info("Restore PVCs from volume group snapshot") - - restoredPVCs, err := m.VolumeGroupHandler.RestoreVolumesFromVolumeGroupSnapshot(ctx, m.ReplicationGroupSource) - if err != nil { - m.Logger.Error(err, "Failed to restore volume group snapshot") - - return mover.InProgress(), err - } - m.Logger.Info("Create ReplicationSource for each Restored PVC") vrgName := m.ReplicationGroupSource.GetLabels()[volsync.VRGOwnerNameLabel] // Pre-allocated shared secret - DRPC will generate and propagate this secret from hub to clusters @@ -141,6 +132,15 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover. return mover.InProgress(), nil } + m.Logger.Info("Restore PVCs from volume group snapshot") + + restoredPVCs, err := m.VolumeGroupHandler.RestoreVolumesFromVolumeGroupSnapshot(ctx, m.ReplicationGroupSource) + if err != nil { + m.Logger.Error(err, "Failed to restore volume group snapshot") + + return mover.InProgress(), err + } + replicationSources, err := m.VolumeGroupHandler.CreateOrUpdateReplicationSourceForRestoredPVCs( ctx, m.ReplicationGroupSource.Status.LastSyncStartTime.String(), restoredPVCs, m.ReplicationGroupSource) if err != nil { diff --git a/internal/controller/cephfscg/volumegroupsourcehandler.go b/internal/controller/cephfscg/volumegroupsourcehandler.go index 0daa9195a..f23cb1575 100644 --- a/internal/controller/cephfscg/volumegroupsourcehandler.go +++ b/internal/controller/cephfscg/volumegroupsourcehandler.go @@ -26,8 +26,8 @@ import ( ) var ( - VolumeGroupSnapshotNameFormat = "cephfscg-%s" - RestorePVCinCGNameFormat = "cephfscg-%s" + VolumeGroupSnapshotNameFormat = "vs-cg-%s" + RestorePVCinCGNameFormat = "vs-cg-%s" SnapshotGroup = "snapshot.storage.k8s.io" SnapshotGroupKind = "VolumeSnapshot" ) diff --git a/internal/controller/vrg_volrep.go b/internal/controller/vrg_volrep.go index 1bdbdf236..1c7d67546 100644 --- a/internal/controller/vrg_volrep.go +++ b/internal/controller/vrg_volrep.go @@ -437,7 +437,7 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim, log logr.Log // any further and it can be skipped. The pvc will go away eventually. func skipPVC(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string) { if pvc.Status.Phase != corev1.ClaimBound { - log.Info("Skipping handling of VR as PersistentVolumeClaim is not bound", "pvcPhase", pvc.Status.Phase) + log.Info("Skipping handling of VR as PVC is not bound", "pvcPhase", pvc.Status.Phase) msg := "PVC not bound yet" // v.updateProtectedPVCCondition(pvc.Name, VRGConditionReasonProgressing, msg) @@ -451,7 +451,7 @@ func skipPVC(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string) func isPVCDeletedAndNotProtected(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string) { // If PVC deleted but not yet protected with a finalizer, skip it! 
if !containsString(pvc.Finalizers, PvcVRFinalizerProtected) && rmnutil.ResourceIsDeleted(pvc) { - log.Info("Skipping PersistentVolumeClaim, as it is marked for deletion and not yet protected") + log.Info("Skipping PVC, as it is marked for deletion and not yet protected") msg := "Skipping pvc marked for deletion" // v.updateProtectedPVCCondition(pvc.Name, VRGConditionReasonProgressing, msg) diff --git a/internal/controller/vrg_volsync.go b/internal/controller/vrg_volsync.go index 2f7b107f7..500abee86 100644 --- a/internal/controller/vrg_volsync.go +++ b/internal/controller/vrg_volsync.go @@ -113,6 +113,12 @@ func (v *VRGInstance) reconcileVolSyncAsPrimary(finalSyncPrepared *bool) (requeu } for _, pvc := range v.volSyncPVCs { + if pvc.Status.Phase != corev1.ClaimBound { + v.log.Info("Skipping PVC - PVC is not Bound.", "name", pvc.GetName()) + + continue + } + requeuePVC := v.reconcilePVCAsVolSyncPrimary(pvc) if requeuePVC { requeue = true From 80465f761757b75b3d79eb05305365ef865b177e Mon Sep 17 00:00:00 2001 From: Benamar Mekhissi Date: Thu, 19 Dec 2024 09:56:36 -0500 Subject: [PATCH 08/24] Clean up unit test Signed-off-by: Benamar Mekhissi --- .../cephfscg/replicationgroupsource.go | 2 +- .../cephfscg/volumegroupsourcehandler.go | 22 +-- internal/controller/drplacementcontrol.go | 2 +- internal/controller/volsync/vshandler.go | 5 +- internal/controller/volsync/vshandler_test.go | 135 ++---------------- .../volumereplicationgroup_controller.go | 7 + 6 files changed, 35 insertions(+), 138 deletions(-) diff --git a/internal/controller/cephfscg/replicationgroupsource.go b/internal/controller/cephfscg/replicationgroupsource.go index d217d0e1d..71a1258e9 100644 --- a/internal/controller/cephfscg/replicationgroupsource.go +++ b/internal/controller/cephfscg/replicationgroupsource.go @@ -140,7 +140,7 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover. 
return mover.InProgress(), err } - + replicationSources, err := m.VolumeGroupHandler.CreateOrUpdateReplicationSourceForRestoredPVCs( ctx, m.ReplicationGroupSource.Status.LastSyncStartTime.String(), restoredPVCs, m.ReplicationGroupSource) if err != nil { diff --git a/internal/controller/cephfscg/volumegroupsourcehandler.go b/internal/controller/cephfscg/volumegroupsourcehandler.go index f23cb1575..e92e02a7c 100644 --- a/internal/controller/cephfscg/volumegroupsourcehandler.go +++ b/internal/controller/cephfscg/volumegroupsourcehandler.go @@ -146,7 +146,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateVolumeGroupSnapshot( return nil } -// CleanVolumeGroupSnapshot delete restored pvc, replicationsource and VolumeGroupSnapshot +// CleanVolumeGroupSnapshot delete restored pvc and VolumeGroupSnapshot // //nolint:funlen func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot( @@ -214,36 +214,38 @@ func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot( return nil } -// RestoreVolumesFromVolumeGroupSnapshot restore VolumeGroupSnapshot to PVCs +// RestoreVolumesFromVolumeGroupSnapshot restores VolumeGroupSnapshot to PVCs +// +//nolint:funlen,cyclop func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( ctx context.Context, owner metav1.Object, ) ([]RestoredPVC, error) { logger := h.Logger.WithName("RestoreVolumesFromVolumeGroupSnapshot") logger.Info("Get volume group snapshot") - volumeGroupSnapshot := &vgsv1alphfa1.VolumeGroupSnapshot{} + vgs := &vgsv1alphfa1.VolumeGroupSnapshot{} if err := h.Client.Get(ctx, types.NamespacedName{Name: h.VolumeGroupSnapshotName, Namespace: h.VolumeGroupSnapshotNamespace}, - volumeGroupSnapshot); err != nil { + vgs); err != nil { return nil, fmt.Errorf("failed to get volume group snapshot: %w", err) } - if volumeGroupSnapshot.Status == nil || volumeGroupSnapshot.Status.ReadyToUse == nil || - (volumeGroupSnapshot.Status.ReadyToUse != nil && !*volumeGroupSnapshot.Status.ReadyToUse) { + if vgs.Status == nil || vgs.Status.ReadyToUse == nil || + (vgs.Status.ReadyToUse != nil && !*vgs.Status.ReadyToUse) { return nil, fmt.Errorf("can't restore volume group snapshot: volume group snapshot is not ready to be used") } restoredPVCs := []RestoredPVC{} - for _, pvcVSRef := range volumeGroupSnapshot.Status.PVCVolumeSnapshotRefList { + for _, pvcVSRef := range vgs.Status.PVCVolumeSnapshotRefList { logger.Info("Get PVCName from volume snapshot", "PVCName", pvcVSRef.PersistentVolumeClaimRef.Name, "VolumeSnapshotName", pvcVSRef.VolumeSnapshotRef.Name) pvc, err := util.GetPVC(ctx, h.Client, - types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: volumeGroupSnapshot.Namespace}) + types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: vgs.Namespace}) if err != nil { return nil, fmt.Errorf("failed to get PVC from VGS %s: %w", - volumeGroupSnapshot.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err) + vgs.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err) } storageClass, err := GetStorageClass(ctx, h.Client, pvc.Spec.StorageClassName) @@ -428,7 +430,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateReplicationSourceForRestoredPVC } replicationSource.Spec.RsyncTLS = &volsyncv1alpha1.ReplicationSourceRsyncTLSSpec{ ReplicationSourceVolumeOptions: volsyncv1alpha1.ReplicationSourceVolumeOptions{ - CopyMethod: volsyncv1alpha1.CopyMethodDirect, + CopyMethod: volsyncv1alpha1.CopyMethodDirect, }, KeySecret: &h.VolsyncKeySecretName, diff --git a/internal/controller/drplacementcontrol.go 
b/internal/controller/drplacementcontrol.go index b9cd51199..35800c903 100644 --- a/internal/controller/drplacementcontrol.go +++ b/internal/controller/drplacementcontrol.go @@ -1711,7 +1711,7 @@ func (d *DRPCInstance) updateVRGOptionalFields(vrg, vrgFromView *rmn.VolumeRepli DoNotDeletePVCAnnotation: d.instance.GetAnnotations()[DoNotDeletePVCAnnotation], DRPCUIDAnnotation: string(d.instance.UID), rmnutil.IsCGEnabledAnnotation: d.instance.GetAnnotations()[rmnutil.IsCGEnabledAnnotation], - rmnutil.UseVolSyncAnnotation: d.instance.GetAnnotations()[rmnutil.UseVolSyncAnnotation], + rmnutil.UseVolSyncAnnotation: d.instance.GetAnnotations()[rmnutil.UseVolSyncAnnotation], } vrg.Spec.ProtectedNamespaces = d.instance.Spec.ProtectedNamespaces diff --git a/internal/controller/volsync/vshandler.go b/internal/controller/volsync/vshandler.go index e67e94912..7f0c9981e 100644 --- a/internal/controller/volsync/vshandler.go +++ b/internal/controller/volsync/vshandler.go @@ -1929,8 +1929,7 @@ func (v *VSHandler) createPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestina snapshotRef *corev1.TypedLocalObjectReference, snapRestoreSize *resource.Quantity, ) (*corev1.PersistentVolumeClaim, error) { - l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef, - "snapRestoreSize", snapRestoreSize) + l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef, "snapRestoreSize", snapRestoreSize) storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName) if err != nil { @@ -1980,8 +1979,6 @@ func (v *VSHandler) createPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestina return nil }) if err != nil { - l.Error(err, "Unable to createOrUpdate PVC from snapshot for localRS") - return nil, fmt.Errorf("error creating or updating PVC from snapshot for localRS (%w)", err) } diff --git a/internal/controller/volsync/vshandler_test.go b/internal/controller/volsync/vshandler_test.go index 92b6087af..a3ebfb264 100644 --- a/internal/controller/volsync/vshandler_test.go +++ b/internal/controller/volsync/vshandler_test.go @@ -253,11 +253,17 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { storageClassForTest)).To(Succeed()) vsHandler.ModifyRSSpecForCephFS(&testRsSpec, storageClassForTest) - - Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal( - []corev1.PersistentVolumeAccessMode{ - corev1.ReadOnlyMany, - })) + if storageClassForTest.Provisioner == testCephFSStorageDriverName { + Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal( + []corev1.PersistentVolumeAccessMode{ + corev1.ReadOnlyMany, + })) + } else { + Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal( + []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + })) + } }) Context("When the source PVC is not using a cephfs storageclass", func() { @@ -272,128 +278,13 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { }) Context("When the sourcePVC is using a cephfs storageclass", func() { - customBackingSnapshotStorageClassName := testCephFSStorageClassName + "-vrg" - BeforeEach(func() { // Make sure the source PVC uses the cephfs storageclass testSourcePVC.Spec.StorageClassName = &testCephFSStorageClassName }) - JustBeforeEach(func() { - // Common tests - rsSpec should be modified with settings to allow pvc from snapshot - // to use our custom cephfs storageclass and ReadOnlyMany accessModes - Expect(testRsSpecOrig).NotTo(Equal(testRsSpec)) - - // Should use the custom storageclass with backingsnapshot: true parameter - 
Expect(*testRsSpec.ProtectedPVC.StorageClassName).To(Equal(customBackingSnapshotStorageClassName)) - - // AccessModes should be updated to ReadOnlyMany - Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal( - []corev1.PersistentVolumeAccessMode{ - corev1.ReadOnlyMany, - })) - }) - - AfterEach(func() { - // Delete the custom storage class that may have been created by test - custStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: customBackingSnapshotStorageClassName, - }, - } - err := k8sClient.Delete(ctx, custStorageClass) - if err != nil { - Expect(kerrors.IsNotFound(err)).To(BeTrue()) - } - - Eventually(func() bool { - err := k8sClient.Get(ctx, client.ObjectKeyFromObject(custStorageClass), custStorageClass) - - return kerrors.IsNotFound(err) - }, maxWait, interval).Should(BeTrue()) - }) - - Context("When the custom cephfs backing storage class for readonly pvc from snap does not exist", func() { - // Delete the custom vrg storageclass if it exists - BeforeEach(func() { - custStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: customBackingSnapshotStorageClassName, - }, - } - err := k8sClient.Delete(ctx, custStorageClass) - if err != nil { - Expect(kerrors.IsNotFound(err)).To(BeTrue()) - } - - Eventually(func() bool { - err := k8sClient.Get(ctx, client.ObjectKeyFromObject(custStorageClass), custStorageClass) - - return kerrors.IsNotFound(err) - }, maxWait, interval).Should(BeTrue()) - }) - - It("ModifyRSSpecForCephFS should modify the rsSpec and create the new storageclass", func() { - // RSspec modification checks in the outer context JustBeforeEach() - - newStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: customBackingSnapshotStorageClassName, - }, - } - - Eventually(func() error { - return k8sClient.Get(ctx, client.ObjectKeyFromObject(newStorageClass), newStorageClass) - }, maxWait, interval).Should(Succeed()) - - Expect(newStorageClass.Parameters["backingSnapshot"]).To(Equal("true")) - - // Other parameters from the test cephfs storageclass should be copied over - for k, v := range testCephFSStorageClass.Parameters { - Expect(newStorageClass.Parameters[k]).To(Equal(v)) - } - }) - }) - - Context("When the custom cephfs backing storage class for readonly pvc from snap exists", func() { - var preExistingCustStorageClass *storagev1.StorageClass - - BeforeEach(func() { - preExistingCustStorageClass = &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: customBackingSnapshotStorageClassName, - }, - Provisioner: testCephFSStorageDriverName, - Parameters: map[string]string{ // Not the same params as our CephFS storageclass for test - "different-param-1": "abc", - "different-param-2": "def", - "backingSnapshot": "true", - }, - } - Expect(k8sClient.Create(ctx, preExistingCustStorageClass)).To(Succeed()) - - // Confirm it's created - Eventually(func() error { - return k8sClient.Get(ctx, - client.ObjectKeyFromObject(preExistingCustStorageClass), preExistingCustStorageClass) - }, maxWait, interval).Should(Succeed()) - }) - - It("ModifyRSSpecForCephFS should modify the rsSpec but not modify the new custom storageclass", func() { - // Load the custom storageclass - newStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: customBackingSnapshotStorageClassName, - }, - } - - Eventually(func() error { - return k8sClient.Get(ctx, client.ObjectKeyFromObject(newStorageClass), newStorageClass) - }, maxWait, interval).Should(Succeed()) - - // Parameters should match the 
original, unmodified - Expect(newStorageClass.Parameters).To(Equal(preExistingCustStorageClass.Parameters)) - }) + It("ModifyRSSpecForCephFS should modify the rsSpec protectedPVC accessModes", func() { + Expect(testRsSpecOrig).ToNot(Equal(testRsSpec)) }) }) }) diff --git a/internal/controller/volumereplicationgroup_controller.go b/internal/controller/volumereplicationgroup_controller.go index d96432b04..1e8a71a1b 100644 --- a/internal/controller/volumereplicationgroup_controller.go +++ b/internal/controller/volumereplicationgroup_controller.go @@ -759,6 +759,13 @@ func (v *VRGInstance) addConsistencyGroupLabel(pvc *corev1.PersistentVolumeClaim return fmt.Errorf("missing storageID for PVC %s/%s", pvc.GetNamespace(), pvc.GetName()) } + // FIXME: a temporary workaround for issue DFBUGS-1209 + // Remove this block once DFBUGS-1209 is fixed + storageID = "cephfs-" + storageID + if storageClass.Provisioner != DefaultCephFSCSIDriverName { + storageID = "rbd-" + storageID + } + // Add label for PVC, showing that this PVC is part of consistency group return util.NewResourceUpdater(pvc). AddLabel(ConsistencyGroupLabel, storageID). From be376f84de76f55b964491496fa7f21fd73bf331 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Tue, 7 Jan 2025 06:18:44 -0500 Subject: [PATCH 09/24] vrg: always get complete RecipeElements if recipe exists Co-Authored-by: Annaraya Narasagond Signed-off-by: Raghavendra Talur --- internal/controller/vrg_kubeobjects.go | 25 +++++++++++++++++++++++++ internal/controller/vrg_recipe.go | 19 +++----------------- 2 files changed, 28 insertions(+), 16 deletions(-) diff --git a/internal/controller/vrg_kubeobjects.go b/internal/controller/vrg_kubeobjects.go index 43722bd57..da0a52996 100644 --- a/internal/controller/vrg_kubeobjects.go +++ b/internal/controller/vrg_kubeobjects.go @@ -820,6 +820,12 @@ func getCaptureGroups(recipe Recipe.Recipe) ([]kubeobjects.CaptureSpec, error) { for resourceType, resourceName := range resource { captureInstance, err := getResourceAndConvertToCaptureGroup(recipe, resourceType, resourceName) if err != nil { + if errors.Is(err, ErrVolumeCaptureNotSupported) { + // we only use the volumes group for determining the label selector + // ignore it in the capture sequence + continue + } + return resources, err } @@ -852,6 +858,12 @@ func getRecoverGroups(recipe Recipe.Recipe) ([]kubeobjects.RecoverSpec, error) { for resourceType, resourceName := range resource { captureInstance, err := getResourceAndConvertToRecoverGroup(recipe, resourceType, resourceName) if err != nil { + if errors.Is(err, ErrVolumeRecoverNotSupported) { + // we only use the volumes group for determining the label selector + // ignore it in the recover sequence + continue + } + return resources, err } @@ -862,6 +874,11 @@ func getRecoverGroups(recipe Recipe.Recipe) ([]kubeobjects.RecoverSpec, error) { return resources, nil } +var ( + ErrVolumeCaptureNotSupported = errors.New("volume capture not supported") + ErrVolumeRecoverNotSupported = errors.New("volume recover not supported") +) + func getResourceAndConvertToCaptureGroup( recipe Recipe.Recipe, resourceType, name string) (*kubeobjects.CaptureSpec, error, ) { @@ -873,6 +890,10 @@ func getResourceAndConvertToCaptureGroup( } } + if name == recipe.Spec.Volumes.Name { + return nil, ErrVolumeCaptureNotSupported + } + return nil, k8serrors.NewNotFound(schema.GroupResource{Resource: "Recipe.Spec.Group.Name"}, name) } @@ -904,6 +925,10 @@ func getResourceAndConvertToRecoverGroup( } } + if name == recipe.Spec.Volumes.Name { + return nil,
ErrVolumeRecoverNotSupported + } + return nil, k8serrors.NewNotFound(schema.GroupResource{Resource: "Recipe.Spec.Group.Name"}, name) } diff --git a/internal/controller/vrg_recipe.go b/internal/controller/vrg_recipe.go index 5c46a50fb..bd787588b 100644 --- a/internal/controller/vrg_recipe.go +++ b/internal/controller/vrg_recipe.go @@ -81,25 +81,12 @@ func GetPVCSelector(ctx context.Context, reader client.Reader, vrg ramen.VolumeR ) (PvcSelector, error) { var recipeElements RecipeElements - return recipeElements.PvcSelector, recipeVolumesAndOptionallyWorkflowsGet( - ctx, reader, vrg, ramenConfig, log, &recipeElements, - func(recipe.Recipe, *RecipeElements, ramen.VolumeReplicationGroup, ramen.RamenConfig) error { - return nil - }, - ) + return recipeElements.PvcSelector, RecipeElementsGet( + ctx, reader, vrg, ramenConfig, log, &recipeElements) } func RecipeElementsGet(ctx context.Context, reader client.Reader, vrg ramen.VolumeReplicationGroup, ramenConfig ramen.RamenConfig, log logr.Logger, recipeElements *RecipeElements, -) error { - return recipeVolumesAndOptionallyWorkflowsGet(ctx, reader, vrg, ramenConfig, log, recipeElements, - recipeWorkflowsGet, - ) -} - -func recipeVolumesAndOptionallyWorkflowsGet(ctx context.Context, reader client.Reader, vrg ramen.VolumeReplicationGroup, - ramenConfig ramen.RamenConfig, log logr.Logger, recipeElements *RecipeElements, - workflowsGet func(recipe.Recipe, *RecipeElements, ramen.VolumeReplicationGroup, ramen.RamenConfig) error, ) error { if vrg.Spec.KubeObjectProtection == nil { *recipeElements = RecipeElements{ @@ -145,7 +132,7 @@ func recipeVolumesAndOptionallyWorkflowsGet(ctx context.Context, reader client.R PvcSelector: selector, } - if err := workflowsGet(recipe, recipeElements, vrg, ramenConfig); err != nil { + if err := recipeWorkflowsGet(recipe, recipeElements, vrg, ramenConfig); err != nil { return err } From d912252c1cd9276643872d28b19807267053e291 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Tue, 7 Jan 2025 06:27:03 -0500 Subject: [PATCH 10/24] vrg: RecipeElementsGet should return RecipeElements Co-Authored-by: Annaraya Narasagond Signed-off-by: Raghavendra Talur --- .../volumereplicationgroup_controller.go | 7 ++-- internal/controller/vrg_recipe.go | 38 +++++++++++-------- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/internal/controller/volumereplicationgroup_controller.go b/internal/controller/volumereplicationgroup_controller.go index 1e8a71a1b..03a4c8edf 100644 --- a/internal/controller/volumereplicationgroup_controller.go +++ b/internal/controller/volumereplicationgroup_controller.go @@ -549,9 +549,10 @@ func (v *VRGInstance) processVRG() ctrl.Result { } } - if err := RecipeElementsGet( - v.ctx, v.reconciler.Client, *v.instance, *v.ramenConfig, v.log, &v.recipeElements, - ); err != nil { + var err error + + v.recipeElements, err = RecipeElementsGet(v.ctx, v.reconciler.Client, *v.instance, *v.ramenConfig, v.log) + if err != nil { return v.invalid(err, "Failed to get recipe", false) } diff --git a/internal/controller/vrg_recipe.go b/internal/controller/vrg_recipe.go index bd787588b..452f55ddf 100644 --- a/internal/controller/vrg_recipe.go +++ b/internal/controller/vrg_recipe.go @@ -79,31 +79,35 @@ func GetPVCSelector(ctx context.Context, reader client.Reader, vrg ramen.VolumeR ramenConfig ramen.RamenConfig, log logr.Logger, ) (PvcSelector, error) { - var recipeElements RecipeElements + recipeElements, err := RecipeElementsGet(ctx, reader, vrg, ramenConfig, log) + if err != nil { + return 
recipeElements.PvcSelector, err + } - return recipeElements.PvcSelector, RecipeElementsGet( - ctx, reader, vrg, ramenConfig, log, &recipeElements) + return recipeElements.PvcSelector, nil } func RecipeElementsGet(ctx context.Context, reader client.Reader, vrg ramen.VolumeReplicationGroup, - ramenConfig ramen.RamenConfig, log logr.Logger, recipeElements *RecipeElements, -) error { + ramenConfig ramen.RamenConfig, log logr.Logger, +) (RecipeElements, error) { + var recipeElements RecipeElements + if vrg.Spec.KubeObjectProtection == nil { - *recipeElements = RecipeElements{ + recipeElements = RecipeElements{ PvcSelector: getPVCSelector(vrg, ramenConfig, nil, nil), } - return nil + return recipeElements, nil } if vrg.Spec.KubeObjectProtection.RecipeRef == nil { - *recipeElements = RecipeElements{ + recipeElements = RecipeElements{ PvcSelector: getPVCSelector(vrg, ramenConfig, nil, nil), CaptureWorkflow: captureWorkflowDefault(vrg, ramenConfig), RecoverWorkflow: recoverWorkflowDefault(vrg, ramenConfig), } - return nil + return recipeElements, nil } recipeNamespacedName := types.NamespacedName{ @@ -113,11 +117,11 @@ func RecipeElementsGet(ctx context.Context, reader client.Reader, vrg ramen.Volu recipe := recipe.Recipe{} if err := reader.Get(ctx, recipeNamespacedName, &recipe); err != nil { - return fmt.Errorf("recipe %v get error: %w", recipeNamespacedName.String(), err) + return recipeElements, fmt.Errorf("recipe %v get error: %w", recipeNamespacedName.String(), err) } if err := RecipeParametersExpand(&recipe, vrg.Spec.KubeObjectProtection.RecipeParameters, log); err != nil { - return err + return recipeElements, fmt.Errorf("recipe %v parameters expansion error: %w", recipeNamespacedName.String(), err) } var selector PvcSelector @@ -128,15 +132,19 @@ func RecipeElementsGet(ctx context.Context, reader client.Reader, vrg ramen.Volu recipe.Spec.Volumes.LabelSelector) } - *recipeElements = RecipeElements{ + recipeElements = RecipeElements{ PvcSelector: selector, } - if err := recipeWorkflowsGet(recipe, recipeElements, vrg, ramenConfig); err != nil { - return err + if err := recipeWorkflowsGet(recipe, &recipeElements, vrg, ramenConfig); err != nil { + return recipeElements, fmt.Errorf("recipe %v workflows get error: %w", recipeNamespacedName.String(), err) + } + + if err := recipeNamespacesValidate(recipeElements, vrg, ramenConfig); err != nil { + return recipeElements, fmt.Errorf("recipe %v namespaces validation error: %w", recipeNamespacedName.String(), err) } - return recipeNamespacesValidate(*recipeElements, vrg, ramenConfig) + return recipeElements, nil } func RecipeParametersExpand(recipe *recipe.Recipe, parameters map[string][]string, From 20a07d0dd231a144b8a9c243e02648ea9b1a222e Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Wed, 8 Jan 2025 13:19:51 -0500 Subject: [PATCH 11/24] tests: fix the tests Co-Authored-by: Annaraya Narasagond Signed-off-by: Raghavendra Talur --- internal/controller/vrg_pvc_selector_test.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/internal/controller/vrg_pvc_selector_test.go b/internal/controller/vrg_pvc_selector_test.go index 318f8f9fa..9dd2d279b 100644 --- a/internal/controller/vrg_pvc_selector_test.go +++ b/internal/controller/vrg_pvc_selector_test.go @@ -41,10 +41,12 @@ var _ = Describe("VolumeReplicationGroupPVCSelector", func() { testCtx, cancel = context.WithCancel(context.TODO()) Expect(k8sClient).NotTo(BeNil()) vrgTestNamespace = createUniqueNamespace(testCtx) + ramenConfig.RamenOpsNamespace = 
vrgTestNamespace }) AfterEach(func() { Expect(k8sClient.Delete(testCtx, testNamespace)).To(Succeed()) + ramenConfig.RamenOpsNamespace = "" cancel() }) @@ -164,8 +166,9 @@ func getBaseVRG(namespace string) *ramen.VolumeReplicationGroup { Async: &ramen.VRGAsyncSpec{ SchedulingInterval: "5m", }, - ReplicationState: ramen.Primary, - S3Profiles: []string{"dummy-s3-profile"}, + ReplicationState: ramen.Primary, + S3Profiles: []string{"dummy-s3-profile"}, + ProtectedNamespaces: &[]string{namespace}, }, } } @@ -200,12 +203,13 @@ func getVRGDefinitionWithKubeObjectProtection(hasPVCSelectorLabels bool, namespa return vrg } -func getTestHook() *Recipe.Hook { +func getTestHook(testNamespace string) *Recipe.Hook { duration := 30 return &Recipe.Hook{ - Name: "hook-single", - Type: "exec", + Name: "hook-single", + Type: "exec", + Namespace: testNamespace, LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "myapp": "testapp", }, }, @@ -267,7 +271,7 @@ func getRecipeDefinition(namespace string) *Recipe.Recipe { Spec: Recipe.RecipeSpec{ Groups: []*Recipe.Group{getTestGroup()}, Volumes: getTestVolumeGroup(), - Hooks: []*Recipe.Hook{getTestHook()}, + Hooks: []*Recipe.Hook{getTestHook(namespace)}, Workflows: []*Recipe.Workflow{ { Name: "backup", @@ -279,7 +283,7 @@ func getRecipeDefinition(namespace string) *Recipe.Recipe { "group": "test-group", }, { - "hook": "test-hook", + "hook": "hook-single/checkpoint", }, }, }, From 8d6e098b4e59fc4fe58aaff4f4cb00463ef61eac Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Mon, 13 Jan 2025 16:11:27 +0200 Subject: [PATCH 12/24] Fix typos in drplacementcontroller Found by the Zed typos extension. Not sure when these were introduced, but my editor started to complain about them now. Signed-off-by: Nir Soffer --- internal/controller/drplacementcontrol_controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/controller/drplacementcontrol_controller.go b/internal/controller/drplacementcontrol_controller.go index f3399490a..ff45e54f7 100644 --- a/internal/controller/drplacementcontrol_controller.go +++ b/internal/controller/drplacementcontrol_controller.go @@ -879,7 +879,7 @@ func getPlacementOrPlacementRule( usrPlacement, err = getPlacementRule(ctx, k8sclient, drpc, log) if err != nil { if k8serrors.IsNotFound(err) { - // PacementRule not found. Check Placement instead + // PlacementRule not found. Check Placement instead usrPlacement, err = getPlacement(ctx, k8sclient, drpc, log) } @@ -2157,7 +2157,7 @@ func (r *DRPlacementControlReconciler) determineDRPCState( } msg := fmt.Sprintf("Failover is allowed - VRGs count:'%d'. drpcAction:'%s'."+ - " vrgAction:'%s'. DstCluster:'%s'. vrgOnCluste '%s'", + " vrgAction:'%s'. DstCluster:'%s'. vrgOnCluster '%s'", len(vrgs), drpc.Spec.Action, vrg.Spec.Action, dstCluster, clusterName) return AllowFailover, msg, nil From 94f96c151cfb8e27bc47d2aad117f643f7b182e9 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Mon, 13 Jan 2025 16:18:08 +0200 Subject: [PATCH 13/24] Fix typos in drplacementcontrol tests - uncomplete -> incomplete - plcement -> placement - exptected -> expected There were many instances of the same typos since a lot of the test code is duplicated.
Signed-off-by: Nir Soffer --- .../drplacementcontrol_controller_test.go | 70 +++++++++---------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/internal/controller/drplacementcontrol_controller_test.go b/internal/controller/drplacementcontrol_controller_test.go index 413d45af3..7a068b78b 100644 --- a/internal/controller/drplacementcontrol_controller_test.go +++ b/internal/controller/drplacementcontrol_controller_test.go @@ -396,7 +396,7 @@ func setRestorePVsComplete() { restorePVs = true } -func setRestorePVsUncomplete() { +func setRestorePVsIncomplete() { restorePVs = false } @@ -1300,7 +1300,7 @@ func getManagedClusterViewCount(homeClusterNamespace string) int { } func verifyUserPlacementRuleDecision(name, namespace, homeCluster string) { - usrPlcementLookupKey := types.NamespacedName{ + usrPlacementLookupKey := types.NamespacedName{ Name: name, Namespace: namespace, } @@ -1310,10 +1310,10 @@ func verifyUserPlacementRuleDecision(name, namespace, homeCluster string) { var placementObj client.Object Eventually(func() bool { - err := k8sClient.Get(context.TODO(), usrPlcementLookupKey, usrPlRule) + err := k8sClient.Get(context.TODO(), usrPlacementLookupKey, usrPlRule) if k8serrors.IsNotFound(err) { usrPlmnt := &clrapiv1beta1.Placement{} - err = k8sClient.Get(context.TODO(), usrPlcementLookupKey, usrPlmnt) + err = k8sClient.Get(context.TODO(), usrPlacementLookupKey, usrPlmnt) if err != nil { return false } @@ -1351,7 +1351,7 @@ func getPlacementDecision(plName, plNamespace string) *clrapiv1beta1.PlacementDe //nolint:unparam func verifyUserPlacementRuleDecisionUnchanged(name, namespace, homeCluster string) { - usrPlcementLookupKey := types.NamespacedName{ + usrPlacementLookupKey := types.NamespacedName{ Name: name, Namespace: namespace, } @@ -1361,10 +1361,10 @@ func verifyUserPlacementRuleDecisionUnchanged(name, namespace, homeCluster strin var placementObj client.Object Consistently(func() bool { - err := k8sClient.Get(context.TODO(), usrPlcementLookupKey, usrPlRule) + err := k8sClient.Get(context.TODO(), usrPlacementLookupKey, usrPlRule) if k8serrors.IsNotFound(err) { usrPlmnt := &clrapiv1beta1.Placement{} - err = k8sClient.Get(context.TODO(), usrPlcementLookupKey, usrPlmnt) + err = k8sClient.Get(context.TODO(), usrPlacementLookupKey, usrPlmnt) if err != nil { return false } @@ -1952,7 +1952,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { When("DRAction changes to Failover", func() { It("Should not failover to Secondary (West1ManagedCluster) till PV manifest is applied", func() { By("\n\n*** Failover - 1\n\n") - setRestorePVsUncomplete() + setRestorePVsIncomplete() setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionFailover) verifyUserPlacementRuleDecisionUnchanged(userPlacementRule.Name, userPlacementRule.Namespace, East1ManagedCluster) // MWs for VRG, NS, DRCluster, and MMode @@ -2064,7 +2064,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { }) When("DRAction changes to Failover using Placement with Subscription", func() { It("Should not failover to Secondary (West1ManagedCluster) till PV manifest is applied", func() { - setRestorePVsUncomplete() + setRestorePVsIncomplete() setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionFailover) verifyUserPlacementRuleDecisionUnchanged(placement.Name, placement.Namespace, East1ManagedCluster) // MWs for VRG, NS, VRG DRCluster, and MMode @@ -2140,7 +2140,7 @@ var _ = Describe("DRPlacementControl Reconciler", 
func() { }) When("DRAction changes to Failover using Placement", func() { It("Should not failover to Secondary (West1ManagedCluster) till PV manifest is applied", func() { - setRestorePVsUncomplete() + setRestorePVsIncomplete() setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionFailover) verifyUserPlacementRuleDecisionUnchanged(placement.Name, placement.Namespace, East1ManagedCluster) // MWs for VRG, NS, VRG DRCluster, and MMode @@ -2223,7 +2223,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { When("DRAction changes to Failover", func() { It("Should not failover to Secondary (East2ManagedCluster) till PV manifest is applied", func() { By("\n\n*** Failover - 1\n\n") - setRestorePVsUncomplete() + setRestorePVsIncomplete() fenceCluster(East1ManagedCluster, false) setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, East2ManagedCluster, rmn.ActionFailover) verifyUserPlacementRuleDecisionUnchanged(userPlacementRule.Name, userPlacementRule.Namespace, East1ManagedCluster) @@ -2298,7 +2298,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { When("DRAction changes to Failover", func() { It("Should not failover to Secondary (East2ManagedCluster) till PV manifest is applied", func() { By("\n\n*** Failover - 1\n\n") - setRestorePVsUncomplete() + setRestorePVsIncomplete() fenceCluster(East1ManagedCluster, true) setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, East2ManagedCluster, rmn.ActionFailover) verifyUserPlacementRuleDecisionUnchanged(userPlacementRule.Name, userPlacementRule.Namespace, East1ManagedCluster) @@ -2393,11 +2393,11 @@ var _ = Describe("DRPlacementControl Reconciler", func() { clearDRPCStatus() expectedAction := rmn.DRAction("") expectedPhase := rmn.Deployed - exptectedPorgression := rmn.ProgressionCompleted - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression := rmn.ProgressionCompleted + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) resetClusterDown() - exptectedCompleted := rmn.ProgressionCompleted - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedCompleted) + expectedCompleted := rmn.ProgressionCompleted + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedCompleted) }) }) //nolint:lll @@ -2417,8 +2417,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { clearDRPCStatus() expectedAction := rmn.DRAction("") expectedPhase := rmn.WaitForUser - exptectedPorgression := rmn.ProgressionActionPaused - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression := rmn.ProgressionActionPaused + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) }) }) @@ -2455,8 +2455,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionFailover) expectedAction := rmn.ActionFailover expectedPhase := rmn.WaitForUser - exptectedPorgression := rmn.ProgressionActionPaused - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression := rmn.ProgressionActionPaused + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) checkConditionAllowFailover(DefaultDRPCNamespace) // User intervention is required (simulate user intervention) @@ -2464,8 +2464,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { 
setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionFailover) expectedAction = rmn.ActionFailover expectedPhase = rmn.FailedOver - exptectedPorgression = rmn.ProgressionCompleted - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression = rmn.ProgressionCompleted + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) waitForCompletion(string(rmn.FailedOver)) }) }) @@ -2497,16 +2497,16 @@ var _ = Describe("DRPlacementControl Reconciler", func() { clearDRPCStatus() expectedAction := rmn.ActionRelocate expectedPhase := rmn.DRState("") - exptectedPorgression := rmn.ProgressionStatus("") - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression := rmn.ProgressionStatus("") + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) // User intervention is required (simulate user intervention) resetClusterDown() setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionRelocate) expectedAction = rmn.ActionRelocate expectedPhase = rmn.Relocated - exptectedPorgression = rmn.ProgressionCompleted - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression = rmn.ProgressionCompleted + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) waitForCompletion(string(rmn.Relocated)) }) }) @@ -2528,8 +2528,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, "") expectedAction := rmn.DRAction("") expectedPhase := rmn.WaitForUser - exptectedPorgression := rmn.ProgressionActionPaused - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression := rmn.ProgressionActionPaused + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) checkConditionAllowFailover(DefaultDRPCNamespace) // User intervention is required (simulate user intervention) @@ -2537,8 +2537,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionRelocate) expectedAction = rmn.ActionRelocate expectedPhase = rmn.Relocated - exptectedPorgression = rmn.ProgressionCompleted - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression = rmn.ProgressionCompleted + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) waitForCompletion(string(rmn.Relocated)) }) }) @@ -2812,7 +2812,7 @@ func verifyRDSpecAfterActionSwitch(primaryCluster, secondaryCluster string, numO } func verifyDRPCStateAndProgression(expectedAction rmn.DRAction, expectedPhase rmn.DRState, - exptectedPorgression rmn.ProgressionStatus, + expectedPorgression rmn.ProgressionStatus, ) { var phase rmn.DRState @@ -2823,15 +2823,15 @@ func verifyDRPCStateAndProgression(expectedAction rmn.DRAction, expectedPhase rm phase = drpc.Status.Phase progression = drpc.Status.Progression - return phase == expectedPhase && progression == exptectedPorgression + return phase == expectedPhase && progression == expectedPorgression }, timeout, interval).Should(BeTrue(), - fmt.Sprintf("Phase has not been updated yet! 
Phase:%s Expected:%s - progression:%s exptected:%s", - phase, expectedPhase, progression, exptectedPorgression)) + fmt.Sprintf("Phase has not been updated yet! Phase:%s Expected:%s - progression:%s expected:%s", + phase, expectedPhase, progression, expectedPorgression)) drpc := getLatestDRPC(DefaultDRPCNamespace) Expect(drpc.Spec.Action).Should(Equal(expectedAction)) Expect(drpc.Status.Phase).Should(Equal(expectedPhase)) - Expect(drpc.Status.Progression).Should(Equal(exptectedPorgression)) + Expect(drpc.Status.Progression).Should(Equal(expectedPorgression)) } func checkConditionAllowFailover(namespace string) { From 36c10d276a10fd212d3cd502fa94f6c93dbc054f Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Tue, 7 Jan 2025 05:08:08 -0500 Subject: [PATCH 14/24] vrg: cleanup rd when vrg is secondary In processForDeletion, we clean up the resources that are related to volsync. It relies on v.volSyncPVCs but the list is empty when the vrg is secondary because it is populated by looking at the vrg.status.ProtectedPVCs. For managed applications, the RD, RS and Snapshots were being deleted as a consequence of the deletion of the VRG, as the VRG is the owner for them. In the case of discovered apps, the vrg is not the owner and therefore the RD was being left behind. Co-Authored-by: Annaraya Narasagond Signed-off-by: Raghavendra Talur --- internal/controller/vrg_volsync.go | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/internal/controller/vrg_volsync.go b/internal/controller/vrg_volsync.go index 500abee86..c23d127ad 100644 --- a/internal/controller/vrg_volsync.go +++ b/internal/controller/vrg_volsync.go @@ -554,23 +554,39 @@ func (v *VRGInstance) disownPVCs() error { return nil } -// cleanupResources this function deleted all PS, PD and VolumeSnapshots from its owner (VRG) +// cleanupResources deletes all RS, RD and VolumeSnapshots owned by the VRG func (v *VRGInstance) cleanupResources() error { for idx := range v.volSyncPVCs { pvc := &v.volSyncPVCs[idx] - if err := v.volSyncHandler.DeleteRS(pvc.Name, pvc.Namespace); err != nil { + if err := v.doCleanupResources(pvc.Name, pvc.Namespace); err != nil { return err } + } - if err := v.volSyncHandler.DeleteRD(pvc.Name, pvc.Namespace); err != nil { - return err - } + for idx := range v.instance.Spec.VolSync.RDSpec { + protectedPVC := v.instance.Spec.VolSync.RDSpec[idx].ProtectedPVC - if err := v.volSyncHandler.DeleteSnapshots(pvc.Namespace); err != nil { + if err := v.doCleanupResources(protectedPVC.Name, protectedPVC.Namespace); err != nil { return err } } return nil } + +func (v *VRGInstance) doCleanupResources(name, namespace string) error { + if err := v.volSyncHandler.DeleteRS(name, namespace); err != nil { + return err + } + + if err := v.volSyncHandler.DeleteRD(name, namespace); err != nil { + return err + } + + if err := v.volSyncHandler.DeleteSnapshots(namespace); err != nil { + return err + } + + return nil +} From 3bc86fa0cf72a08f71b5e0d7402a58637c8581c7 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Tue, 7 Jan 2025 06:34:38 -0500 Subject: [PATCH 15/24] e2e: enable discovered app tests for cephfs Co-Authored-by: Annaraya Narasagond Signed-off-by: Raghavendra Talur --- e2e/config.yaml.sample | 2 -- 1 file changed, 2 deletions(-) diff --git a/e2e/config.yaml.sample b/e2e/config.yaml.sample index 34b779f03..f44ed19c3 100644 --- a/e2e/config.yaml.sample +++ b/e2e/config.yaml.sample @@ -9,5 +9,3 @@ pvcspecs: - name: cephfs storageclassname: rook-cephfs accessmodes: ReadWriteMany -
unsupportedDeployers: - - disapp From c285a16617c736f2454e851ae8d6d48df5529a54 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Wed, 8 Jan 2025 21:10:11 -0500 Subject: [PATCH 16/24] golangci-lint: remove if-return from the linters if-return has been removed from the default list of revive for a valid reason. See the argument in https://github.com/mgechev/revive/pull/843. Comparing two snippets: 1. ``` if err := v.volSyncHandler.DeleteRS(name, namespace); err != nil { return err } if err := v.volSyncHandler.DeleteRD(name, namespace); err != nil { return err } if err := v.volSyncHandler.DeleteSnapshots(namespace); err != nil { return err } return nil ``` 2. ``` if err := v.volSyncHandler.DeleteRS(name, namespace); err != nil { return err } if err := v.volSyncHandler.DeleteRD(name, namespace); err != nil { return err } return v.volSyncHandler.DeleteSnapshots(namespace) ``` 1 is a lot easier to read compared to 2, but if-return throws an error for it. Signed-off-by: Raghavendra Talur --- .golangci.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.golangci.yaml b/.golangci.yaml index 1488b0a82..4ae25207b 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -47,7 +47,6 @@ linters-settings: - name: error-strings - name: error-naming - name: exported - - name: if-return - name: increment-decrement - name: var-naming - name: var-declaration From ac85abfe5b6229ef79afee4fc30516589dfb9922 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Fri, 10 Jan 2025 10:15:55 -0500 Subject: [PATCH 17/24] vrg: return nil on error Signed-off-by: Raghavendra Talur --- internal/controller/vrg_recipe.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/controller/vrg_recipe.go b/internal/controller/vrg_recipe.go index 452f55ddf..0049629d1 100644 --- a/internal/controller/vrg_recipe.go +++ b/internal/controller/vrg_recipe.go @@ -81,7 +81,7 @@ func GetPVCSelector(ctx context.Context, reader client.Reader, vrg ramen.VolumeR ) (PvcSelector, error) { recipeElements, err := RecipeElementsGet(ctx, reader, vrg, ramenConfig, log) if err != nil { - return recipeElements.PvcSelector, err + return PvcSelector{}, err } return recipeElements.PvcSelector, nil From 3bbc7598c496474d96c75d299026b830b9291456 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Wed, 4 Dec 2024 19:43:52 +0200 Subject: [PATCH 18/24] Update macOS setup instructions Add socket_vmnet instructions tailored for drenv, which can be copied and pasted into the shell for easy installation. - Recommend latest Lima version - Recommend installing socket_vmnet from binary package instead of source - Add copyable instructions for installing socket_vmnet binary package - Replace the note about brew with a note about installing the launchd service. The docs already warn about brew. Signed-off-by: Nir Soffer --- test/README.md | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/test/README.md b/test/README.md index 42603f285..eaa407aab 100644 --- a/test/README.md +++ b/test/README.md @@ -144,7 +144,8 @@ environment. virtctl ``` - lima version 1.0.0 or later is required. + lima version 1.0.0 or later is required; the latest version is + recommended. 1. Install the `clusteradm` tool. See [Install clusteradm CLI tool](https://open-cluster-management.io/getting-started/installation/start-the-control-plane/#install-clusteradm-cli-tool) @@ -164,19 +165,25 @@ environment. For more info see [kubectl-gather](https://github.com/nirs/kubectl-gather) -1. Install `socket_vmnet` from source +1.
Install `socket_vmnet` > [!IMPORTANT] - > Do not install socket_vmnet from brew, it is insecure. + > You must install the socket_vmnet launchd service; we don't manage + > socket_vmnet with Lima. ``` - git clone https://github.com/lima-vm/socket_vmnet.git - cd socket_vmnet - sudo make PREFIX=/opt/socket_vmnet install.bin - sudo make PREFIX=/opt/socket_vmnet install.launchd + VERSION="$(curl -fsSL https://api.github.com/repos/lima-vm/socket_vmnet/releases/latest | jq -r .tag_name)" + FILE="socket_vmnet-${VERSION:1}-$(uname -m).tar.gz" + SERVICE_ID="io.github.lima-vm.socket_vmnet" + curl -OSL "https://github.com/lima-vm/socket_vmnet/releases/download/${VERSION}/${FILE}" + sudo tar Cxzvf / "${FILE}" opt/socket_vmnet + sudo cp "/opt/socket_vmnet/share/doc/socket_vmnet/launchd/$SERVICE_ID.plist" "/Library/LaunchDaemons/$SERVICE_ID.plist" + sudo launchctl bootstrap system "/Library/LaunchDaemons/$SERVICE_ID.plist" + sudo launchctl enable system/$SERVICE_ID + sudo launchctl kickstart -kp system/$SERVICE_ID ``` - For more info see [Installing socket_vmnet from source](https://github.com/lima-vm/socket_vmnet?tab=readme-ov-file#from-source) + For more info see [Installing socket_vmnet from binary](https://github.com/lima-vm/socket_vmnet?tab=readme-ov-file#from-binary) ## Testing that drenv is healthy From 2975b369e771fdca741638797987b1ed79c2029a Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Tue, 14 Jan 2025 13:06:36 +0200 Subject: [PATCH 19/24] ci: Timeout gather job after 3 minutes Gathering typically takes less than 15 seconds, but we have seen one case where it was stuck for 5 hours[1]. In this case the job was terminated without archiving the logs and we have no way to debug the issue. Add a 3-minute timeout to the gather job. If the command times out, we kill it and archive what we got. This is likely to collect enough logs to help debug the original failure and the secondary gather failure (using gather.log). [1] https://github.com/RamenDR/ramen/actions/runs/12712182155/job/35437188040 Signed-off-by: Nir Soffer --- .github/workflows/e2e.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 1f6a483ae..0e1ad143c 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -79,6 +79,8 @@ jobs: - name: Gather environment data if: failure() working-directory: test + # Gathering typically takes less than 15 seconds.
+ timeout_minutes: 3 run: drenv gather --directory ${{ env.GATHER_DIR }} envs/regional-dr.yaml # Tar manually to work around github limitations with special chracters (:) From f4677aabbd0094eb6274247f5962181f78f76322 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Mon, 13 Jan 2025 22:58:25 +0200 Subject: [PATCH 20/24] Update ramen to use csi-addons 0.11.0 Generated using: % GOTOOLCHAIN=go1.22.9 go get github.com/csi-addons/kubernetes-csi-addons@v0.11.0 go: upgraded go 1.22.5 => 1.22.7 go: upgraded toolchain go1.22.7 => go1.22.9 go: upgraded github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1 => v0.11.0 go: upgraded google.golang.org/protobuf v1.35.1 => v1.35.2 go: upgraded k8s.io/api v0.31.1 => v0.31.2 go: upgraded k8s.io/apimachinery v0.31.1 => v0.31.2 go: upgraded sigs.k8s.io/controller-runtime v0.19.0 => v0.19.1 % go mod tidy go: downloading github.com/sagikazarmark/locafero v0.4.0 go: downloading golang.org/x/tools v0.28.0 go: downloading github.com/frankban/quicktest v1.14.6 go: downloading github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad go: downloading github.com/sourcegraph/conc v0.3.0 Boris says we have now go 1.22.9 on the downstream build machines so we can safely use it. We need to add a GOTOOLCHAIN makefile variable later to make this simpler for future updates. Signed-off-by: Nir Soffer --- go.mod | 14 +++++++------- go.sum | 20 ++++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index 38f11fa24..e88e92bfc 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/ramendr/ramen -go 1.22.5 +go 1.22.7 -toolchain go1.22.7 +toolchain go1.22.9 // This replace should always be here for ease of development. replace github.com/ramendr/ramen/api => ./api @@ -10,7 +10,7 @@ replace github.com/ramendr/ramen/api => ./api require ( github.com/aws/aws-sdk-go v1.55.5 github.com/backube/volsync v0.11.0 - github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1 + github.com/csi-addons/kubernetes-csi-addons v0.11.0 github.com/go-logr/logr v1.4.2 github.com/google/uuid v1.6.0 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 @@ -25,9 +25,9 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 golang.org/x/time v0.8.0 - k8s.io/api v0.31.1 + k8s.io/api v0.31.2 k8s.io/apiextensions-apiserver v0.31.1 - k8s.io/apimachinery v0.31.1 + k8s.io/apimachinery v0.31.2 k8s.io/client-go v12.0.0+incompatible k8s.io/component-base v0.31.1 k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 @@ -36,7 +36,7 @@ require ( open-cluster-management.io/config-policy-controller v0.15.0 open-cluster-management.io/governance-policy-propagator v0.15.0 open-cluster-management.io/multicloud-operators-subscription v0.15.0 - sigs.k8s.io/controller-runtime v0.19.0 + sigs.k8s.io/controller-runtime v0.19.1 sigs.k8s.io/yaml v1.4.0 ) @@ -99,7 +99,7 @@ require ( golang.org/x/text v0.21.0 // indirect golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.35.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 17f81ffbd..fa7619800 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= 
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1 h1:9mh79gS8O8uO5okZ2DhFO0LSrhpVXd9R9DLvbnh2He4= -github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1/go.mod h1:LeY7UYm8nEBCG1RcJG0DHmJbva0ILmtp+kcegxRuHhc= +github.com/csi-addons/kubernetes-csi-addons v0.11.0 h1:0f6AIXcpu68Vj0Q1IKij1l6arJfKFiaTZ9GwHuvLm/o= +github.com/csi-addons/kubernetes-csi-addons v0.11.0/go.mod h1:HJd3znD4i5D92/2PKqzrwBg5Q7Ur2me20VYakdBHzpk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -209,8 +209,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -226,12 +226,12 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= -k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= @@ -252,8 +252,8 @@ open-cluster-management.io/governance-policy-propagator v0.15.0 h1:tSDJcq8p/UQHB open-cluster-management.io/governance-policy-propagator v0.15.0/go.mod 
h1:I1LbX78mavWMv6W3YAeSjCq2YBfSS0RpOBWOskpbLng= open-cluster-management.io/multicloud-operators-subscription v0.15.0 h1:/FPaCfTn8PaDQCYMAhDw7xdH4TsaQlV6Ufi9zyWwyYw= open-cluster-management.io/multicloud-operators-subscription v0.15.0/go.mod h1:lDMnGyFWoyWFjrAJRrnnWz5Gz2IUsqRsvPV44ll7zXc= -sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= -sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk= +sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= From 078f77038a06b393b354d80c4fab740a18a7f2f8 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Tue, 14 Jan 2025 13:47:19 +0200 Subject: [PATCH 21/24] ci: Fix gather timeout I copied timeout_minutes, which is correct for the retry action, but the builtin timeout is timeout-minutes. The PR was merged too quickly without waiting for the CI. Signed-off-by: Nir Soffer --- .github/workflows/e2e.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 0e1ad143c..301ec43c3 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -80,7 +80,7 @@ jobs: if: failure() working-directory: test # Gathering typically takes less than 15 seconds. - timeout_minutes: 3 + timeout-minutes: 3 run: drenv gather --directory ${{ env.GATHER_DIR }} envs/regional-dr.yaml # Tar manually to work around github limitations with special chracters (:) From 8f8ada63d652997ed39111ecaead3f2b130cf2d8 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Tue, 14 Jan 2025 13:11:24 -0500 Subject: [PATCH 22/24] ramenctl: enable volsync When using discovered apps, we check for volsync support in two places in the ramenconfig. - Under the multiNamespace map, we look for the volsyncSupported: true key-value pair - Under the volsync map, we need the key "disabled" to be set to false. Ramenctl was configuring only the volsync map but not the multiNamespace one. We have to live with both fields for now because of backward compatibility reasons.
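Put together, the relevant RamenConfig fragment looks roughly like this (a sketch only; the key names come from the message above and the configmap change below, and the exact spelling of the volsync map follows the RamenConfig API):

```
volsync:
  disabled: false          # rendered by ramenctl from $volsync_disabled
multiNamespace:
  FeatureEnabled: true
  volsyncSupported: true   # the key this patch adds
```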
Signed-off-by: Raghavendra Talur --- ramenctl/ramenctl/resources/configmap.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ramenctl/ramenctl/resources/configmap.yaml b/ramenctl/ramenctl/resources/configmap.yaml index e8c041dba..c06ed47d5 100644 --- a/ramenctl/ramenctl/resources/configmap.yaml +++ b/ramenctl/ramenctl/resources/configmap.yaml @@ -38,6 +38,7 @@ data: disabled: $volsync_disabled multiNamespace: FeatureEnabled: true + volsyncSupported: true ramenOpsNamespace: ramen-ops s3StoreProfiles: - s3ProfileName: minio-on-$cluster1 From 30dc1dbcf8ae8d2028b082b100690de2046ba5a6 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Thu, 9 Jan 2025 03:00:57 -0500 Subject: [PATCH 23/24] vrg: use reader instead of client for check hooks Co-Authored-by: Annaraya Narasagond Signed-off-by: Raghavendra Talur --- internal/controller/util/json_util.go | 14 +++++++------- internal/controller/vrg_kubeobjects.go | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/controller/util/json_util.go b/internal/controller/util/json_util.go index 6d0c03fb6..e97a2020f 100644 --- a/internal/controller/util/json_util.go +++ b/internal/controller/util/json_util.go @@ -30,7 +30,7 @@ const ( pInterval = 100 ) -func EvaluateCheckHook(k8sClient client.Client, hook *kubeobjects.HookSpec, log logr.Logger) (bool, error) { +func EvaluateCheckHook(k8sClient client.Reader, hook *kubeobjects.HookSpec, log logr.Logger) (bool, error) { if hook.LabelSelector == nil && hook.NameSelector == "" { return false, fmt.Errorf("either nameSelector or labelSelector should be provided to get resources") } @@ -91,7 +91,7 @@ func EvaluateCheckHookForObjects(objs []client.Object, hook *kubeobjects.HookSpe return finalRes, err } -func getResourcesList(k8sClient client.Client, hook *kubeobjects.HookSpec) ([]client.Object, error) { +func getResourcesList(k8sClient client.Reader, hook *kubeobjects.HookSpec) ([]client.Object, error) { resourceList := make([]client.Object, 0) var objList client.ObjectList @@ -128,14 +128,14 @@ func getResourcesList(k8sClient client.Client, hook *kubeobjects.HookSpec) ([]cl return resourceList, nil } -func getResourcesUsingLabelSelector(c client.Client, hook *kubeobjects.HookSpec, +func getResourcesUsingLabelSelector(c client.Reader, hook *kubeobjects.HookSpec, objList client.ObjectList, ) ([]client.Object, error) { filteredObjs := make([]client.Object, 0) selector, err := metav1.LabelSelectorAsSelector(hook.LabelSelector) if err != nil { - return filteredObjs, fmt.Errorf("error during labelSelector to selector conversion") + return filteredObjs, fmt.Errorf("error converting labelSelector to selector") } listOps := &client.ListOptions{ @@ -145,13 +145,13 @@ func getResourcesUsingLabelSelector(c client.Client, hook *kubeobjects.HookSpec, err = c.List(context.Background(), objList, listOps) if err != nil { - return filteredObjs, err + return filteredObjs, fmt.Errorf("error listing resources using labelSelector: %w", err) } return getObjectsBasedOnType(objList), nil } -func getResourcesUsingNameSelector(c client.Client, hook *kubeobjects.HookSpec, +func getResourcesUsingNameSelector(c client.Reader, hook *kubeobjects.HookSpec, objList client.ObjectList, ) ([]client.Object, error) { filteredObjs := make([]client.Object, 0) @@ -169,7 +169,7 @@ func getResourcesUsingNameSelector(c client.Client, hook *kubeobjects.HookSpec, err = c.List(context.Background(), objList, listOps) if err != nil { - return filteredObjs, err + return filteredObjs, fmt.Errorf("error listing resources using 
nameSelector: %w", err) } return getObjectsBasedOnType(objList), nil diff --git a/internal/controller/vrg_kubeobjects.go b/internal/controller/vrg_kubeobjects.go index da0a52996..2d4fba5c3 100644 --- a/internal/controller/vrg_kubeobjects.go +++ b/internal/controller/vrg_kubeobjects.go @@ -288,7 +288,7 @@ func (v *VRGInstance) kubeObjectsCaptureStartOrResume( func (v *VRGInstance) executeHook(hook kubeobjects.HookSpec, log1 logr.Logger) error { if hook.Type == "check" { - hookResult, err := util.EvaluateCheckHook(v.reconciler.Client, &hook, log1) + hookResult, err := util.EvaluateCheckHook(v.reconciler.APIReader, &hook, log1) if err != nil { log1.Error(err, "error occurred during check hook ") From cb530567fb5068346cbbcfc4da130426d047edb9 Mon Sep 17 00:00:00 2001 From: Annaraya Narasagond Date: Wed, 22 Jan 2025 23:12:55 +0530 Subject: [PATCH 24/24] e2e: some additional changes --- e2e/deployers/discoveredapp.go | 229 ++++++++++++++++----------------- e2e/dractions/discovered.go | 4 +- e2e/exhaustive_suite_test.go | 4 +- 3 files changed, 118 insertions(+), 119 deletions(-) diff --git a/e2e/deployers/discoveredapp.go b/e2e/deployers/discoveredapp.go index 2cb359cf1..8699225a9 100644 --- a/e2e/deployers/discoveredapp.go +++ b/e2e/deployers/discoveredapp.go @@ -12,10 +12,7 @@ import ( "github.com/ramendr/ramen/e2e/types" "github.com/ramendr/ramen/e2e/util" recipe "github.com/ramendr/recipe/api/v1alpha1" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8stypes "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -105,124 +102,124 @@ func (d DiscoveredApp) Deploy(ctx types.Context) error { log.Info("recipe created on both dr clusters") } - if d.IncludeHooks && d.IncludeRecipe && d.IncludeVolumes { - deployment := getDeployment(appNamespace) - err := util.Ctx.C1.Client.Create(context.Background(), deployment) - if err != nil { - log.Error("error during creation of deployment") - } + // if d.IncludeHooks && d.IncludeRecipe && d.IncludeVolumes { + // deployment := getDeployment(appNamespace) + // err := util.Ctx.C1.Client.Create(context.Background(), deployment) + // if err != nil { + // log.Error("error during creation of deployment") + // } - pvc := getPvc(appNamespace) - err = util.Ctx.C1.Client.Create(context.Background(), pvc) - if err != nil { - log.Error("error during creation of pvc") - } - } + // pvc := getPvc(appNamespace) + // err = util.Ctx.C1.Client.Create(context.Background(), pvc) + // if err != nil { + // log.Error("error during creation of pvc") + // } + // } return nil } -func getPvc(ns string) *corev1.PersistentVolumeClaim { - scName := "rook-ceph-block" - return &corev1.PersistentVolumeClaim{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "PersistentVolumeClaim", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "busybox-pvc-vol", - Namespace: ns, - Labels: map[string]string{ - "appname": "busybox-vol", - }, - }, - Spec: corev1.PersistentVolumeClaimSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteOnce, - }, - Resources: corev1.VolumeResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Gi"), - }, - }, - StorageClassName: &scName, - }, - } -} - -func getDeployment(ns string) *appsv1.Deployment { - var i int32 = 1 - return &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "apps/v1", - Kind: "Deployment", - }, - ObjectMeta: 
metav1.ObjectMeta{ - Labels: map[string]string{ - "appname": "busybox-vol", - }, - Name: "busybox-vol", - Namespace: ns, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &i, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "appname": "busybox-vol", - }, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "appname": "busybox-vol", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Command: []string{ - "sh", - "-c", - `emit() { - echo "$(date) $1" | tee -a /var/log/ramen.log - sync - } - trap "emit STOP; exit" TERM - emit START - while true; do - sleep 10 & wait - emit UPDATE - done`, - }, - Image: "quay.io/nirsof/busybox:stable", - ImagePullPolicy: "IfNotPresent", - Name: "logger", - TerminationMessagePath: "/dev/termination-log", - TerminationMessagePolicy: "File", - VolumeMounts: []corev1.VolumeMount{ - { - MountPath: "/var/log", - Name: "varlog", - }, - }, - }, - }, - DNSPolicy: corev1.DNSClusterFirst, - Volumes: []corev1.Volume{ - { - Name: "varlog", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "busybox-pvc-vol", - }, - }, - }, - }, - }, - }, - }, - } -} +// func getPvc(ns string) *corev1.PersistentVolumeClaim { +// scName := "rook-ceph-block" +// return &corev1.PersistentVolumeClaim{ +// TypeMeta: metav1.TypeMeta{ +// APIVersion: "v1", +// Kind: "PersistentVolumeClaim", +// }, +// ObjectMeta: metav1.ObjectMeta{ +// Name: "busybox-pvc-vol", +// Namespace: ns, +// Labels: map[string]string{ +// "appname": "busybox-vol", +// }, +// }, +// Spec: corev1.PersistentVolumeClaimSpec{ +// AccessModes: []corev1.PersistentVolumeAccessMode{ +// corev1.ReadWriteOnce, +// }, +// Resources: corev1.VolumeResourceRequirements{ +// Requests: corev1.ResourceList{ +// corev1.ResourceStorage: resource.MustParse("1Gi"), +// }, +// }, +// StorageClassName: &scName, +// }, +// } +// } + +// func getDeployment(ns string) *appsv1.Deployment { +// var i int32 = 1 +// return &appsv1.Deployment{ +// TypeMeta: metav1.TypeMeta{ +// APIVersion: "apps/v1", +// Kind: "Deployment", +// }, +// ObjectMeta: metav1.ObjectMeta{ +// Labels: map[string]string{ +// "appname": "busybox-vol", +// }, +// Name: "busybox-vol", +// Namespace: ns, +// }, +// Spec: appsv1.DeploymentSpec{ +// Replicas: &i, +// Selector: &metav1.LabelSelector{ +// MatchLabels: map[string]string{ +// "appname": "busybox-vol", +// }, +// }, +// Template: corev1.PodTemplateSpec{ +// ObjectMeta: metav1.ObjectMeta{ +// Labels: map[string]string{ +// "appname": "busybox-vol", +// }, +// }, +// Spec: corev1.PodSpec{ +// Containers: []corev1.Container{ +// { +// Command: []string{ +// "sh", +// "-c", +// `emit() { +// echo "$(date) $1" | tee -a /var/log/ramen.log +// sync +// } +// trap "emit STOP; exit" TERM +// emit START +// while true; do +// sleep 10 & wait +// emit UPDATE +// done`, +// }, +// Image: "quay.io/nirsof/busybox:stable", +// ImagePullPolicy: "IfNotPresent", +// Name: "logger", +// TerminationMessagePath: "/dev/termination-log", +// TerminationMessagePolicy: "File", +// VolumeMounts: []corev1.VolumeMount{ +// { +// MountPath: "/var/log", +// Name: "varlog", +// }, +// }, +// }, +// }, +// DNSPolicy: corev1.DNSClusterFirst, +// Volumes: []corev1.Volume{ +// { +// Name: "varlog", +// VolumeSource: corev1.VolumeSource{ +// PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ +// ClaimName: "busybox-pvc-vol", +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// } +// } 
// Undeploy deletes the workload from the managed clusters. func (d DiscoveredApp) Undeploy(ctx types.Context) error { diff --git a/e2e/dractions/discovered.go b/e2e/dractions/discovered.go index b365e2d87..6bfa00d6e 100644 --- a/e2e/dractions/discovered.go +++ b/e2e/dractions/discovered.go @@ -8,6 +8,7 @@ import ( "github.com/ramendr/ramen/e2e/deployers" "github.com/ramendr/ramen/e2e/types" "github.com/ramendr/ramen/e2e/util" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func EnableProtectionDiscoveredApps(ctx types.Context) error { @@ -48,13 +49,14 @@ func EnableProtectionDiscoveredApps(ctx types.Context) error { drpc := generateDRPCDiscoveredApps( name, managementNamespace, clusterName, drPolicyName, placementName, appname, appNamespace) - if v, ok := ctx.Deployer().(deployers.DiscoveredApp); ok { + if v, ok := ctx.Deployer().(*deployers.DiscoveredApp); ok { if v.IncludeRecipe { recipeName := name + "-recipe" drpc.Spec.KubeObjectProtection.RecipeRef = &ramen.RecipeRef{ Namespace: appNamespace, Name: recipeName, } + drpc.Spec.PVCSelector = v1.LabelSelector{} } } diff --git a/e2e/exhaustive_suite_test.go b/e2e/exhaustive_suite_test.go index 6011924a6..799d49cf2 100644 --- a/e2e/exhaustive_suite_test.go +++ b/e2e/exhaustive_suite_test.go @@ -64,11 +64,11 @@ func Exhaustive(dt *testing.T) { t.Fatalf("Failed to ensure channel: %s", err) } - t.Cleanup(func() { + /*t.Cleanup(func() { if err := util.EnsureChannelDeleted(); err != nil { t.Fatalf("Failed to ensure channel deleted: %s", err) } - }) + })*/ generateWorkloads(Workloads)