diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 8e26bed02..301ec43c3 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -72,12 +72,15 @@ jobs: - name: Run e2e tests run: | + cat e2e/config.yaml.sample >> e2e/config.yaml cat ~/.config/drenv/rdr/config.yaml >> e2e/config.yaml make e2e-rdr - name: Gather environment data if: failure() working-directory: test + # Gathering typically takes less than 15 seconds. + timeout-minutes: 3 run: drenv gather --directory ${{ env.GATHER_DIR }} envs/regional-dr.yaml # Tar manually to work around github limitations with special chracters (:) diff --git a/.gitignore b/.gitignore index f4de2acb8..36813ec25 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,9 @@ /bin /testbin/* +# config files +/e2e/config.yaml + # Test binary, build with `go test -c` *.test diff --git a/.golangci.yaml b/.golangci.yaml index 1488b0a82..4ae25207b 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -47,7 +47,6 @@ linters-settings: - name: error-strings - name: error-naming - name: exported - - name: if-return - name: increment-decrement - name: var-naming - name: var-declaration diff --git a/e2e/config.yaml b/e2e/config.yaml.sample similarity index 87% rename from e2e/config.yaml rename to e2e/config.yaml.sample index 34b779f03..f44ed19c3 100644 --- a/e2e/config.yaml +++ b/e2e/config.yaml.sample @@ -9,5 +9,3 @@ pvcspecs: - name: cephfs storageclassname: rook-cephfs accessmodes: ReadWriteMany - unsupportedDeployers: - - disapp diff --git a/e2e/deployers/discoveredapp.go b/e2e/deployers/discoveredapp.go index 369d6eb46..8699225a9 100644 --- a/e2e/deployers/discoveredapp.go +++ b/e2e/deployers/discoveredapp.go @@ -4,17 +4,41 @@ package deployers import ( + "context" "fmt" "os" "os/exec" "github.com/ramendr/ramen/e2e/types" "github.com/ramendr/ramen/e2e/util" + recipe "github.com/ramendr/recipe/api/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8stypes "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) -type DiscoveredApp struct{} +const timeout = 300 + +type DiscoveredApp struct { + IncludeRecipe bool + IncludeHooks bool + IncludeVolumes bool +} func (d DiscoveredApp) GetName() string { + if d.IncludeRecipe { + if d.IncludeHooks { + if d.IncludeVolumes { + return "disapp-rhv" + } + + return "disapp-recipe-hooks" + } + + return "disapp-recipe" + } + return "disapp" } @@ -68,9 +92,135 @@ func (d DiscoveredApp) Deploy(ctx types.Context) error { log.Info("Workload deployed") + // recipe needs to be created based on flags + if d.IncludeRecipe { + recipeName := ctx.Name() + "-recipe" + if err := d.createRecipe(recipeName, appNamespace); err != nil { + log.Info("recipe creation failed") + } + + log.Info("recipe created on both dr clusters") + } + + // if d.IncludeHooks && d.IncludeRecipe && d.IncludeVolumes { + // deployment := getDeployment(appNamespace) + // err := util.Ctx.C1.Client.Create(context.Background(), deployment) + // if err != nil { + // log.Error("error during creation of deployment") + // } + + // pvc := getPvc(appNamespace) + // err = util.Ctx.C1.Client.Create(context.Background(), pvc) + // if err != nil { + // log.Error("error during creation of pvc") + // } + // } + return nil } +// func getPvc(ns string) *corev1.PersistentVolumeClaim { +// scName := "rook-ceph-block" +// return &corev1.PersistentVolumeClaim{ +// TypeMeta: metav1.TypeMeta{ +// APIVersion: "v1", +// Kind: "PersistentVolumeClaim", +// }, +// ObjectMeta: metav1.ObjectMeta{ 
+// Name: "busybox-pvc-vol", +// Namespace: ns, +// Labels: map[string]string{ +// "appname": "busybox-vol", +// }, +// }, +// Spec: corev1.PersistentVolumeClaimSpec{ +// AccessModes: []corev1.PersistentVolumeAccessMode{ +// corev1.ReadWriteOnce, +// }, +// Resources: corev1.VolumeResourceRequirements{ +// Requests: corev1.ResourceList{ +// corev1.ResourceStorage: resource.MustParse("1Gi"), +// }, +// }, +// StorageClassName: &scName, +// }, +// } +// } + +// func getDeployment(ns string) *appsv1.Deployment { +// var i int32 = 1 +// return &appsv1.Deployment{ +// TypeMeta: metav1.TypeMeta{ +// APIVersion: "apps/v1", +// Kind: "Deployment", +// }, +// ObjectMeta: metav1.ObjectMeta{ +// Labels: map[string]string{ +// "appname": "busybox-vol", +// }, +// Name: "busybox-vol", +// Namespace: ns, +// }, +// Spec: appsv1.DeploymentSpec{ +// Replicas: &i, +// Selector: &metav1.LabelSelector{ +// MatchLabels: map[string]string{ +// "appname": "busybox-vol", +// }, +// }, +// Template: corev1.PodTemplateSpec{ +// ObjectMeta: metav1.ObjectMeta{ +// Labels: map[string]string{ +// "appname": "busybox-vol", +// }, +// }, +// Spec: corev1.PodSpec{ +// Containers: []corev1.Container{ +// { +// Command: []string{ +// "sh", +// "-c", +// `emit() { +// echo "$(date) $1" | tee -a /var/log/ramen.log +// sync +// } +// trap "emit STOP; exit" TERM +// emit START +// while true; do +// sleep 10 & wait +// emit UPDATE +// done`, +// }, +// Image: "quay.io/nirsof/busybox:stable", +// ImagePullPolicy: "IfNotPresent", +// Name: "logger", +// TerminationMessagePath: "/dev/termination-log", +// TerminationMessagePolicy: "File", +// VolumeMounts: []corev1.VolumeMount{ +// { +// MountPath: "/var/log", +// Name: "varlog", +// }, +// }, +// }, +// }, +// DNSPolicy: corev1.DNSClusterFirst, +// Volumes: []corev1.Volume{ +// { +// Name: "varlog", +// VolumeSource: corev1.VolumeSource{ +// PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ +// ClaimName: "busybox-pvc-vol", +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// } +// } + // Undeploy deletes the workload from the managed clusters. 
func (d DiscoveredApp) Undeploy(ctx types.Context) error { log := ctx.Logger() @@ -96,6 +246,22 @@ func (d DiscoveredApp) Undeploy(ctx types.Context) error { return err } + if d.IncludeRecipe { + recipeName := ctx.Name() + "-recipe" + + log.Infof("Deleting recipe on cluster %q", drpolicy.Spec.DRClusters[0]) + + if err := deleteRecipe(util.Ctx.C1.Client, recipeName, appNamespace); err != nil { + return err + } + + log.Infof("Deleting recipe on cluster %q", drpolicy.Spec.DRClusters[1]) + + if err := deleteRecipe(util.Ctx.C2.Client, recipeName, appNamespace); err != nil { + return err + } + } + log.Infof("Deleting namespace %q on cluster %q", appNamespace, drpolicy.Spec.DRClusters[0]) // delete namespace on both clusters @@ -117,3 +283,210 @@ func (d DiscoveredApp) Undeploy(ctx types.Context) error { func (d DiscoveredApp) IsDiscovered() bool { return true } + +func (d DiscoveredApp) getRecipe(name, namespace string) *recipe.Recipe { + var recipe recipe.Recipe + if d.IncludeHooks { + recipe = getRecipeWithHooks(name, namespace) + if d.IncludeVolumes { + volumes := getVolumes(namespace) + + recipe.Spec.Volumes = volumes + // along with these changes another namespace or within the same ns, + // pod and pvc should be created which recipe volumes will refer to + } + } else { + recipe = getRecipeWithoutHooks(name, namespace) + } + return &recipe +} + +func (d DiscoveredApp) createRecipe(name, namespace string) error { + err := util.Ctx.C1.Client.Create(context.Background(), d.getRecipe(name, namespace)) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("recipe " + name + " already exists" + " in the cluster " + "C1") + } + + err = util.Ctx.C2.Client.Create(context.Background(), d.getRecipe(name, namespace)) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("recipe " + name + " already exists" + " in the cluster " + "C2") + } + + return nil +} + +func getVolumes(ns string) *recipe.Group { + return &recipe.Group{ + IncludedNamespaces: []string{ + ns, + }, + Name: "volumes-test", + Type: "volume", + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "appname", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"busybox-vol"}, + }, + }, + }, + } +} + +func getRecipeWithoutHooks(name, namespace string) recipe.Recipe { + return recipe.Recipe{ + TypeMeta: metav1.TypeMeta{ + Kind: "Recipe", + APIVersion: "ramendr.openshift.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: recipe.RecipeSpec{ + AppType: "busybox", + Groups: []*recipe.Group{ + { + Name: "rg1", + Type: "resource", + BackupRef: "rg1", + IncludedNamespaces: []string{ + namespace, + }, + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "appname", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"busybox"}, + }, + }, + }, + }, + }, + Workflows: []*recipe.Workflow{ + { + Name: "backup", + Sequence: []map[string]string{ + { + "group": "rg1", + }, + }, + }, + { + Name: "restore", + Sequence: []map[string]string{ + { + "group": "rg1", + }, + }, + }, + }, + }, + } +} + +func getRecipeWithHooks(name, namespace string) recipe.Recipe { + return recipe.Recipe{ + TypeMeta: metav1.TypeMeta{ + Kind: "Recipe", + APIVersion: "ramendr.openshift.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: recipe.RecipeSpec{ + AppType: "busybox", + Groups: 
[]*recipe.Group{ + { + Name: "rg1", + Type: "resource", + BackupRef: "rg1", + IncludedNamespaces: []string{ + namespace, + }, + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "appname", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"busybox"}, + }, + }, + }, + }, + }, + Hooks: []*recipe.Hook{ + getHookSpec(namespace, "backup"), + getHookSpec(namespace, "restore"), + }, + Workflows: []*recipe.Workflow{ + { + Name: "backup", + Sequence: []map[string]string{ + { + "hook": "backup/check-replicas", + }, + { + "group": "rg1", + }, + }, + }, + { + Name: "restore", + Sequence: []map[string]string{ + { + "group": "rg1", + }, + { + "hook": "restore/check-replicas", + }, + }, + }, + }, + }, + } +} + +func getHookSpec(namespace, hookType string) *recipe.Hook { + return &recipe.Hook{ + Name: hookType, + Type: "check", + Namespace: namespace, + NameSelector: "busybox", + SelectResource: "deployment", + Timeout: timeout, + Chks: []*recipe.Check{ + { + Name: "check-replicas", + Condition: "{$.spec.replicas} == {$.status.readyReplicas}", + }, + }, + } +} + +func deleteRecipe(client client.Client, name, namespace string) error { + r := &recipe.Recipe{} + key := k8stypes.NamespacedName{Namespace: namespace, Name: name} + + err := client.Get(context.Background(), key, r) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + return nil + } + + return client.Delete(context.Background(), r) +} diff --git a/e2e/dractions/discovered.go b/e2e/dractions/discovered.go index 84b5ab854..6bfa00d6e 100644 --- a/e2e/dractions/discovered.go +++ b/e2e/dractions/discovered.go @@ -8,6 +8,7 @@ import ( "github.com/ramendr/ramen/e2e/deployers" "github.com/ramendr/ramen/e2e/types" "github.com/ramendr/ramen/e2e/util" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func EnableProtectionDiscoveredApps(ctx types.Context) error { @@ -47,6 +48,18 @@ func EnableProtectionDiscoveredApps(ctx types.Context) error { drpc := generateDRPCDiscoveredApps( name, managementNamespace, clusterName, drPolicyName, placementName, appname, appNamespace) + + if v, ok := ctx.Deployer().(*deployers.DiscoveredApp); ok { + if v.IncludeRecipe { + recipeName := name + "-recipe" + drpc.Spec.KubeObjectProtection.RecipeRef = &ramen.RecipeRef{ + Namespace: appNamespace, + Name: recipeName, + } + drpc.Spec.PVCSelector = v1.LabelSelector{} + } + } + if err = createDRPC(util.Ctx.Hub.Client, drpc); err != nil { return err } diff --git a/e2e/exhaustive_suite_test.go b/e2e/exhaustive_suite_test.go index cd794f56d..799d49cf2 100644 --- a/e2e/exhaustive_suite_test.go +++ b/e2e/exhaustive_suite_test.go @@ -25,11 +25,18 @@ const ( ) var ( - Workloads = []types.Workload{} - subscription = &deployers.Subscription{} - appset = &deployers.ApplicationSet{} - discoveredApps = &deployers.DiscoveredApp{} - Deployers = []types.Deployer{subscription, appset, discoveredApps} + Workloads = []types.Workload{} + subscription = &deployers.Subscription{} + appset = &deployers.ApplicationSet{} + discoveredApps = &deployers.DiscoveredApp{} + discoveredAppsWithoutHook = &deployers.DiscoveredApp{IncludeRecipe: true, IncludeHooks: false} + discoveredAppsWithHook = &deployers.DiscoveredApp{IncludeRecipe: true, IncludeHooks: true} + discoveredAppsWithHookAndVol = &deployers.DiscoveredApp{IncludeRecipe: true, IncludeHooks: true, + IncludeVolumes: true} + Deployers = []types.Deployer{ + subscription, appset, discoveredApps, discoveredAppsWithoutHook, + discoveredAppsWithHook, discoveredAppsWithHookAndVol, + } ) func 
generateWorkloads([]types.Workload) { @@ -57,11 +64,11 @@ func Exhaustive(dt *testing.T) { t.Fatalf("Failed to ensure channel: %s", err) } - t.Cleanup(func() { + /*t.Cleanup(func() { if err := util.EnsureChannelDeleted(); err != nil { t.Fatalf("Failed to ensure channel deleted: %s", err) } - }) + })*/ generateWorkloads(Workloads) diff --git a/e2e/go.mod b/e2e/go.mod index 76229bff2..1f6e35e47 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -6,6 +6,7 @@ toolchain go1.22.7 require ( github.com/ramendr/ramen/api v0.0.0-00010101000000-000000000000 + github.com/ramendr/recipe v0.0.0-20241009174526-5cecfd571447 github.com/spf13/viper v1.19.0 go.uber.org/zap v1.27.0 k8s.io/api v0.31.1 @@ -21,7 +22,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.12.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect @@ -57,13 +58,13 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/x448/float16 v0.8.4 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.5.0 // indirect + golang.org/x/time v0.6.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect @@ -71,8 +72,8 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/component-base v0.31.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240411171206-dc4e619f62f3 // indirect - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/e2e/go.sum b/e2e/go.sum index f5a24ed73..a1225992c 100644 --- a/e2e/go.sum +++ b/e2e/go.sum @@ -6,8 +6,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= -github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -45,8 +45,8 @@ github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= -github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -59,6 +59,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -88,14 +90,16 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= +github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/ramendr/recipe v0.0.0-20241009174526-5cecfd571447 h1:RSb0XKjpxH0qln4a8Ebm5TTtrW2E3uLhdJs6FSMf8ik= +github.com/ramendr/recipe v0.0.0-20241009174526-5cecfd571447/go.mod h1:dGXrk743fq6VG8u6lflEce7ITM7d/9xSBeAbI2RXl9s= 
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= @@ -137,8 +141,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= -golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -147,8 +151,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -163,14 +167,14 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d 
h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -197,8 +201,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= -k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= -k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= +k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= @@ -207,12 +211,12 @@ k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240411171206-dc4e619f62f3 h1:SbdLaI6mM6ffDSJCadEaD4IkuPzepLDGlkd2xV0t1uA= -k8s.io/kube-openapi v0.0.0-20240411171206-dc4e619f62f3/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= k8s.io/kubectl v0.31.1 h1:ih4JQJHxsEggFqDJEHSOdJ69ZxZftgeZvYo7M/cpp24= k8s.io/kubectl v0.31.1/go.mod h1:aNuQoR43W6MLAtXQ/Bu4GDmoHlbhHKuyD49lmTC8eJM= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/api v0.15.0 h1:lRee1KOlGHZb2scTA7ff9E9Fxt2hJc7jpkHnaCbvkOU= open-cluster-management.io/api v0.15.0/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM= open-cluster-management.io/multicloud-operators-channel v0.15.0 h1:5DjxcZrhQhS/+A+zYx0/tUqKqE8m4mped3Gb9aRt6CE= diff --git a/e2e/util/context.go b/e2e/util/context.go index 80c08af25..6f18ff34f 100644 --- a/e2e/util/context.go +++ b/e2e/util/context.go @@ -20,6 +20,7 @@ import ( ramen "github.com/ramendr/ramen/api/v1alpha1" argocdv1alpha1hack "github.com/ramendr/ramen/e2e/argocd" + recipe "github.com/ramendr/recipe/api/v1alpha1" subscription 
"open-cluster-management.io/multicloud-operators-subscription/pkg/apis" placementrule "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1" ) @@ -60,6 +61,10 @@ func addToScheme(scheme *runtime.Scheme) error { return err } + if err := recipe.AddToScheme(scheme); err != nil { + return err + } + return ramen.AddToScheme(scheme) } diff --git a/go.mod b/go.mod index 38f11fa24..e88e92bfc 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/ramendr/ramen -go 1.22.5 +go 1.22.7 -toolchain go1.22.7 +toolchain go1.22.9 // This replace should always be here for ease of development. replace github.com/ramendr/ramen/api => ./api @@ -10,7 +10,7 @@ replace github.com/ramendr/ramen/api => ./api require ( github.com/aws/aws-sdk-go v1.55.5 github.com/backube/volsync v0.11.0 - github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1 + github.com/csi-addons/kubernetes-csi-addons v0.11.0 github.com/go-logr/logr v1.4.2 github.com/google/uuid v1.6.0 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 @@ -25,9 +25,9 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 golang.org/x/time v0.8.0 - k8s.io/api v0.31.1 + k8s.io/api v0.31.2 k8s.io/apiextensions-apiserver v0.31.1 - k8s.io/apimachinery v0.31.1 + k8s.io/apimachinery v0.31.2 k8s.io/client-go v12.0.0+incompatible k8s.io/component-base v0.31.1 k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 @@ -36,7 +36,7 @@ require ( open-cluster-management.io/config-policy-controller v0.15.0 open-cluster-management.io/governance-policy-propagator v0.15.0 open-cluster-management.io/multicloud-operators-subscription v0.15.0 - sigs.k8s.io/controller-runtime v0.19.0 + sigs.k8s.io/controller-runtime v0.19.1 sigs.k8s.io/yaml v1.4.0 ) @@ -99,7 +99,7 @@ require ( golang.org/x/text v0.21.0 // indirect golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.35.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 17f81ffbd..fa7619800 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1 h1:9mh79gS8O8uO5okZ2DhFO0LSrhpVXd9R9DLvbnh2He4= -github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1/go.mod h1:LeY7UYm8nEBCG1RcJG0DHmJbva0ILmtp+kcegxRuHhc= +github.com/csi-addons/kubernetes-csi-addons v0.11.0 h1:0f6AIXcpu68Vj0Q1IKij1l6arJfKFiaTZ9GwHuvLm/o= +github.com/csi-addons/kubernetes-csi-addons v0.11.0/go.mod h1:HJd3znD4i5D92/2PKqzrwBg5Q7Ur2me20VYakdBHzpk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -209,8 +209,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -226,12 +226,12 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= -k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= @@ -252,8 +252,8 @@ open-cluster-management.io/governance-policy-propagator v0.15.0 h1:tSDJcq8p/UQHB open-cluster-management.io/governance-policy-propagator v0.15.0/go.mod h1:I1LbX78mavWMv6W3YAeSjCq2YBfSS0RpOBWOskpbLng= open-cluster-management.io/multicloud-operators-subscription v0.15.0 h1:/FPaCfTn8PaDQCYMAhDw7xdH4TsaQlV6Ufi9zyWwyYw= open-cluster-management.io/multicloud-operators-subscription v0.15.0/go.mod h1:lDMnGyFWoyWFjrAJRrnnWz5Gz2IUsqRsvPV44ll7zXc= -sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= -sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk= +sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/internal/controller/cephfscg/replicationgroupsource.go b/internal/controller/cephfscg/replicationgroupsource.go index 477d3fbc3..71a1258e9 100644 --- 
a/internal/controller/cephfscg/replicationgroupsource.go +++ b/internal/controller/cephfscg/replicationgroupsource.go @@ -113,15 +113,6 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover. return mover.InProgress(), err } - m.Logger.Info("Restore PVCs from volume group snapshot") - - restoredPVCs, err := m.VolumeGroupHandler.RestoreVolumesFromVolumeGroupSnapshot(ctx, m.ReplicationGroupSource) - if err != nil { - m.Logger.Error(err, "Failed to restore volume group snapshot") - - return mover.InProgress(), err - } - m.Logger.Info("Create ReplicationSource for each Restored PVC") vrgName := m.ReplicationGroupSource.GetLabels()[volsync.VRGOwnerNameLabel] // Pre-allocated shared secret - DRPC will generate and propagate this secret from hub to clusters @@ -141,6 +132,15 @@ func (m *replicationGroupSourceMachine) Synchronize(ctx context.Context) (mover. return mover.InProgress(), nil } + m.Logger.Info("Restore PVCs from volume group snapshot") + + restoredPVCs, err := m.VolumeGroupHandler.RestoreVolumesFromVolumeGroupSnapshot(ctx, m.ReplicationGroupSource) + if err != nil { + m.Logger.Error(err, "Failed to restore volume group snapshot") + + return mover.InProgress(), err + } + replicationSources, err := m.VolumeGroupHandler.CreateOrUpdateReplicationSourceForRestoredPVCs( ctx, m.ReplicationGroupSource.Status.LastSyncStartTime.String(), restoredPVCs, m.ReplicationGroupSource) if err != nil { diff --git a/internal/controller/cephfscg/utils.go b/internal/controller/cephfscg/utils.go index 9790ab32f..070232df9 100644 --- a/internal/controller/cephfscg/utils.go +++ b/internal/controller/cephfscg/utils.go @@ -11,10 +11,8 @@ import ( "github.com/ramendr/ramen/internal/controller/volsync" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) // ------------- [Begin] Copied from existing code in Ramen ---- @@ -54,55 +52,6 @@ func getLocalServiceNameForRD(rdName string) string { return fmt.Sprintf("volsync-rsync-tls-dst-%s", rdName) } -// ------------- [End] Copied from existing code in Ramen ---- - -// ------------- [Begin] Edited from existing code in Ramen ---- - -// Copied from func (v *VSHandler) ModifyRSSpecForCephFS -func GetRestoreStorageClass( - ctx context.Context, k8sClient client.Client, storageClassName string, - defaultCephFSCSIDriverName string, -) (*storagev1.StorageClass, error) { - storageClass, err := GetStorageClass(ctx, k8sClient, &storageClassName) - if err != nil { - return nil, err - } - - if storageClass.Provisioner != defaultCephFSCSIDriverName { - return storageClass, nil // No workaround required - } - - // Create/update readOnlyPVCStorageClass - readOnlyPVCStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: storageClass.GetName() + "-vrg", - }, - } - - _, err = ctrlutil.CreateOrUpdate(ctx, k8sClient, readOnlyPVCStorageClass, func() error { - // Do not update the storageclass if it already exists - Provisioner and Parameters are immutable anyway - if readOnlyPVCStorageClass.CreationTimestamp.IsZero() { - readOnlyPVCStorageClass.Provisioner = storageClass.Provisioner - - // Copy other parameters from the original storage class - readOnlyPVCStorageClass.Parameters = map[string]string{} - for k, v := range storageClass.Parameters { - readOnlyPVCStorageClass.Parameters[k] = v - } - - // Set backingSnapshot parameter 
to true - readOnlyPVCStorageClass.Parameters["backingSnapshot"] = "true" - } - - return nil - }) - if err != nil { - return nil, fmt.Errorf("%w", err) - } - - return readOnlyPVCStorageClass, nil -} - // Copied from func (v *VSHandler) getStorageClass( func GetStorageClass( ctx context.Context, k8sClient client.Client, storageClassName *string, diff --git a/internal/controller/cephfscg/volumegroupsourcehandler.go b/internal/controller/cephfscg/volumegroupsourcehandler.go index 8e962451b..e92e02a7c 100644 --- a/internal/controller/cephfscg/volumegroupsourcehandler.go +++ b/internal/controller/cephfscg/volumegroupsourcehandler.go @@ -26,8 +26,8 @@ import ( ) var ( - VolumeGroupSnapshotNameFormat = "cephfscg-%s" - RestorePVCinCGNameFormat = "cephfscg-%s" + VolumeGroupSnapshotNameFormat = "vs-cg-%s" + RestorePVCinCGNameFormat = "vs-cg-%s" SnapshotGroup = "snapshot.storage.k8s.io" SnapshotGroupKind = "VolumeSnapshot" ) @@ -146,7 +146,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateVolumeGroupSnapshot( return nil } -// CleanVolumeGroupSnapshot delete restored pvc, replicationsource and VolumeGroupSnapshot +// CleanVolumeGroupSnapshot delete restored pvc and VolumeGroupSnapshot // //nolint:funlen func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot( @@ -214,42 +214,48 @@ func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot( return nil } -// RestoreVolumesFromVolumeGroupSnapshot restore VolumeGroupSnapshot to PVCs +// RestoreVolumesFromVolumeGroupSnapshot restores VolumeGroupSnapshot to PVCs +// +//nolint:funlen,cyclop func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( ctx context.Context, owner metav1.Object, ) ([]RestoredPVC, error) { logger := h.Logger.WithName("RestoreVolumesFromVolumeGroupSnapshot") logger.Info("Get volume group snapshot") - volumeGroupSnapshot := &vgsv1alphfa1.VolumeGroupSnapshot{} + vgs := &vgsv1alphfa1.VolumeGroupSnapshot{} if err := h.Client.Get(ctx, types.NamespacedName{Name: h.VolumeGroupSnapshotName, Namespace: h.VolumeGroupSnapshotNamespace}, - volumeGroupSnapshot); err != nil { + vgs); err != nil { return nil, fmt.Errorf("failed to get volume group snapshot: %w", err) } - if volumeGroupSnapshot.Status == nil || volumeGroupSnapshot.Status.ReadyToUse == nil || - (volumeGroupSnapshot.Status.ReadyToUse != nil && !*volumeGroupSnapshot.Status.ReadyToUse) { + if vgs.Status == nil || vgs.Status.ReadyToUse == nil || + (vgs.Status.ReadyToUse != nil && !*vgs.Status.ReadyToUse) { return nil, fmt.Errorf("can't restore volume group snapshot: volume group snapshot is not ready to be used") } restoredPVCs := []RestoredPVC{} - for _, pvcVSRef := range volumeGroupSnapshot.Status.PVCVolumeSnapshotRefList { + for _, pvcVSRef := range vgs.Status.PVCVolumeSnapshotRefList { logger.Info("Get PVCName from volume snapshot", "PVCName", pvcVSRef.PersistentVolumeClaimRef.Name, "VolumeSnapshotName", pvcVSRef.VolumeSnapshotRef.Name) pvc, err := util.GetPVC(ctx, h.Client, - types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: volumeGroupSnapshot.Namespace}) + types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: vgs.Namespace}) if err != nil { return nil, fmt.Errorf("failed to get PVC from VGS %s: %w", - volumeGroupSnapshot.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err) + vgs.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err) } - restoreStorageClass, err := GetRestoreStorageClass(ctx, h.Client, - *pvc.Spec.StorageClassName, h.DefaultCephFSCSIDriverName) + storageClass, err := 
GetStorageClass(ctx, h.Client, pvc.Spec.StorageClassName) if err != nil { - return nil, fmt.Errorf("failed to get Restore Storage Class from PVC %s: %w", pvc.Name+"/"+pvc.Namespace, err) + return nil, err + } + + restoreAccessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} + if storageClass.Provisioner != h.DefaultCephFSCSIDriverName { + restoreAccessModes = pvc.Spec.AccessModes } RestoredPVCNamespacedName := types.NamespacedName{ @@ -258,7 +264,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( } if err := h.RestoreVolumesFromSnapshot( ctx, pvcVSRef.VolumeSnapshotRef.Name, pvc, RestoredPVCNamespacedName, - restoreStorageClass.GetName(), owner); err != nil { + restoreAccessModes, owner); err != nil { return nil, fmt.Errorf("failed to restore volumes from snapshot %s: %w", pvcVSRef.VolumeSnapshotRef.Name+"/"+pvc.Namespace, err) } @@ -286,7 +292,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot( vsName string, pvc *corev1.PersistentVolumeClaim, restoredPVCNamespacedname types.NamespacedName, - restoreStorageClassName string, + restoreAccessModes []corev1.PersistentVolumeAccessMode, owner metav1.Object, ) error { logger := h.Logger.WithName("RestoreVolumesFromSnapshot"). @@ -351,8 +357,8 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot( } if restoredPVC.CreationTimestamp.IsZero() { // set immutable fields - restoredPVC.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} - restoredPVC.Spec.StorageClassName = &restoreStorageClassName + restoredPVC.Spec.AccessModes = restoreAccessModes + restoredPVC.Spec.StorageClassName = pvc.Spec.StorageClassName restoredPVC.Spec.DataSource = &snapshotRef } @@ -424,8 +430,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateReplicationSourceForRestoredPVC } replicationSource.Spec.RsyncTLS = &volsyncv1alpha1.ReplicationSourceRsyncTLSSpec{ ReplicationSourceVolumeOptions: volsyncv1alpha1.ReplicationSourceVolumeOptions{ - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}, - CopyMethod: volsyncv1alpha1.CopyMethodDirect, + CopyMethod: volsyncv1alpha1.CopyMethodDirect, }, KeySecret: &h.VolsyncKeySecretName, diff --git a/internal/controller/drplacementcontrol.go b/internal/controller/drplacementcontrol.go index fea58b024..35800c903 100644 --- a/internal/controller/drplacementcontrol.go +++ b/internal/controller/drplacementcontrol.go @@ -1711,6 +1711,7 @@ func (d *DRPCInstance) updateVRGOptionalFields(vrg, vrgFromView *rmn.VolumeRepli DoNotDeletePVCAnnotation: d.instance.GetAnnotations()[DoNotDeletePVCAnnotation], DRPCUIDAnnotation: string(d.instance.UID), rmnutil.IsCGEnabledAnnotation: d.instance.GetAnnotations()[rmnutil.IsCGEnabledAnnotation], + rmnutil.UseVolSyncAnnotation: d.instance.GetAnnotations()[rmnutil.UseVolSyncAnnotation], } vrg.Spec.ProtectedNamespaces = d.instance.Spec.ProtectedNamespaces diff --git a/internal/controller/drplacementcontrol_controller.go b/internal/controller/drplacementcontrol_controller.go index f3399490a..ff45e54f7 100644 --- a/internal/controller/drplacementcontrol_controller.go +++ b/internal/controller/drplacementcontrol_controller.go @@ -879,7 +879,7 @@ func getPlacementOrPlacementRule( usrPlacement, err = getPlacementRule(ctx, k8sclient, drpc, log) if err != nil { if k8serrors.IsNotFound(err) { - // PacementRule not found. Check Placement instead + // PlacementRule not found. 
Check Placement instead usrPlacement, err = getPlacement(ctx, k8sclient, drpc, log) } @@ -2157,7 +2157,7 @@ func (r *DRPlacementControlReconciler) determineDRPCState( } msg := fmt.Sprintf("Failover is allowed - VRGs count:'%d'. drpcAction:'%s'."+ - " vrgAction:'%s'. DstCluster:'%s'. vrgOnCluste '%s'", + " vrgAction:'%s'. DstCluster:'%s'. vrgOnCluster '%s'", len(vrgs), drpc.Spec.Action, vrg.Spec.Action, dstCluster, clusterName) return AllowFailover, msg, nil diff --git a/internal/controller/drplacementcontrol_controller_test.go b/internal/controller/drplacementcontrol_controller_test.go index 413d45af3..7a068b78b 100644 --- a/internal/controller/drplacementcontrol_controller_test.go +++ b/internal/controller/drplacementcontrol_controller_test.go @@ -396,7 +396,7 @@ func setRestorePVsComplete() { restorePVs = true } -func setRestorePVsUncomplete() { +func setRestorePVsIncomplete() { restorePVs = false } @@ -1300,7 +1300,7 @@ func getManagedClusterViewCount(homeClusterNamespace string) int { } func verifyUserPlacementRuleDecision(name, namespace, homeCluster string) { - usrPlcementLookupKey := types.NamespacedName{ + usrPlacementLookupKey := types.NamespacedName{ Name: name, Namespace: namespace, } @@ -1310,10 +1310,10 @@ func verifyUserPlacementRuleDecision(name, namespace, homeCluster string) { var placementObj client.Object Eventually(func() bool { - err := k8sClient.Get(context.TODO(), usrPlcementLookupKey, usrPlRule) + err := k8sClient.Get(context.TODO(), usrPlacementLookupKey, usrPlRule) if k8serrors.IsNotFound(err) { usrPlmnt := &clrapiv1beta1.Placement{} - err = k8sClient.Get(context.TODO(), usrPlcementLookupKey, usrPlmnt) + err = k8sClient.Get(context.TODO(), usrPlacementLookupKey, usrPlmnt) if err != nil { return false } @@ -1351,7 +1351,7 @@ func getPlacementDecision(plName, plNamespace string) *clrapiv1beta1.PlacementDe //nolint:unparam func verifyUserPlacementRuleDecisionUnchanged(name, namespace, homeCluster string) { - usrPlcementLookupKey := types.NamespacedName{ + usrPlacementLookupKey := types.NamespacedName{ Name: name, Namespace: namespace, } @@ -1361,10 +1361,10 @@ func verifyUserPlacementRuleDecisionUnchanged(name, namespace, homeCluster strin var placementObj client.Object Consistently(func() bool { - err := k8sClient.Get(context.TODO(), usrPlcementLookupKey, usrPlRule) + err := k8sClient.Get(context.TODO(), usrPlacementLookupKey, usrPlRule) if k8serrors.IsNotFound(err) { usrPlmnt := &clrapiv1beta1.Placement{} - err = k8sClient.Get(context.TODO(), usrPlcementLookupKey, usrPlmnt) + err = k8sClient.Get(context.TODO(), usrPlacementLookupKey, usrPlmnt) if err != nil { return false } @@ -1952,7 +1952,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { When("DRAction changes to Failover", func() { It("Should not failover to Secondary (West1ManagedCluster) till PV manifest is applied", func() { By("\n\n*** Failover - 1\n\n") - setRestorePVsUncomplete() + setRestorePVsIncomplete() setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionFailover) verifyUserPlacementRuleDecisionUnchanged(userPlacementRule.Name, userPlacementRule.Namespace, East1ManagedCluster) // MWs for VRG, NS, DRCluster, and MMode @@ -2064,7 +2064,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { }) When("DRAction changes to Failover using Placement with Subscription", func() { It("Should not failover to Secondary (West1ManagedCluster) till PV manifest is applied", func() { - setRestorePVsUncomplete() + setRestorePVsIncomplete() 
setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionFailover) verifyUserPlacementRuleDecisionUnchanged(placement.Name, placement.Namespace, East1ManagedCluster) // MWs for VRG, NS, VRG DRCluster, and MMode @@ -2140,7 +2140,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { }) When("DRAction changes to Failover using Placement", func() { It("Should not failover to Secondary (West1ManagedCluster) till PV manifest is applied", func() { - setRestorePVsUncomplete() + setRestorePVsIncomplete() setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionFailover) verifyUserPlacementRuleDecisionUnchanged(placement.Name, placement.Namespace, East1ManagedCluster) // MWs for VRG, NS, VRG DRCluster, and MMode @@ -2223,7 +2223,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { When("DRAction changes to Failover", func() { It("Should not failover to Secondary (East2ManagedCluster) till PV manifest is applied", func() { By("\n\n*** Failover - 1\n\n") - setRestorePVsUncomplete() + setRestorePVsIncomplete() fenceCluster(East1ManagedCluster, false) setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, East2ManagedCluster, rmn.ActionFailover) verifyUserPlacementRuleDecisionUnchanged(userPlacementRule.Name, userPlacementRule.Namespace, East1ManagedCluster) @@ -2298,7 +2298,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { When("DRAction changes to Failover", func() { It("Should not failover to Secondary (East2ManagedCluster) till PV manifest is applied", func() { By("\n\n*** Failover - 1\n\n") - setRestorePVsUncomplete() + setRestorePVsIncomplete() fenceCluster(East1ManagedCluster, true) setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, East2ManagedCluster, rmn.ActionFailover) verifyUserPlacementRuleDecisionUnchanged(userPlacementRule.Name, userPlacementRule.Namespace, East1ManagedCluster) @@ -2393,11 +2393,11 @@ var _ = Describe("DRPlacementControl Reconciler", func() { clearDRPCStatus() expectedAction := rmn.DRAction("") expectedPhase := rmn.Deployed - exptectedPorgression := rmn.ProgressionCompleted - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression := rmn.ProgressionCompleted + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) resetClusterDown() - exptectedCompleted := rmn.ProgressionCompleted - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedCompleted) + expectedCompleted := rmn.ProgressionCompleted + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedCompleted) }) }) //nolint:lll @@ -2417,8 +2417,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { clearDRPCStatus() expectedAction := rmn.DRAction("") expectedPhase := rmn.WaitForUser - exptectedPorgression := rmn.ProgressionActionPaused - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression := rmn.ProgressionActionPaused + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) }) }) @@ -2455,8 +2455,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionFailover) expectedAction := rmn.ActionFailover expectedPhase := rmn.WaitForUser - exptectedPorgression := rmn.ProgressionActionPaused - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + 
expectedPorgression := rmn.ProgressionActionPaused + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) checkConditionAllowFailover(DefaultDRPCNamespace) // User intervention is required (simulate user intervention) @@ -2464,8 +2464,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionFailover) expectedAction = rmn.ActionFailover expectedPhase = rmn.FailedOver - exptectedPorgression = rmn.ProgressionCompleted - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression = rmn.ProgressionCompleted + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) waitForCompletion(string(rmn.FailedOver)) }) }) @@ -2497,16 +2497,16 @@ var _ = Describe("DRPlacementControl Reconciler", func() { clearDRPCStatus() expectedAction := rmn.ActionRelocate expectedPhase := rmn.DRState("") - exptectedPorgression := rmn.ProgressionStatus("") - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression := rmn.ProgressionStatus("") + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) // User intervention is required (simulate user intervention) resetClusterDown() setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionRelocate) expectedAction = rmn.ActionRelocate expectedPhase = rmn.Relocated - exptectedPorgression = rmn.ProgressionCompleted - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression = rmn.ProgressionCompleted + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) waitForCompletion(string(rmn.Relocated)) }) }) @@ -2528,8 +2528,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, "") expectedAction := rmn.DRAction("") expectedPhase := rmn.WaitForUser - exptectedPorgression := rmn.ProgressionActionPaused - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression := rmn.ProgressionActionPaused + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) checkConditionAllowFailover(DefaultDRPCNamespace) // User intervention is required (simulate user intervention) @@ -2537,8 +2537,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { setDRPCSpecExpectationTo(DefaultDRPCNamespace, East1ManagedCluster, West1ManagedCluster, rmn.ActionRelocate) expectedAction = rmn.ActionRelocate expectedPhase = rmn.Relocated - exptectedPorgression = rmn.ProgressionCompleted - verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) + expectedPorgression = rmn.ProgressionCompleted + verifyDRPCStateAndProgression(expectedAction, expectedPhase, expectedPorgression) waitForCompletion(string(rmn.Relocated)) }) }) @@ -2812,7 +2812,7 @@ func verifyRDSpecAfterActionSwitch(primaryCluster, secondaryCluster string, numO } func verifyDRPCStateAndProgression(expectedAction rmn.DRAction, expectedPhase rmn.DRState, - exptectedPorgression rmn.ProgressionStatus, + expectedPorgression rmn.ProgressionStatus, ) { var phase rmn.DRState @@ -2823,15 +2823,15 @@ func verifyDRPCStateAndProgression(expectedAction rmn.DRAction, expectedPhase rm phase = drpc.Status.Phase progression = drpc.Status.Progression - return phase == expectedPhase && 
progression == exptectedPorgression + return phase == expectedPhase && progression == expectedPorgression }, timeout, interval).Should(BeTrue(), - fmt.Sprintf("Phase has not been updated yet! Phase:%s Expected:%s - progression:%s exptected:%s", - phase, expectedPhase, progression, exptectedPorgression)) + fmt.Sprintf("Phase has not been updated yet! Phase:%s Expected:%s - progression:%s expected:%s", + phase, expectedPhase, progression, expectedPorgression)) drpc := getLatestDRPC(DefaultDRPCNamespace) Expect(drpc.Spec.Action).Should(Equal(expectedAction)) Expect(drpc.Status.Phase).Should(Equal(expectedPhase)) - Expect(drpc.Status.Progression).Should(Equal(exptectedPorgression)) + Expect(drpc.Status.Progression).Should(Equal(expectedPorgression)) } func checkConditionAllowFailover(namespace string) { diff --git a/internal/controller/util/json_util.go b/internal/controller/util/json_util.go index 6d0c03fb6..e97a2020f 100644 --- a/internal/controller/util/json_util.go +++ b/internal/controller/util/json_util.go @@ -30,7 +30,7 @@ const ( pInterval = 100 ) -func EvaluateCheckHook(k8sClient client.Client, hook *kubeobjects.HookSpec, log logr.Logger) (bool, error) { +func EvaluateCheckHook(k8sClient client.Reader, hook *kubeobjects.HookSpec, log logr.Logger) (bool, error) { if hook.LabelSelector == nil && hook.NameSelector == "" { return false, fmt.Errorf("either nameSelector or labelSelector should be provided to get resources") } @@ -91,7 +91,7 @@ func EvaluateCheckHookForObjects(objs []client.Object, hook *kubeobjects.HookSpe return finalRes, err } -func getResourcesList(k8sClient client.Client, hook *kubeobjects.HookSpec) ([]client.Object, error) { +func getResourcesList(k8sClient client.Reader, hook *kubeobjects.HookSpec) ([]client.Object, error) { resourceList := make([]client.Object, 0) var objList client.ObjectList @@ -128,14 +128,14 @@ func getResourcesList(k8sClient client.Client, hook *kubeobjects.HookSpec) ([]cl return resourceList, nil } -func getResourcesUsingLabelSelector(c client.Client, hook *kubeobjects.HookSpec, +func getResourcesUsingLabelSelector(c client.Reader, hook *kubeobjects.HookSpec, objList client.ObjectList, ) ([]client.Object, error) { filteredObjs := make([]client.Object, 0) selector, err := metav1.LabelSelectorAsSelector(hook.LabelSelector) if err != nil { - return filteredObjs, fmt.Errorf("error during labelSelector to selector conversion") + return filteredObjs, fmt.Errorf("error converting labelSelector to selector") } listOps := &client.ListOptions{ @@ -145,13 +145,13 @@ func getResourcesUsingLabelSelector(c client.Client, hook *kubeobjects.HookSpec, err = c.List(context.Background(), objList, listOps) if err != nil { - return filteredObjs, err + return filteredObjs, fmt.Errorf("error listing resources using labelSelector: %w", err) } return getObjectsBasedOnType(objList), nil } -func getResourcesUsingNameSelector(c client.Client, hook *kubeobjects.HookSpec, +func getResourcesUsingNameSelector(c client.Reader, hook *kubeobjects.HookSpec, objList client.ObjectList, ) ([]client.Object, error) { filteredObjs := make([]client.Object, 0) @@ -169,7 +169,7 @@ func getResourcesUsingNameSelector(c client.Client, hook *kubeobjects.HookSpec, err = c.List(context.Background(), objList, listOps) if err != nil { - return filteredObjs, err + return filteredObjs, fmt.Errorf("error listing resources using nameSelector: %w", err) } return getObjectsBasedOnType(objList), nil diff --git a/internal/controller/volsync/vshandler.go b/internal/controller/volsync/vshandler.go index 
03b1f9b1e..7f0c9981e 100644 --- a/internal/controller/volsync/vshandler.go +++ b/internal/controller/volsync/vshandler.go @@ -408,13 +408,9 @@ func (v *VSHandler) createOrUpdateRS(rsSpec ramendrv1alpha1.VolSyncReplicationSo return nil, err } - volumeSnapshotClassName, err := v.getVolumeSnapshotClassFromPVCStorageClass(storageClass) - if err != nil { - return nil, err - } + v.ModifyRSSpecForCephFS(&rsSpec, storageClass) - // Fix for CephFS (replication source only) - may need different storageclass and access modes - err = v.ModifyRSSpecForCephFS(&rsSpec, storageClass) + volumeSnapshotClassName, err := v.getVolumeSnapshotClassFromPVCStorageClass(storageClass) if err != nil { return nil, err } @@ -1338,60 +1334,13 @@ func (v *VSHandler) getRsyncServiceType() *corev1.ServiceType { // For CephFS only, there is a problem where restoring a PVC from snapshot can be very slow when there are a lot of // files - on every replication cycle we need to create a PVC from snapshot in order to get a point-in-time copy of // the source PVC to sync with the replicationdestination. -// This workaround follows the instructions here: -// https://github.com/ceph/ceph-csi/blob/devel/docs/cephfs-snapshot-backed-volumes.md -// -// Steps: -// 1. If the storageclass detected is cephfs, create a new storageclass with backingSnapshot: "true" parameter -// (or reuse if it already exists). If not cephfs, return and do not modify rsSpec. -// 2. Modify rsSpec to use the new storageclass and also update AccessModes to 'ReadOnlyMany' as per the instructions -// above. +// If CephFS PVC, modify rsSpec AccessModes to use 'ReadOnlyMany'. func (v *VSHandler) ModifyRSSpecForCephFS(rsSpec *ramendrv1alpha1.VolSyncReplicationSourceSpec, storageClass *storagev1.StorageClass, -) error { - if storageClass.Provisioner != v.defaultCephFSCSIDriverName { - return nil // No workaround required - } - - v.log.Info("CephFS storageclass detected on source PVC, creating replicationsource with read-only "+ - " PVC from snapshot", "storageClassName", storageClass.GetName()) - - // Create/update readOnlyPVCStorageClass - readOnlyPVCStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: storageClass.GetName() + "-vrg", - }, - } - - op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, readOnlyPVCStorageClass, func() error { - // Do not update the storageclass if it already exists - Provisioner and Parameters are immutable anyway - if readOnlyPVCStorageClass.CreationTimestamp.IsZero() { - readOnlyPVCStorageClass.Provisioner = storageClass.Provisioner - - // Copy other parameters from the original storage class - // Note - not copying volumebindingmode or reclaim policy from the source storageclass will leave defaults - readOnlyPVCStorageClass.Parameters = map[string]string{} - for k, v := range storageClass.Parameters { - readOnlyPVCStorageClass.Parameters[k] = v - } - - // Set backingSnapshot parameter to true - readOnlyPVCStorageClass.Parameters["backingSnapshot"] = "true" - } - - return nil - }) - if err != nil { - return fmt.Errorf("%w", err) +) { + if storageClass.Provisioner == v.defaultCephFSCSIDriverName { + rsSpec.ProtectedPVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} } - - v.log.Info("StorageClass for readonly cephfs PVC createOrUpdate Complete", "op", op) - - // Update the rsSpec with access modes and the special storageclass - rsSpec.ProtectedPVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} - rsSpec.ProtectedPVC.StorageClassName = 
&readOnlyPVCStorageClass.Name - - return nil } func (v *VSHandler) GetVolumeSnapshotClassFromPVCStorageClass(storageClassName *string) (string, error) { @@ -1821,21 +1770,10 @@ func (v *VSHandler) reconcileLocalRS(rd *volsyncv1alpha1.ReplicationDestination, ) { v.log.Info("Reconciling localRS", "RD", rd.GetName()) - storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName) - if err != nil { - return nil, err - } - rsSpec := &ramendrv1alpha1.VolSyncReplicationSourceSpec{ ProtectedPVC: rdSpec.ProtectedPVC, } - // Fix for CephFS (replication source only) - may need different storageclass and access modes - err = v.ModifyRSSpecForCephFS(rsSpec, storageClass) - if err != nil { - return nil, err - } - pvc, err := v.setupLocalRS(rd, rdSpec, snapshotRef) if err != nil { return nil, err @@ -1983,16 +1921,20 @@ func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination, } // In all other cases, we have to create a RO PVC. - return v.createReadOnlyPVCFromSnapshot(rd, rdSpec, snapshotRef, restoreSize) + return v.createPVCFromSnapshot(rd, rdSpec, snapshotRef, restoreSize) } -func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestination, +func (v *VSHandler) createPVCFromSnapshot(rd *volsyncv1alpha1.ReplicationDestination, rdSpec *ramendrv1alpha1.VolSyncReplicationDestinationSpec, snapshotRef *corev1.TypedLocalObjectReference, snapRestoreSize *resource.Quantity, ) (*corev1.PersistentVolumeClaim, error) { - l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef, - "snapRestoreSize", snapRestoreSize) + l := v.log.WithValues("pvcName", rd.GetName(), "snapshotRef", snapshotRef, "snapRestoreSize", snapRestoreSize) + + storageClass, err := v.getStorageClass(rdSpec.ProtectedPVC.StorageClassName) + if err != nil { + return nil, err + } pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -2017,6 +1959,11 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio accessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} + // Use the protectedPVC accessModes when csi driver is not the default (openshift-storage.cephfs.csi.ceph.com) + if storageClass.Provisioner != v.defaultCephFSCSIDriverName { + accessModes = rdSpec.ProtectedPVC.AccessModes + } + if pvc.CreationTimestamp.IsZero() { // set immutable fields pvc.Spec.AccessModes = accessModes pvc.Spec.StorageClassName = rd.Spec.RsyncTLS.StorageClassName @@ -2032,8 +1979,6 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio return nil }) if err != nil { - l.Error(err, "Unable to createOrUpdate PVC from snapshot for localRS") - return nil, fmt.Errorf("error creating or updating PVC from snapshot for localRS (%w)", err) } diff --git a/internal/controller/volsync/vshandler_test.go b/internal/controller/volsync/vshandler_test.go index 67f0dc8be..a3ebfb264 100644 --- a/internal/controller/volsync/vshandler_test.go +++ b/internal/controller/volsync/vshandler_test.go @@ -252,10 +252,18 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { }, storageClassForTest)).To(Succeed()) - // - // Call ModifyRSSpecForCephFS - // - Expect(vsHandler.ModifyRSSpecForCephFS(&testRsSpec, storageClassForTest)).To(Succeed()) + vsHandler.ModifyRSSpecForCephFS(&testRsSpec, storageClassForTest) + if storageClassForTest.Provisioner == testCephFSStorageDriverName { + Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal( + []corev1.PersistentVolumeAccessMode{ + corev1.ReadOnlyMany, + })) + } else { 
+ Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal( + []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + })) + } }) Context("When the source PVC is not using a cephfs storageclass", func() { @@ -270,128 +278,13 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { }) Context("When the sourcePVC is using a cephfs storageclass", func() { - customBackingSnapshotStorageClassName := testCephFSStorageClassName + "-vrg" - BeforeEach(func() { // Make sure the source PVC uses the cephfs storageclass testSourcePVC.Spec.StorageClassName = &testCephFSStorageClassName }) - JustBeforeEach(func() { - // Common tests - rsSpec should be modified with settings to allow pvc from snapshot - // to use our custom cephfs storageclass and ReadOnlyMany accessModes - Expect(testRsSpecOrig).NotTo(Equal(testRsSpec)) - - // Should use the custom storageclass with backingsnapshot: true parameter - Expect(*testRsSpec.ProtectedPVC.StorageClassName).To(Equal(customBackingSnapshotStorageClassName)) - - // AccessModes should be updated to ReadOnlyMany - Expect(testRsSpec.ProtectedPVC.AccessModes).To(Equal( - []corev1.PersistentVolumeAccessMode{ - corev1.ReadOnlyMany, - })) - }) - - AfterEach(func() { - // Delete the custom storage class that may have been created by test - custStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: customBackingSnapshotStorageClassName, - }, - } - err := k8sClient.Delete(ctx, custStorageClass) - if err != nil { - Expect(kerrors.IsNotFound(err)).To(BeTrue()) - } - - Eventually(func() bool { - err := k8sClient.Get(ctx, client.ObjectKeyFromObject(custStorageClass), custStorageClass) - - return kerrors.IsNotFound(err) - }, maxWait, interval).Should(BeTrue()) - }) - - Context("When the custom cephfs backing storage class for readonly pvc from snap does not exist", func() { - // Delete the custom vrg storageclass if it exists - BeforeEach(func() { - custStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: customBackingSnapshotStorageClassName, - }, - } - err := k8sClient.Delete(ctx, custStorageClass) - if err != nil { - Expect(kerrors.IsNotFound(err)).To(BeTrue()) - } - - Eventually(func() bool { - err := k8sClient.Get(ctx, client.ObjectKeyFromObject(custStorageClass), custStorageClass) - - return kerrors.IsNotFound(err) - }, maxWait, interval).Should(BeTrue()) - }) - - It("ModifyRSSpecForCephFS should modify the rsSpec and create the new storageclass", func() { - // RSspec modification checks in the outer context JustBeforeEach() - - newStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: customBackingSnapshotStorageClassName, - }, - } - - Eventually(func() error { - return k8sClient.Get(ctx, client.ObjectKeyFromObject(newStorageClass), newStorageClass) - }, maxWait, interval).Should(Succeed()) - - Expect(newStorageClass.Parameters["backingSnapshot"]).To(Equal("true")) - - // Other parameters from the test cephfs storageclass should be copied over - for k, v := range testCephFSStorageClass.Parameters { - Expect(newStorageClass.Parameters[k]).To(Equal(v)) - } - }) - }) - - Context("When the custom cephfs backing storage class for readonly pvc from snap exists", func() { - var preExistingCustStorageClass *storagev1.StorageClass - - BeforeEach(func() { - preExistingCustStorageClass = &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: customBackingSnapshotStorageClassName, - }, - Provisioner: testCephFSStorageDriverName, - Parameters: map[string]string{ // Not 
the same params as our CephFS storageclass for test - "different-param-1": "abc", - "different-param-2": "def", - "backingSnapshot": "true", - }, - } - Expect(k8sClient.Create(ctx, preExistingCustStorageClass)).To(Succeed()) - - // Confirm it's created - Eventually(func() error { - return k8sClient.Get(ctx, - client.ObjectKeyFromObject(preExistingCustStorageClass), preExistingCustStorageClass) - }, maxWait, interval).Should(Succeed()) - }) - - It("ModifyRSSpecForCephFS should modify the rsSpec but not modify the new custom storageclass", func() { - // Load the custom storageclass - newStorageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: customBackingSnapshotStorageClassName, - }, - } - - Eventually(func() error { - return k8sClient.Get(ctx, client.ObjectKeyFromObject(newStorageClass), newStorageClass) - }, maxWait, interval).Should(Succeed()) - - // Parameters should match the original, unmodified - Expect(newStorageClass.Parameters).To(Equal(preExistingCustStorageClass.Parameters)) - }) + It("ModifyRSSpecForCephFS should modify the rsSpec protectedPVC accessModes", func() { + Expect(testRsSpecOrig).ToNot(Equal(testRsSpec)) }) }) }) diff --git a/internal/controller/volumereplicationgroup_controller.go b/internal/controller/volumereplicationgroup_controller.go index d96432b04..03a4c8edf 100644 --- a/internal/controller/volumereplicationgroup_controller.go +++ b/internal/controller/volumereplicationgroup_controller.go @@ -549,9 +549,10 @@ func (v *VRGInstance) processVRG() ctrl.Result { } } - if err := RecipeElementsGet( - v.ctx, v.reconciler.Client, *v.instance, *v.ramenConfig, v.log, &v.recipeElements, - ); err != nil { + var err error + + v.recipeElements, err = RecipeElementsGet(v.ctx, v.reconciler.Client, *v.instance, *v.ramenConfig, v.log) + if err != nil { return v.invalid(err, "Failed to get recipe", false) } @@ -759,6 +760,13 @@ func (v *VRGInstance) addConsistencyGroupLabel(pvc *corev1.PersistentVolumeClaim return fmt.Errorf("missing storageID for PVC %s/%s", pvc.GetNamespace(), pvc.GetName()) } + // FIXME: a temporary workaround for issue DFBUGS-1209 + // Remove this block once DFBUGS-1209 is fixed + storageID = "cephfs-" + storageID + if storageClass.Provisioner != DefaultCephFSCSIDriverName { + storageID = "rbd-" + storageID + } + // Add label for PVC, showing that this PVC is part of consistency group return util.NewResourceUpdater(pvc). AddLabel(ConsistencyGroupLabel, storageID). 
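The addConsistencyGroupLabel hunk above carries the DFBUGS-1209 workaround inline; the sketch below is a standalone rendering of that prefixing logic (the helper name and the example values are hypothetical, not code from the controller), assuming the default CephFS provisioner name used elsewhere in this change.

```go
package main

import "fmt"

// DefaultCephFSCSIDriverName mirrors the provisioner the controller compares
// against; any other provisioner is treated as RBD by the workaround.
const DefaultCephFSCSIDriverName = "openshift-storage.cephfs.csi.ceph.com"

// consistencyGroupLabelValue is a hypothetical helper that reproduces the
// workaround from the hunk above: the storageID is prefixed so that CephFS
// and RBD PVCs sharing a storageID end up with different label values.
func consistencyGroupLabelValue(provisioner, storageID string) string {
    storageID = "cephfs-" + storageID
    if provisioner != DefaultCephFSCSIDriverName {
        storageID = "rbd-" + storageID
    }

    return storageID
}

func main() {
    // Example inputs only; the controller derives both from the PVC's StorageClass.
    fmt.Println(consistencyGroupLabelValue(DefaultCephFSCSIDriverName, "f9a8"))
    fmt.Println(consistencyGroupLabelValue("openshift-storage.rbd.csi.ceph.com", "f9a8"))
}
```

The exact prefix strings matter less than the two backends producing distinct values for the same storageID; that is what keeps their PVCs out of the same consistency group until DFBUGS-1209 is fixed.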
diff --git a/internal/controller/vrg_kubeobjects.go b/internal/controller/vrg_kubeobjects.go
index 43722bd57..2d4fba5c3 100644
--- a/internal/controller/vrg_kubeobjects.go
+++ b/internal/controller/vrg_kubeobjects.go
@@ -288,7 +288,7 @@ func (v *VRGInstance) kubeObjectsCaptureStartOrResume(
 func (v *VRGInstance) executeHook(hook kubeobjects.HookSpec, log1 logr.Logger) error {
     if hook.Type == "check" {
-        hookResult, err := util.EvaluateCheckHook(v.reconciler.Client, &hook, log1)
+        hookResult, err := util.EvaluateCheckHook(v.reconciler.APIReader, &hook, log1)
         if err != nil {
             log1.Error(err, "error occurred during check hook ")
@@ -820,6 +820,12 @@ func getCaptureGroups(recipe Recipe.Recipe) ([]kubeobjects.CaptureSpec, error) {
         for resourceType, resourceName := range resource {
             captureInstance, err := getResourceAndConvertToCaptureGroup(recipe, resourceType, resourceName)
             if err != nil {
+                if errors.Is(err, ErrVolumeCaptureNotSupported) {
+                    // we only use the volumes group for determining the label selector
+                    // ignore it in the capture sequence
+                    continue
+                }
+
                 return resources, err
             }
@@ -852,6 +858,12 @@ func getRecoverGroups(recipe Recipe.Recipe) ([]kubeobjects.RecoverSpec, error) {
         for resourceType, resourceName := range resource {
             captureInstance, err := getResourceAndConvertToRecoverGroup(recipe, resourceType, resourceName)
             if err != nil {
+                if errors.Is(err, ErrVolumeRecoverNotSupported) {
+                    // we only use the volumes group for determining the label selector
+                    // ignore it in the recover sequence
+                    continue
+                }
+
                 return resources, err
             }
@@ -862,6 +874,11 @@ func getRecoverGroups(recipe Recipe.Recipe) ([]kubeobjects.RecoverSpec, error) {
     return resources, nil
 }
 
+var (
+    ErrVolumeCaptureNotSupported = errors.New("volume capture not supported")
+    ErrVolumeRecoverNotSupported = errors.New("volume recover not supported")
+)
+
 func getResourceAndConvertToCaptureGroup(
     recipe Recipe.Recipe, resourceType, name string) (*kubeobjects.CaptureSpec, error,
 ) {
@@ -873,6 +890,10 @@ func getResourceAndConvertToCaptureGroup(
         }
     }
 
+    if name == recipe.Spec.Volumes.Name {
+        return nil, ErrVolumeCaptureNotSupported
+    }
+
     return nil, k8serrors.NewNotFound(schema.GroupResource{Resource: "Recipe.Spec.Group.Name"}, name)
 }
 
@@ -904,6 +925,10 @@ func getResourceAndConvertToRecoverGroup(
         }
     }
 
+    if name == recipe.Spec.Volumes.Name {
+        return nil, ErrVolumeRecoverNotSupported
+    }
+
     return nil, k8serrors.NewNotFound(schema.GroupResource{Resource: "Recipe.Spec.Group.Name"}, name)
 }
 
diff --git a/internal/controller/vrg_pvc_selector_test.go b/internal/controller/vrg_pvc_selector_test.go
index 318f8f9fa..9dd2d279b 100644
--- a/internal/controller/vrg_pvc_selector_test.go
+++ b/internal/controller/vrg_pvc_selector_test.go
@@ -41,10 +41,12 @@ var _ = Describe("VolumeReplicationGroupPVCSelector", func() {
         testCtx, cancel = context.WithCancel(context.TODO())
         Expect(k8sClient).NotTo(BeNil())
         vrgTestNamespace = createUniqueNamespace(testCtx)
+        ramenConfig.RamenOpsNamespace = vrgTestNamespace
     })
 
     AfterEach(func() {
         Expect(k8sClient.Delete(testCtx, testNamespace)).To(Succeed())
+        ramenConfig.RamenOpsNamespace = ""
         cancel()
     })
 
@@ -164,8 +166,9 @@ func getBaseVRG(namespace string) *ramen.VolumeReplicationGroup {
             Async: &ramen.VRGAsyncSpec{
                 SchedulingInterval: "5m",
             },
-            ReplicationState: ramen.Primary,
-            S3Profiles:       []string{"dummy-s3-profile"},
+            ReplicationState:    ramen.Primary,
+            S3Profiles:          []string{"dummy-s3-profile"},
+            ProtectedNamespaces: &[]string{namespace},
         },
     }
 }
 
@@ -200,12 +203,13 @@ func
getVRGDefinitionWithKubeObjectProtection(hasPVCSelectorLabels bool, namespa return vrg } -func getTestHook() *Recipe.Hook { +func getTestHook(testNamespace string) *Recipe.Hook { duration := 30 return &Recipe.Hook{ - Name: "hook-single", - Type: "exec", + Name: "hook-single", + Type: "exec", + Namespace: testNamespace, LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "myapp": "testapp", @@ -267,7 +271,7 @@ func getRecipeDefinition(namespace string) *Recipe.Recipe { Spec: Recipe.RecipeSpec{ Groups: []*Recipe.Group{getTestGroup()}, Volumes: getTestVolumeGroup(), - Hooks: []*Recipe.Hook{getTestHook()}, + Hooks: []*Recipe.Hook{getTestHook(namespace)}, Workflows: []*Recipe.Workflow{ { Name: "backup", @@ -279,7 +283,7 @@ func getRecipeDefinition(namespace string) *Recipe.Recipe { "group": "test-group", }, { - "hook": "test-hook", + "hook": "hook-single/checkpoint", }, }, }, diff --git a/internal/controller/vrg_recipe.go b/internal/controller/vrg_recipe.go index 5c46a50fb..0049629d1 100644 --- a/internal/controller/vrg_recipe.go +++ b/internal/controller/vrg_recipe.go @@ -79,44 +79,35 @@ func GetPVCSelector(ctx context.Context, reader client.Reader, vrg ramen.VolumeR ramenConfig ramen.RamenConfig, log logr.Logger, ) (PvcSelector, error) { - var recipeElements RecipeElements + recipeElements, err := RecipeElementsGet(ctx, reader, vrg, ramenConfig, log) + if err != nil { + return PvcSelector{}, err + } - return recipeElements.PvcSelector, recipeVolumesAndOptionallyWorkflowsGet( - ctx, reader, vrg, ramenConfig, log, &recipeElements, - func(recipe.Recipe, *RecipeElements, ramen.VolumeReplicationGroup, ramen.RamenConfig) error { - return nil - }, - ) + return recipeElements.PvcSelector, nil } func RecipeElementsGet(ctx context.Context, reader client.Reader, vrg ramen.VolumeReplicationGroup, - ramenConfig ramen.RamenConfig, log logr.Logger, recipeElements *RecipeElements, -) error { - return recipeVolumesAndOptionallyWorkflowsGet(ctx, reader, vrg, ramenConfig, log, recipeElements, - recipeWorkflowsGet, - ) -} + ramenConfig ramen.RamenConfig, log logr.Logger, +) (RecipeElements, error) { + var recipeElements RecipeElements -func recipeVolumesAndOptionallyWorkflowsGet(ctx context.Context, reader client.Reader, vrg ramen.VolumeReplicationGroup, - ramenConfig ramen.RamenConfig, log logr.Logger, recipeElements *RecipeElements, - workflowsGet func(recipe.Recipe, *RecipeElements, ramen.VolumeReplicationGroup, ramen.RamenConfig) error, -) error { if vrg.Spec.KubeObjectProtection == nil { - *recipeElements = RecipeElements{ + recipeElements = RecipeElements{ PvcSelector: getPVCSelector(vrg, ramenConfig, nil, nil), } - return nil + return recipeElements, nil } if vrg.Spec.KubeObjectProtection.RecipeRef == nil { - *recipeElements = RecipeElements{ + recipeElements = RecipeElements{ PvcSelector: getPVCSelector(vrg, ramenConfig, nil, nil), CaptureWorkflow: captureWorkflowDefault(vrg, ramenConfig), RecoverWorkflow: recoverWorkflowDefault(vrg, ramenConfig), } - return nil + return recipeElements, nil } recipeNamespacedName := types.NamespacedName{ @@ -126,11 +117,11 @@ func recipeVolumesAndOptionallyWorkflowsGet(ctx context.Context, reader client.R recipe := recipe.Recipe{} if err := reader.Get(ctx, recipeNamespacedName, &recipe); err != nil { - return fmt.Errorf("recipe %v get error: %w", recipeNamespacedName.String(), err) + return recipeElements, fmt.Errorf("recipe %v get error: %w", recipeNamespacedName.String(), err) } if err := RecipeParametersExpand(&recipe, 
         vrg.Spec.KubeObjectProtection.RecipeParameters, log); err != nil {
-        return err
+        return recipeElements, fmt.Errorf("recipe %v parameters expansion error: %w", recipeNamespacedName.String(), err)
     }
 
     var selector PvcSelector
@@ -141,15 +132,19 @@ func recipeVolumesAndOptionallyWorkflowsGet(ctx context.Context, reader client.R
             recipe.Spec.Volumes.LabelSelector)
     }
 
-    *recipeElements = RecipeElements{
+    recipeElements = RecipeElements{
         PvcSelector: selector,
     }
 
-    if err := workflowsGet(recipe, recipeElements, vrg, ramenConfig); err != nil {
-        return err
+    if err := recipeWorkflowsGet(recipe, &recipeElements, vrg, ramenConfig); err != nil {
+        return recipeElements, fmt.Errorf("recipe %v workflows get error: %w", recipeNamespacedName.String(), err)
+    }
+
+    if err := recipeNamespacesValidate(recipeElements, vrg, ramenConfig); err != nil {
+        return recipeElements, fmt.Errorf("recipe %v namespaces validation error: %w", recipeNamespacedName.String(), err)
     }
 
-    return recipeNamespacesValidate(*recipeElements, vrg, ramenConfig)
+    return recipeElements, nil
 }
 
 func RecipeParametersExpand(recipe *recipe.Recipe, parameters map[string][]string,
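The vrg_recipe.go changes above switch RecipeElementsGet from filling a caller-supplied *RecipeElements to returning the value together with an error that wraps the underlying failure. As a rough, self-contained sketch of that pattern (the Elements type, elementsGet, and errRecipeNotFound are simplified stand-ins, not the controller's real identifiers):

```go
package main

import (
    "errors"
    "fmt"
)

// Elements stands in for RecipeElements; the real struct also carries the
// capture and recover workflows.
type Elements struct {
    PvcSelector string
}

var errRecipeNotFound = errors.New("recipe not found")

// elementsGet returns the result instead of mutating an out-parameter, and
// wraps lower-level failures so callers keep the recipe reference in the
// error chain, mirroring the fmt.Errorf("... %w", ...) calls in the diff.
func elementsGet(recipeName string) (Elements, error) {
    if recipeName == "" {
        return Elements{}, fmt.Errorf("recipe %q get error: %w", recipeName, errRecipeNotFound)
    }

    return Elements{PvcSelector: "app=" + recipeName}, nil
}

func main() {
    if _, err := elementsGet(""); errors.Is(err, errRecipeNotFound) {
        fmt.Println("wrapped error keeps context:", err)
    }

    elements, err := elementsGet("busybox-recipe")
    fmt.Println(elements.PvcSelector, err)
}
```

Returning the value also lets GetPVCSelector call RecipeElementsGet directly, which is what allowed the recipeVolumesAndOptionallyWorkflowsGet callback indirection to be dropped.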
diff --git a/internal/controller/vrg_volrep.go b/internal/controller/vrg_volrep.go
index 1bdbdf236..1c7d67546 100644
--- a/internal/controller/vrg_volrep.go
+++ b/internal/controller/vrg_volrep.go
@@ -437,7 +437,7 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim, log logr.Log
 // any further and it can be skipped. The pvc will go away eventually.
 func skipPVC(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string) {
     if pvc.Status.Phase != corev1.ClaimBound {
-        log.Info("Skipping handling of VR as PersistentVolumeClaim is not bound", "pvcPhase", pvc.Status.Phase)
+        log.Info("Skipping handling of VR as PVC is not bound", "pvcPhase", pvc.Status.Phase)
 
         msg := "PVC not bound yet"
         // v.updateProtectedPVCCondition(pvc.Name, VRGConditionReasonProgressing, msg)
@@ -451,7 +451,7 @@ func skipPVC(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string)
 func isPVCDeletedAndNotProtected(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, string) {
     // If PVC deleted but not yet protected with a finalizer, skip it!
     if !containsString(pvc.Finalizers, PvcVRFinalizerProtected) && rmnutil.ResourceIsDeleted(pvc) {
-        log.Info("Skipping PersistentVolumeClaim, as it is marked for deletion and not yet protected")
+        log.Info("Skipping PVC, as it is marked for deletion and not yet protected")
 
         msg := "Skipping pvc marked for deletion"
         // v.updateProtectedPVCCondition(pvc.Name, VRGConditionReasonProgressing, msg)
 
diff --git a/internal/controller/vrg_volsync.go b/internal/controller/vrg_volsync.go
index 2f7b107f7..c23d127ad 100644
--- a/internal/controller/vrg_volsync.go
+++ b/internal/controller/vrg_volsync.go
@@ -113,6 +113,12 @@ func (v *VRGInstance) reconcileVolSyncAsPrimary(finalSyncPrepared *bool) (requeu
     }
 
     for _, pvc := range v.volSyncPVCs {
+        if pvc.Status.Phase != corev1.ClaimBound {
+            v.log.Info("Skipping PVC - PVC is not Bound.", "name", pvc.GetName())
+
+            continue
+        }
+
         requeuePVC := v.reconcilePVCAsVolSyncPrimary(pvc)
         if requeuePVC {
             requeue = true
@@ -548,23 +554,39 @@ func (v *VRGInstance) disownPVCs() error {
     return nil
 }
 
-// cleanupResources this function deleted all PS, PD and VolumeSnapshots from its owner (VRG)
+// cleanupResources deletes all RS, RD and VolumeSnapshots owned by the VRG
 func (v *VRGInstance) cleanupResources() error {
     for idx := range v.volSyncPVCs {
         pvc := &v.volSyncPVCs[idx]
 
-        if err := v.volSyncHandler.DeleteRS(pvc.Name, pvc.Namespace); err != nil {
+        if err := v.doCleanupResources(pvc.Name, pvc.Namespace); err != nil {
             return err
         }
+    }
 
-        if err := v.volSyncHandler.DeleteRD(pvc.Name, pvc.Namespace); err != nil {
-            return err
-        }
+    for idx := range v.instance.Spec.VolSync.RDSpec {
+        protectedPVC := v.instance.Spec.VolSync.RDSpec[idx].ProtectedPVC
 
-        if err := v.volSyncHandler.DeleteSnapshots(pvc.Namespace); err != nil {
+        if err := v.doCleanupResources(protectedPVC.Name, protectedPVC.Namespace); err != nil {
             return err
         }
     }
 
     return nil
 }
+
+func (v *VRGInstance) doCleanupResources(name, namespace string) error {
+    if err := v.volSyncHandler.DeleteRS(name, namespace); err != nil {
+        return err
+    }
+
+    if err := v.volSyncHandler.DeleteRD(name, namespace); err != nil {
+        return err
+    }
+
+    if err := v.volSyncHandler.DeleteSnapshots(namespace); err != nil {
+        return err
+    }
+
+    return nil
+}
diff --git a/ramenctl/ramenctl/resources/configmap.yaml b/ramenctl/ramenctl/resources/configmap.yaml
index e8c041dba..c06ed47d5 100644
--- a/ramenctl/ramenctl/resources/configmap.yaml
+++ b/ramenctl/ramenctl/resources/configmap.yaml
@@ -38,6 +38,7 @@ data:
       disabled: $volsync_disabled
     multiNamespace:
       FeatureEnabled: true
+      volsyncSupported: true
     ramenOpsNamespace: ramen-ops
     s3StoreProfiles:
     - s3ProfileName: minio-on-$cluster1
diff --git a/test/README.md b/test/README.md
index 42603f285..eaa407aab 100644
--- a/test/README.md
+++ b/test/README.md
@@ -144,7 +144,8 @@ environment.
    virtctl
    ```
 
-   lima version 1.0.0 or later is required.
+   lima version 1.0.0 or later is required; the latest version is
+   recommended.
 
 1. Install the `clusteradm` tool. See
    [Install clusteradm CLI tool](https://open-cluster-management.io/getting-started/installation/start-the-control-plane/#install-clusteradm-cli-tool)
@@ -164,19 +165,25 @@ environment.
    For more info see
    [kubectl-gather](https://github.com/nirs/kubectl-gather)
 
-1. Install `socket_vmnet` from source
+1. Install `socket_vmnet`
 
    > [!IMPORTANT]
-   > Do not install socket_vmnet from brew, it is insecure.
+   > You must install the socket_vmnet launchd service; we do not manage
+   > socket_vmnet with Lima.
 
    ```
-   git clone https://github.com/lima-vm/socket_vmnet.git
-   cd socket_vmnet
-   sudo make PREFIX=/opt/socket_vmnet install.bin
-   sudo make PREFIX=/opt/socket_vmnet install.launchd
+   VERSION="$(curl -fsSL https://api.github.com/repos/lima-vm/socket_vmnet/releases/latest | jq -r .tag_name)"
+   FILE="socket_vmnet-${VERSION:1}-$(uname -m).tar.gz"
+   SERVICE_ID="io.github.lima-vm.socket_vmnet"
+   curl -OSL "https://github.com/lima-vm/socket_vmnet/releases/download/${VERSION}/${FILE}"
+   sudo tar Cxzvf / "${FILE}" opt/socket_vmnet
+   sudo cp "/opt/socket_vmnet/share/doc/socket_vmnet/launchd/$SERVICE_ID.plist" "/Library/LaunchDaemons/$SERVICE_ID.plist"
+   sudo launchctl bootstrap system "/Library/LaunchDaemons/$SERVICE_ID.plist"
+   sudo launchctl enable system/$SERVICE_ID
+   sudo launchctl kickstart -kp system/$SERVICE_ID
    ```
 
-   For more info see [Installing socket_vmnet from source](https://github.com/lima-vm/socket_vmnet?tab=readme-ov-file#from-source)
+   For more info see [Installing socket_vmnet from binary](https://github.com/lima-vm/socket_vmnet?tab=readme-ov-file#from-binary)
 
 ## Testing that drenv is healthy