diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 1a4504338..8b99fe290 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -114,7 +114,12 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - python-version: ["3.9", "3.10", "3.11"] + python-version: + - "3.9" + - "3.10" + - "3.11" + - "3.12" + - "3.13-dev" steps: - name: Checkout source uses: actions/checkout@v4 @@ -178,8 +183,12 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - python-version: ["3.9", "3.10", "3.11"] - + python-version: + - "3.9" + - "3.10" + - "3.11" + - "3.12" + - "3.13-dev" steps: - name: Checkout source uses: actions/checkout@v4 diff --git a/.github/workflows/e2e-daily.yaml b/.github/workflows/e2e-daily.yaml new file mode 100644 index 000000000..200b93eba --- /dev/null +++ b/.github/workflows/e2e-daily.yaml @@ -0,0 +1,36 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +--- +# yamllint disable rule:line-length +name: E2E Daily + +on: # yamllint disable-line rule:truthy + # Run every day on 03:00. + schedule: + - cron: '0 3 * * *' + # Allow manual run. + # (Actions -> E2E Daily -> Run workflow) + workflow_dispatch: + +jobs: + refresh-cache: + runs-on: [self-hosted, e2e-rdr] + if: github.repository == 'RamenDR/ramen' + + steps: + - name: Checkout Repo + uses: actions/checkout@v4 + + - name: Install drenv + run: pip install -e test + + - name: Refresh cache + uses: nick-fields/retry@v3 + with: + timeout_minutes: 2 + retry_wait_seconds: 60 + max_attempts: 10 + command: | + cd test + drenv cache -v envs/regional-dr.yaml diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 0d954009b..4824fc254 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -29,7 +29,7 @@ jobs: - name: Setup drenv working-directory: test - run: drenv setup -v + run: drenv setup -v envs/regional-dr.yaml - name: Install ramenctl run: pip install -e ramenctl @@ -100,4 +100,4 @@ jobs: - name: Cleanup drenv if: always() working-directory: test - run: drenv cleanup -v + run: drenv cleanup -v envs/regional-dr.yaml diff --git a/Makefile b/Makefile index 0de1cdf16..e307f3445 100644 --- a/Makefile +++ b/Makefile @@ -176,6 +176,9 @@ test-drcluster: generate manifests envtest ## Run DRCluster tests. test-drpolicy: generate manifests envtest ## Run DRPolicy tests. go test ./internal/controller -coverprofile cover.out -ginkgo.focus DRPolicyController +test-drclusterconfig: generate manifests envtest ## Run DRClusterConfig tests. + go test ./internal/controller -coverprofile cover.out -ginkgo.focus DRClusterConfig + test-util: generate manifests envtest ## Run util tests. 
go test ./internal/controller/util -coverprofile cover.out diff --git a/api/go.mod b/api/go.mod index e75c3fd8e..5f99d95f3 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,10 +1,13 @@ module github.com/ramendr/ramen/api -go 1.22.6 +go 1.22.0 + +toolchain go1.22.7 require ( k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 + k8s.io/component-base v0.28.3 sigs.k8s.io/controller-runtime v0.16.3 ) @@ -19,7 +22,6 @@ require ( golang.org/x/text v0.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/component-base v0.28.3 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/cmd/main.go b/cmd/main.go index 1d8bc491a..406cb3dc4 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -15,11 +15,8 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" volrep "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumegroupsnapshot/v1alpha1" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" - clrapiv1beta1 "github.com/open-cluster-management-io/api/cluster/v1beta1" - ocmv1 "github.com/open-cluster-management/api/cluster/v1" - ocmworkv1 "github.com/open-cluster-management/api/work/v1" + groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1" plrv1 "github.com/stolostron/multicloud-operators-placementrule/pkg/apis/apps/v1" velero "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -29,6 +26,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ocmv1 "open-cluster-management.io/api/cluster/v1" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + clrapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + ocmworkv1 "open-cluster-management.io/api/work/v1" cpcv1 "open-cluster-management.io/config-policy-controller/api/v1" gppv1 "open-cluster-management.io/governance-policy-propagator/api/v1" ctrl "sigs.k8s.io/controller-runtime" @@ -119,6 +120,7 @@ func configureController(ramenConfig *ramendrv1alpha1.RamenConfig) error { utilruntime.Must(groupsnapv1alpha1.AddToScheme(scheme)) utilruntime.Must(recipe.AddToScheme(scheme)) utilruntime.Must(apiextensions.AddToScheme(scheme)) + utilruntime.Must(clusterv1alpha1.AddToScheme(scheme)) } return nil @@ -174,6 +176,7 @@ func setupReconcilersCluster(mgr ctrl.Manager, ramenConfig *ramendrv1alpha1.Rame if err := (&controllers.DRClusterConfigReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("DRClusterConfig"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "DRClusterConfig") os.Exit(1) diff --git a/config/dr-cluster/manifests/bases/ramen_dr_cluster.clusterserviceversion.yaml b/config/dr-cluster/manifests/bases/ramen_dr_cluster.clusterserviceversion.yaml index ddca5ab8a..261c6a40f 100644 --- a/config/dr-cluster/manifests/bases/ramen_dr_cluster.clusterserviceversion.yaml +++ b/config/dr-cluster/manifests/bases/ramen_dr_cluster.clusterserviceversion.yaml @@ -3,7 +3,7 @@ kind: 
ClusterServiceVersion metadata: annotations: alm-examples: '[]' - capabilities: Basic Install + capabilities: Seamless Upgrades operators.openshift.io/infrastructure-features: '["disconnected"]' name: ramen-dr-cluster-operator.v0.0.0 namespace: placeholder diff --git a/config/dr-cluster/rbac/role.yaml b/config/dr-cluster/rbac/role.yaml index 47ef42d6a..480bee100 100644 --- a/config/dr-cluster/rbac/role.yaml +++ b/config/dr-cluster/rbac/role.yaml @@ -137,6 +137,17 @@ rules: - patch - update - watch +- apiGroups: + - cluster.open-cluster-management.io + resources: + - clusterclaims + verbs: + - create + - delete + - get + - list + - update + - watch - apiGroups: - ramendr.openshift.io resources: diff --git a/config/hub/manifests/bases/ramen_hub.clusterserviceversion.yaml b/config/hub/manifests/bases/ramen_hub.clusterserviceversion.yaml index 56d7e1c78..97343d863 100644 --- a/config/hub/manifests/bases/ramen_hub.clusterserviceversion.yaml +++ b/config/hub/manifests/bases/ramen_hub.clusterserviceversion.yaml @@ -3,7 +3,7 @@ kind: ClusterServiceVersion metadata: annotations: alm-examples: '[]' - capabilities: Basic Install + capabilities: Seamless Upgrades operators.openshift.io/infrastructure-features: '["disconnected"]' name: ramen-hub-operator.v0.0.0 namespace: placeholder diff --git a/config/prometheus/alerts.yaml b/config/prometheus/alerts.yaml index 344b9c135..3230f3c59 100644 --- a/config/prometheus/alerts.yaml +++ b/config/prometheus/alerts.yaml @@ -38,6 +38,5 @@ spec: labels: severity: warning annotations: - description: "Workload is not protected for disaster recovery (DRPC: {{ $labels.obj_name }}, Namespace: {{ $labels.obj_namespace }})." + description: "Workload is not protected for disaster recovery (DRPC: {{ $labels.obj_name }}, Namespace: {{ $labels.obj_namespace }}). Inspect DRPC status.conditions for details." alert_type: "DisasterRecovery" - \ No newline at end of file diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 664648058..3f7cea04f 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -85,6 +85,17 @@ rules: - get - list - watch +- apiGroups: + - cluster.open-cluster-management.io + resources: + - clusterclaims + verbs: + - create + - delete + - get + - list + - update + - watch - apiGroups: - cluster.open-cluster-management.io resources: diff --git a/docs/devel-quick-start.md b/docs/devel-quick-start.md index 582cf38b7..3b5d85ad2 100644 --- a/docs/devel-quick-start.md +++ b/docs/devel-quick-start.md @@ -33,24 +33,14 @@ SPDX-License-Identifier: Apache-2.0 git remote add upstream https://github.com/RamenDR/ramen.git ``` -1. Set up a commit-msg hook to sign off your commits +1. Add the commit-msg hook to sign off your commits *Ramen* requires a `Signed-off-by: My Name ` footer in the commit message. To add it automatically to all commits, add this hook: ```sh - $ cat .git/hooks/commit-msg - #!/bin/sh - # Add Signed-off-by trailer. - sob=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') - git interpret-trailers --in-place --trailer "$sob" "$1" - ``` - - And make the hook executable: - - ```sh - chmod +x .git/hooks/commit-msg + cp hack/commit-msg .git/hooks/ ``` ## Setting up the environment for development and testing diff --git a/docs/krp.md b/docs/krp.md index 06cd9e6ea..abf549c05 100644 --- a/docs/krp.md +++ b/docs/krp.md @@ -96,7 +96,8 @@ The VRG enables Kubernetes resources to be captured(backed up) and recovered as part of disaster protection. 
This is accomplished through the kubeObjectProtection section of the VRG spec. If kubeObjectProtection is not included in a VRG, then Kubernetes resources are not protected as part of the -VRG disaster protection. +VRG disaster protection. PVCs and PVs are protected by Ramen itself, while other +resources are protected and backed up through kubeObjectProtection. The kubeObjectProtection section contains two sub-sections, captureOrder and recoverOrder. This captureOrder section provides instructions on how to backup diff --git a/docs/user-quick-start.md b/docs/user-quick-start.md index 4d5cea04c..fdb6b421a 100644 --- a/docs/user-quick-start.md +++ b/docs/user-quick-start.md @@ -135,14 +135,14 @@ enough resources: ``` 1. Install `clusteradm` tool. See - [Install clusteradm CLI tool](https://open-cluster-management.io/getting-started/installation/start-the-control-plane/#install-clusteradm-cli-tool) + [Install clusteradm CLI tool](https://open-cluster-management.io/docs/getting-started/installation/start-the-control-plane/#install-clusteradm-cli-tool) for the details. Version v0.8.1 or later is required. 1. Install `subctl` tool, See [Submariner subctl installation](https://submariner.io/operations/deployment/subctl/) for the details. - Version v0.17.0 or later is required. + Version v0.18.0 or later is required. 1. Install the `velero` tool diff --git a/e2e/go.mod b/e2e/go.mod index cc0c66b81..f840a8655 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -1,6 +1,8 @@ module github.com/ramendr/ramen/e2e -go 1.22.6 +go 1.22.0 + +toolchain go1.22.7 require ( github.com/go-logr/logr v1.4.1 diff --git a/go.mod b/go.mod index 79b17cabf..63f8b34c5 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/ramendr/ramen -go 1.22.6 +go 1.22.5 + +toolchain go1.22.7 // This replace should always be here for ease of development.
replace github.com/ramendr/ramen/api => ./api @@ -8,34 +10,34 @@ replace github.com/ramendr/ramen/api => ./api require ( github.com/aws/aws-sdk-go v1.44.289 github.com/backube/volsync v0.7.1 - github.com/csi-addons/kubernetes-csi-addons v0.8.1-0.20240822090723-89d4c5b45a32 + github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1 github.com/go-logr/logr v1.4.2 github.com/google/uuid v1.6.0 - github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0 - github.com/onsi/ginkgo/v2 v2.20.0 - github.com/onsi/gomega v1.34.1 - github.com/open-cluster-management-io/api v0.0.0-00010101000000-000000000000 - github.com/open-cluster-management/api v0.0.0-20210527013639-a6845f2ebcb1 + github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 + github.com/onsi/ginkgo/v2 v2.20.2 + github.com/onsi/gomega v1.34.2 github.com/operator-framework/api v0.17.6 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.18.0 - github.com/ramendr/ramen/api v0.0.0-20240117171503-e11c56eac24d - github.com/ramendr/recipe v0.0.0-20230817160432-729dc7fd8932 + github.com/prometheus/client_golang v1.20.4 + github.com/ramendr/ramen/api v0.0.0-20240924121439-b7cba82de417 + github.com/ramendr/recipe v0.0.0-20240918115450-667b9d79599f github.com/stolostron/multicloud-operators-foundation v0.0.0-20220824091202-e9cd9710d009 github.com/stolostron/multicloud-operators-placementrule v1.2.4-1-20220311-8eedb3f.0.20230828200208-cd3c119a7fa0 github.com/vmware-tanzu/velero v1.9.1 go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/time v0.3.0 - k8s.io/api v0.30.3 - k8s.io/apiextensions-apiserver v0.30.1 - k8s.io/apimachinery v0.30.3 + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 + golang.org/x/time v0.6.0 + k8s.io/api v0.31.1 + k8s.io/apiextensions-apiserver v0.31.1 + k8s.io/apimachinery v0.31.1 k8s.io/client-go v12.0.0+incompatible - k8s.io/component-base v0.30.1 - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 + k8s.io/component-base v0.31.1 + k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 + open-cluster-management.io/api v0.13.0 open-cluster-management.io/config-policy-controller v0.12.0 open-cluster-management.io/governance-policy-propagator v0.12.0 - sigs.k8s.io/controller-runtime v0.18.4 + sigs.k8s.io/controller-runtime v0.19.0 sigs.k8s.io/yaml v1.4.0 ) @@ -43,14 +45,15 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -58,24 +61,24 @@ require ( 
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect + github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.10 // indirect github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.59.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/afero v1.9.3 // indirect @@ -84,13 +87,14 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.15.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect + github.com/x448/float16 v0.8.4 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect - golang.org/x/sys v0.23.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -98,14 +102,10 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83 // indirect open-cluster-management.io/multicloud-operators-subscription v0.12.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) // replace directives to accommodate for stolostron -replace k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.30.2 - -replace github.com/open-cluster-management-io/api => open-cluster-management.io/api v0.10.0 +replace k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.31.0 diff --git a/go.sum b/go.sum index d27a9bcc5..7f4e84dca 100644 --- a/go.sum +++ b/go.sum @@ -36,21 +36,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod 
h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.44.289 h1:5CVEjiHFvdiVlKPBzv0rjG4zH/21W/onT18R5AH/qx0= github.com/aws/aws-sdk-go v1.44.289/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/backube/volsync v0.7.1 h1:GK3MEY9qtZ99ykP/3wl1shVYHKW20nSIfWR3pC3F3RE= @@ -69,64 +56,45 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/csi-addons/kubernetes-csi-addons v0.8.1-0.20240822090723-89d4c5b45a32 h1:yUmy2M1C/EEOridkltNENGb1NlkGIRvdxdQGjWt2U3Y= -github.com/csi-addons/kubernetes-csi-addons v0.8.1-0.20240822090723-89d4c5b45a32/go.mod h1:/YROZDdEi1N/1Ls9rdU5W2VNjm8MK7HHApl8W4Sqt9s= +github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1 h1:9mh79gS8O8uO5okZ2DhFO0LSrhpVXd9R9DLvbnh2He4= +github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1/go.mod h1:LeY7UYm8nEBCG1RcJG0DHmJbva0ILmtp+kcegxRuHhc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod 
h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -175,7 +143,6 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -191,26 +158,21 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -219,101 +181,79 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 
h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 h1:cMM5AB37e9aRGjErygVT6EuBPB6s5a+l95OPERmSlVM= github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0/go.mod h1:VQVLCPGDX5l6V5PezjlDXLa+SpCbWSVU7B16cFWVVeE= -github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0 h1:j3YK74myEQRxR/srciTpOrm221SAvz6J5OVWbyfeXFo= -github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0/go.mod h1:FlyYFe32mPxKEPaRXKNxfX576d1AoCzstYDoOOnyMA4= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 h1:mjQG0Vakr2h246kEDR85U8y8ZhPgT3bguTCajRa/jaw= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= -github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= -github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= -github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= -github.com/open-cluster-management/api v0.0.0-20210527013639-a6845f2ebcb1 h1:AaFycHD9YOfFXe9C5VsYxKf4LKCXKSLZgK2DnFdHY4M= -github.com/open-cluster-management/api v0.0.0-20210527013639-a6845f2ebcb1/go.mod h1:ot+A1DWq+v1IV+e1S7nhIteYAmNByFgtazvzpoeAfRQ= -github.com/openshift/build-machinery-go v0.0.0-20210115170933-e575b44a7a94/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/operator-framework/api v0.17.6 h1:E6+vlvYUKafvoXYtCuHlDZrXX4vl8AT+r93OxNlzjpU= github.com/operator-framework/api v0.17.6/go.mod h1:l/cuwtPxkVUY7fzYgdust2m9tlmb8I4pOvbsUufRb24= github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/ramendr/recipe v0.0.0-20230817160432-729dc7fd8932 h1:n89W9K2gDa0XwdIVuWyg53hPgaR97DfGVi9o2V0WcWA= -github.com/ramendr/recipe v0.0.0-20230817160432-729dc7fd8932/go.mod h1:QHVQXKgNId8EfvNd+Y6JcTrsXwTImtSFkV4IsiOkwCw= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= +github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/ramendr/recipe v0.0.0-20240918115450-667b9d79599f h1:LuuCBtH5e9dYFQZIOQAZzcXQwtedrWgNWflmCaZZrlo= +github.com/ramendr/recipe v0.0.0-20240918115450-667b9d79599f/go.mod h1:QHVQXKgNId8EfvNd+Y6JcTrsXwTImtSFkV4IsiOkwCw= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= @@ -323,14 +263,12 @@ github.com/stolostron/multicloud-operators-foundation v0.0.0-20220824091202-e9cd github.com/stolostron/multicloud-operators-placementrule v1.2.4-1-20220311-8eedb3f.0.20230828200208-cd3c119a7fa0 h1:qL6eeBtdjLq7ktBBg8tB44b6jTKQjFy6bdl8EM+Kq6o= github.com/stolostron/multicloud-operators-placementrule v1.2.4-1-20220311-8eedb3f.0.20230828200208-cd3c119a7fa0/go.mod h1:uMTaz9cMLe5N+yJ/PpHPtSOdlBFB00WdxAW+K5TfkVw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -341,6 +279,8 @@ github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8 github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/vmware-tanzu/velero v1.9.1 h1:uZhNMq1Pn8AZjT7HLtKseTq47EeHeIuUxvGPFFp/+Vs= github.com/vmware-tanzu/velero v1.9.1/go.mod h1:75v4RUMzs8RK6Kqmrg6jgIOBaHUgAWwWAFiWERw2l4U= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -361,10 +301,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -378,8 +316,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -406,7 +344,6 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -414,11 +351,9 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -435,14 +370,13 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -452,8 +386,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -466,18 +400,15 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -495,7 +426,6 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -507,13 +437,13 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -522,16 +452,14 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -541,7 +469,6 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ 
-566,10 +493,8 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -584,8 +509,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -684,17 +609,15 @@ google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6h google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -709,39 +632,26 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= -k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= -k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= -k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= -k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= -k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= -k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY= -k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50= -k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs= -k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= -k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= -k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= +k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= +k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 
h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= k8s.io/kubectl v0.26.1 h1:K8A0Jjlwg8GqrxOXxAbjY5xtmXYeYjLU96cHp2WMQ7s= k8s.io/kubectl v0.26.1/go.mod h1:miYFVzldVbdIiXMrHZYmL/EDWwJKM+F0sSsdxsATFPo= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -open-cluster-management.io/api v0.10.0 h1:B6/nwKO7cXDuKV5uJLjF/JUuPuiKsep08gfmAAWaKKc= -open-cluster-management.io/api v0.10.0/go.mod h1:6BB/Y6r3hXlPjpJgDwIs6Ubxyx/kXXOg6D9Cntg1I9E= -open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83 h1:3zbT3sT/tEAQbpjIk6uRiTQGknQ3kQlfd11ElVuXyyQ= -open-cluster-management.io/api v0.11.1-0.20230905055724-cf1ead467a83/go.mod h1:nsQ/G5JpfjQUg7dHpblyywWC6BRqklNaF6fIswVCHyY= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +open-cluster-management.io/api v0.13.0 h1:dlcJEZlNlE0DmSDctK2s7iWKg9l+Tgb0V78Z040nMuk= +open-cluster-management.io/api v0.13.0/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w= open-cluster-management.io/config-policy-controller v0.12.0 h1:YQKv/esFuiaWaJ/UcAEX2Sy/uhHYbjiHHzf3+bpRKQs= open-cluster-management.io/config-policy-controller v0.12.0/go.mod h1:qAqUAmQAv86Jur4rPAZES2WTp8q7C565Zr1OyflXpXk= open-cluster-management.io/governance-policy-propagator v0.12.0 h1:jwIsJKme9AjtU1e1OKbUU3ciok+nQ0VMe8YVkz8a00E= @@ -751,14 +661,11 @@ open-cluster-management.io/multicloud-operators-subscription v0.12.0/go.mod h1:+ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/commit-msg b/hack/commit-msg new file mode 
100755 index 000000000..e48f38ac7 --- /dev/null +++ b/hack/commit-msg @@ -0,0 +1,4 @@ +#!/bin/sh +# Add Signed-off-by trailer. +sob=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') +git interpret-trailers --in-place --trailer "$sob" "$1" diff --git a/hack/install-setup-envtest.sh b/hack/install-setup-envtest.sh index 6d1c30cc4..96b646fdb 100755 --- a/hack/install-setup-envtest.sh +++ b/hack/install-setup-envtest.sh @@ -3,7 +3,7 @@ set -e script_dir="$(cd "$(dirname "$0")" && pwd)" -required_version="release-0.17" +required_version="release-0.19" source_url="sigs.k8s.io/controller-runtime/tools/setup-envtest@${required_version}" target_dir="${script_dir}/../testbin" target_path="${target_dir}/setup-envtest" diff --git a/hack/make-venv b/hack/make-venv index cc302ce38..a37ab78f7 100755 --- a/hack/make-venv +++ b/hack/make-venv @@ -27,9 +27,6 @@ cp coverage.pth $venv/lib/python*/site-packages echo "Adding venv symlink..." ln -sf $venv/bin/activate venv -echo "Setting up minikube for drenv" -$venv/bin/drenv setup -v - echo echo "To activate the environment run:" echo diff --git a/hack/test/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml b/hack/test/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml new file mode 100644 index 000000000..4359dcadb --- /dev/null +++ b/hack/test/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml @@ -0,0 +1,63 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterclaims.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: ClusterClaim + listKind: ClusterClaimList + plural: clusterclaims + singular: clusterclaim + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + ClusterClaim represents cluster information that a managed cluster claims + ClusterClaims with well known names include, + 1. id.k8s.io, it contains a unique identifier for the cluster. + 2. clusterset.k8s.io, it contains an identifier that relates the cluster + to the ClusterSet in which it belongs. + + + ClusterClaims created on a managed cluster will be collected and saved into + the status of the corresponding ManagedCluster on hub. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of the ClusterClaim. 
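(Aside, for illustration: a minimal ClusterClaim that this new test CRD would accept might look like the sketch below. The cluster id value is an assumption; id.k8s.io is one of the well-known claim names mentioned in the schema description, and the constraints on value continue on the lines that follow.)

apiVersion: cluster.open-cluster-management.io/v1alpha1
kind: ClusterClaim
metadata:
  name: id.k8s.io          # cluster-scoped resource, so no namespace
spec:
  value: 8d271f96-1b34-4b7d-9d2f-0000examp1e   # assumed value; must be 1-1024 characters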
+ properties: + value: + description: Value is a claim-dependent string + maxLength: 1024 + minLength: 1 + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml b/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml index f78126a55..a7d1b3de5 100644 --- a/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml +++ b/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml @@ -1,4 +1,3 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -92,4 +91,4 @@ spec: type: object served: true storage: true - subresources: {} + subresources: {} \ No newline at end of file diff --git a/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml b/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml index d0d4d7907..8d7eaea61 100644 --- a/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml +++ b/hack/test/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml @@ -3,9 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/995" - controller-gen.kubebuilder.io/version: v0.12.0 - creationTimestamp: null + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068" + controller-gen.kubebuilder.io/version: v0.15.0 name: volumegroupsnapshots.groupsnapshot.storage.k8s.io spec: group: groupsnapshot.storage.k8s.io @@ -47,215 +46,223 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: VolumeGroupSnapshot is a user's request for creating either a - point-in-time group snapshot or binding to a pre-existing group snapshot. + description: |- + VolumeGroupSnapshot is a user's request for creating either a point-in-time + group snapshot or binding to a pre-existing group snapshot. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: Spec defines the desired characteristics of a group snapshot - requested by a user. Required. 
+ description: |- + Spec defines the desired characteristics of a group snapshot requested by a user. + Required. properties: source: - description: Source specifies where a group snapshot will be created - from. This field is immutable after creation. Required. + description: |- + Source specifies where a group snapshot will be created from. + This field is immutable after creation. + Required. properties: selector: - description: Selector is a label query over persistent volume - claims that are to be grouped together for snapshotting. This - labelSelector will be used to match the label added to a PVC. + description: |- + Selector is a label query over persistent volume claims that are to be + grouped together for snapshotting. + This labelSelector will be used to match the label added to a PVC. If the label is added or removed to a volume after a group snapshot is created, the existing group snapshots won't be modified. - Once a VolumeGroupSnapshotContent is created and the sidecar - starts to process it, the volume list will not change with retries. + Once a VolumeGroupSnapshotContent is created and the sidecar starts to process + it, the volume list will not change with retries. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: selector is immutable + rule: self == oldSelf volumeGroupSnapshotContentName: - description: VolumeGroupSnapshotContentName specifies the name - of a pre-existing VolumeGroupSnapshotContent object representing - an existing volume group snapshot. This field should be set - if the volume group snapshot already exists and only needs a - representation in Kubernetes. This field is immutable. + description: |- + VolumeGroupSnapshotContentName specifies the name of a pre-existing VolumeGroupSnapshotContent + object representing an existing volume group snapshot. + This field should be set if the volume group snapshot already exists and + only needs a representation in Kubernetes. + This field is immutable. type: string + x-kubernetes-validations: + - message: volumeGroupSnapshotContentName is immutable + rule: self == oldSelf type: object + x-kubernetes-validations: + - message: selector is required once set + rule: '!has(oldSelf.selector) || has(self.selector)' + - message: volumeGroupSnapshotContentName is required once set + rule: '!has(oldSelf.volumeGroupSnapshotContentName) || has(self.volumeGroupSnapshotContentName)' + - message: exactly one of selector and volumeGroupSnapshotContentName + must be set + rule: (has(self.selector) && !has(self.volumeGroupSnapshotContentName)) + || (!has(self.selector) && has(self.volumeGroupSnapshotContentName)) volumeGroupSnapshotClassName: - description: VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass - requested by the VolumeGroupSnapshot. VolumeGroupSnapshotClassName - may be left nil to indicate that the default class will be used. + description: |- + VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass + requested by the VolumeGroupSnapshot. + VolumeGroupSnapshotClassName may be left nil to indicate that the default + class will be used. Empty string is not allowed for this field. type: string + x-kubernetes-validations: + - message: volumeGroupSnapshotClassName must not be the empty string + when set + rule: size(self) > 0 required: - source type: object status: - description: Status represents the current information of a group snapshot. - Consumers must verify binding between VolumeGroupSnapshot and VolumeGroupSnapshotContent - objects is successful (by validating that both VolumeGroupSnapshot and - VolumeGroupSnapshotContent point to each other) before using this object. + description: |- + Status represents the current information of a group snapshot. + Consumers must verify binding between VolumeGroupSnapshot and + VolumeGroupSnapshotContent objects is successful (by validating that both + VolumeGroupSnapshot and VolumeGroupSnapshotContent point to each other) before + using this object. properties: boundVolumeGroupSnapshotContentName: - description: 'BoundVolumeGroupSnapshotContentName is the name of the - VolumeGroupSnapshotContent object to which this VolumeGroupSnapshot - object intends to bind to. If not specified, it indicates that the - VolumeGroupSnapshot object has not been successfully bound to a - VolumeGroupSnapshotContent object yet. NOTE: To avoid possible security - issues, consumers must verify binding between VolumeGroupSnapshot - and VolumeGroupSnapshotContent objects is successful (by validating - that both VolumeGroupSnapshot and VolumeGroupSnapshotContent point - at each other) before using this object.' 
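(For illustration: the new CEL rules above make selector and volumeGroupSnapshotContentName immutable and require exactly one of the two, so a conforming VolumeGroupSnapshot spec might look like the sketch below. The object name, namespace, class name and labels are assumptions; the status-side changes continue in the hunk that follows.)

apiVersion: groupsnapshot.storage.k8s.io/v1alpha1
kind: VolumeGroupSnapshot
metadata:
  name: app-group-snap
  namespace: app-ns
spec:
  volumeGroupSnapshotClassName: cephfs-groupsnapclass   # must be non-empty when set
  source:
    selector:                 # set either selector or volumeGroupSnapshotContentName, never both
      matchLabels:
        appname: busybox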
+ description: |- + BoundVolumeGroupSnapshotContentName is the name of the VolumeGroupSnapshotContent + object to which this VolumeGroupSnapshot object intends to bind to. + If not specified, it indicates that the VolumeGroupSnapshot object has not + been successfully bound to a VolumeGroupSnapshotContent object yet. + NOTE: To avoid possible security issues, consumers must verify binding between + VolumeGroupSnapshot and VolumeGroupSnapshotContent objects is successful + (by validating that both VolumeGroupSnapshot and VolumeGroupSnapshotContent + point at each other) before using this object. type: string creationTime: - description: CreationTime is the timestamp when the point-in-time - group snapshot is taken by the underlying storage system. If not - specified, it may indicate that the creation time of the group snapshot - is unknown. The format of this field is a Unix nanoseconds time - encoded as an int64. On Unix, the command date +%s%N returns the - current time in nanoseconds since 1970-01-01 00:00:00 UTC. + description: |- + CreationTime is the timestamp when the point-in-time group snapshot is taken + by the underlying storage system. + If not specified, it may indicate that the creation time of the group snapshot + is unknown. + The format of this field is a Unix nanoseconds time encoded as an int64. + On Unix, the command date +%s%N returns the current time in nanoseconds + since 1970-01-01 00:00:00 UTC. format: date-time type: string error: - description: Error is the last observed error during group snapshot - creation, if any. This field could be helpful to upper level controllers - (i.e., application controller) to decide whether they should continue - on waiting for the group snapshot to be created based on the type - of error reported. The snapshot controller will keep retrying when - an error occurs during the group snapshot creation. Upon success, - this error field will be cleared. + description: |- + Error is the last observed error during group snapshot creation, if any. + This field could be helpful to upper level controllers (i.e., application + controller) to decide whether they should continue on waiting for the group + snapshot to be created based on the type of error reported. + The snapshot controller will keep retrying when an error occurs during the + group snapshot creation. Upon success, this error field will be cleared. properties: message: - description: 'message is a string detailing the encountered error - during snapshot creation if specified. NOTE: message may be - logged, and it should not contain sensitive information.' + description: |- + message is a string detailing the encountered error during snapshot + creation if specified. + NOTE: message may be logged, and it should not contain sensitive + information. type: string time: description: time is the timestamp when the error was encountered. format: date-time type: string type: object - readyToUse: - description: ReadyToUse indicates if all the individual snapshots - in the group are ready to be used to restore a group of volumes. - ReadyToUse becomes true when ReadyToUse of all individual snapshots - become true. If not specified, it means the readiness of a group - snapshot is unknown. - type: boolean - volumeSnapshotRefList: - description: VolumeSnapshotRefList is the list of volume snapshot - references for this group snapshot. The maximum number of allowed - snapshots in the group is 100. 
+ pvcVolumeSnapshotRefList: + description: |- + VolumeSnapshotRefList is the list of PVC and VolumeSnapshot pairs that + is part of this group snapshot. + The maximum number of allowed snapshots in the group is 100. items: - description: "ObjectReference contains enough information to let - you inspect or modify the referred object. --- New uses of this - type are discouraged because of difficulty describing its usage - when embedded in APIs. 1. Ignored fields. It includes many fields - which are not generally honored. For instance, ResourceVersion - and FieldPath are both very rarely valid in actual usage. 2. Invalid - usage help. It is impossible to add specific help for individual - usage. In most embedded usages, there are particular restrictions - like, \"must refer only to types A and B\" or \"UID not honored\" - or \"name must be restricted\". Those cannot be well described - when embedded. 3. Inconsistent validation. Because the usages - are different, the validation rules are different by usage, which - makes it hard for users to predict what will happen. 4. The fields - are both imprecise and overly precise. Kind is not a precise - mapping to a URL. This can produce ambiguity during interpretation - and require a REST mapping. In most cases, the dependency is - on the group,resource tuple and the version of the actual struct - is irrelevant. 5. We cannot easily change it. Because this type - is embedded in many locations, updates to this type will affect - numerous schemas. Don't make new APIs embed an underspecified - API type they do not control. \n Instead of using this type, create - a locally provided and used type that is well-focused on your - reference. For example, ServiceReferences for admission registration: - https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 - ." + description: PVCVolumeSnapshotPair defines a pair of a PVC reference + and a Volume Snapshot Reference properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' - type: string - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' - type: string + persistentVolumeClaimRef: + description: PersistentVolumeClaimRef is a reference to the + PVC this pair is referring to + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeSnapshotRef: + description: VolumeSnapshotRef is a reference to the VolumeSnapshot + this pair is referring to + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic type: object - x-kubernetes-map-type: atomic type: array + readyToUse: + description: |- + ReadyToUse indicates if all the individual snapshots in the group are ready + to be used to restore a group of volumes. + ReadyToUse becomes true when ReadyToUse of all individual snapshots become true. + If not specified, it means the readiness of a group snapshot is unknown. + type: boolean type: object required: - spec diff --git a/hack/test/recipes.ramendr.openshift.io.yaml b/hack/test/recipes.ramendr.openshift.io.yaml index 3336063f2..acdb80fd6 100644 --- a/hack/test/recipes.ramendr.openshift.io.yaml +++ b/hack/test/recipes.ramendr.openshift.io.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.10.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: recipes.ramendr.openshift.io spec: group: ramendr.openshift.io @@ -21,14 +20,19 @@ spec: description: Recipe is the Schema for the recipes API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -36,66 +40,91 @@ spec: description: RecipeSpec defines the desired state of Recipe properties: appType: - description: Type of application the recipe is designed for. (AppType - is not used yet. 
For now, we will match the name of the app CR) + description: |- + Type of application the recipe is designed for. (AppType is not used yet. For now, we will + match the name of the app CR) type: string - captureWorkflow: - description: The sequence of actions to capture data to protect from - disaster - properties: - failOn: - default: any-error - description: 'Implies behaviour in case of failure: any-error - (default), essential-error, full-error' - enum: - - any-error - - essential-error - - full-error - type: string - sequence: - description: 'List of the names of groups or hooks, in the order - in which they should be executed Format: : [/]' - items: - additionalProperties: - type: string - type: object - type: array - required: - - sequence - type: object groups: description: List of one or multiple groups items: - description: Groups defined in the recipe refine / narrow-down the - scope of its parent groups defined in the Application CR. Recipe - groups are always be associated to a parent group in Application - CR - explicitly or implicitly. Recipe groups can be used in the - context of backup and/or restore workflows + description: |- + Groups defined in the recipe refine / narrow-down the scope of its parent groups defined in the + Application CR. Recipe groups are always be associated to a parent group in Application CR - + explicitly or implicitly. Recipe groups can be used in the context of backup and/or restore workflows properties: backupRef: - description: Used for groups solely used in restore workflows - to refer to another group that is used in backup workflows. + description: |- + Used for groups solely used in restore workflows to refer to another group that is used in + backup workflows. type: string essential: description: Defaults to true, if set to false, a failure is not necessarily handled as fatal type: boolean + excludedNamespaces: + description: List of namespace to exclude + items: + type: string + type: array excludedResourceTypes: description: List of resource types to exclude items: type: string type: array includeClusterResources: - description: Whether to include any cluster-scoped resources. - If nil or true, cluster-scoped resources are included if they - are associated with the included namespace-scoped resources + description: |- + Whether to include any cluster-scoped resources. If nil or true, cluster-scoped resources are + included if they are associated with the included namespace-scoped resources type: boolean includedNamespaces: description: List of namespaces to include. items: type: string type: array + includedNamespacesByLabel: + description: Selects namespaces by label + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
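(For illustration: the groups schema now accepts excludedNamespaces and a label-based namespace selector, so a group entry might look like the sketch below. The group name, type and labels are assumptions, and the matchLabels half of the selector schema continues on the lines that follow.)

groups:
  - name: app-resources            # assumed group name
    type: resource
    includedNamespacesByLabel:
      matchLabels:
        ramendr/protected: "true"  # assumed label
    excludedNamespaces:
      - scratch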
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic includedResourceTypes: description: List of resource types to include. If unspecified, all resource types are included. @@ -109,25 +138,25 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -139,11 +168,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -155,11 +183,32 @@ spec: this expression. Valid for volume groups only. type: string parent: - description: Name of the parent group defined in the associated - Application CR. Optional - If unspecified, parent group is - represented by the implicit default group of Application CR - (implies the Application CR does not specify groups explicitly). + description: |- + Name of the parent group defined in the associated Application CR. Optional - If unspecified, + parent group is represented by the implicit default group of Application CR (implies the + Application CR does not specify groups explicitly). type: string + restoreOverwriteResources: + description: Whether to overwrite resources during restore. + Default to false. + type: boolean + restoreStatus: + description: RestoreStatus restores status if set to all the + includedResources specified. 
Specify '*' to restore all statuses + for all the CRs + properties: + excludedResources: + description: List of resource types to exclude. + items: + type: string + type: array + includedResources: + description: List of resource types to include. If unspecified, + all resource types are included. + items: + type: string + type: array + type: object selectResource: description: Determines the resource type which the fields labelSelector and nameSelector apply to for selecting PVCs. Default selection @@ -207,9 +256,9 @@ spec: true. Defaults to Fail. type: string timeout: - description: How long to wait for the check to execute - format: duration - type: string + description: How long to wait for the check to execute, + in seconds + type: integer required: - name type: object @@ -229,25 +278,25 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -259,11 +308,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -293,10 +341,8 @@ spec: properties: command: description: The command to execute - items: - type: string - minItems: 1 - type: array + minLength: 1 + type: string container: description: The container where the command should be executed @@ -314,9 +360,9 @@ spec: exit code. Defaults to Fail. 
type: string timeout: - description: How long to wait for the command to execute - format: duration - type: string + description: How long to wait for the command to execute, + in seconds + type: integer required: - command - name @@ -327,20 +373,16 @@ spec: x-kubernetes-list-type: map selectResource: description: Resource type to that a hook applies to - enum: - - pod - - deployment - - statefulset type: string singlePodOnly: - description: Boolean flag that indicates whether to execute - command on a single pod or on all pods that match the selector + description: |- + Boolean flag that indicates whether to execute command on a single pod or on all pods that + match the selector type: boolean timeout: - description: Default timeout applied to custom and built-in - operations. If not specified, equals to 30s. - format: duration - type: string + description: Default timeout in seconds applied to custom and + built-in operations. If not specified, equals to 30s. + type: integer type: description: Hook type enum: @@ -350,63 +392,89 @@ spec: type: string required: - name + - namespace - type type: object type: array x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map - recoverWorkflow: - description: The sequence of actions to recover data protected from - disaster - properties: - failOn: - default: any-error - description: 'Implies behaviour in case of failure: any-error - (default), essential-error, full-error' - enum: - - any-error - - essential-error - - full-error - type: string - sequence: - description: 'List of the names of groups or hooks, in the order - in which they should be executed Format: : [/]' - items: - additionalProperties: - type: string - type: object - type: array - required: - - sequence - type: object volumes: description: Volumes to protect from disaster properties: backupRef: - description: Used for groups solely used in restore workflows - to refer to another group that is used in backup workflows. + description: |- + Used for groups solely used in restore workflows to refer to another group that is used in + backup workflows. type: string essential: description: Defaults to true, if set to false, a failure is not necessarily handled as fatal type: boolean + excludedNamespaces: + description: List of namespace to exclude + items: + type: string + type: array excludedResourceTypes: description: List of resource types to exclude items: type: string type: array includeClusterResources: - description: Whether to include any cluster-scoped resources. - If nil or true, cluster-scoped resources are included if they - are associated with the included namespace-scoped resources + description: |- + Whether to include any cluster-scoped resources. If nil or true, cluster-scoped resources are + included if they are associated with the included namespace-scoped resources type: boolean includedNamespaces: description: List of namespaces to include. items: type: string type: array + includedNamespacesByLabel: + description: Selects namespaces by label + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic includedResourceTypes: description: List of resource types to include. If unspecified, all resource types are included. @@ -420,25 +488,25 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -450,11 +518,10 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -466,11 +533,31 @@ spec: this expression. Valid for volume groups only. type: string parent: - description: Name of the parent group defined in the associated - Application CR. Optional - If unspecified, parent group is represented - by the implicit default group of Application CR (implies the + description: |- + Name of the parent group defined in the associated Application CR. 
Optional - If unspecified, + parent group is represented by the implicit default group of Application CR (implies the Application CR does not specify groups explicitly). type: string + restoreOverwriteResources: + description: Whether to overwrite resources during restore. Default + to false. + type: boolean + restoreStatus: + description: RestoreStatus restores status if set to all the includedResources + specified. Specify '*' to restore all statuses for all the CRs + properties: + excludedResources: + description: List of resource types to exclude. + items: + type: string + type: array + includedResources: + description: List of resource types to include. If unspecified, + all resource types are included. + items: + type: string + type: array + type: object selectResource: description: Determines the resource type which the fields labelSelector and nameSelector apply to for selecting PVCs. Default selection @@ -492,6 +579,42 @@ spec: - name - type type: object + workflows: + description: Workflow is the sequence of actions to take + items: + description: Workflow is the sequence of actions to take + properties: + failOn: + default: any-error + description: 'Implies behaviour in case of failure: any-error + (default), essential-error, full-error' + enum: + - any-error + - essential-error + - full-error + type: string + name: + description: |- + Name of recipe. Names "backup" and "restore" are reserved and implicitly used by default for + backup or restore respectively + type: string + sequence: + description: |- + List of the names of groups or hooks, in the order in which they should be executed + Format: : [/] + items: + additionalProperties: + type: string + type: object + type: array + required: + - name + - sequence + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map required: - appType type: object diff --git a/internal/controller/cephfscg/cephfscg_suite_test.go b/internal/controller/cephfscg/cephfscg_suite_test.go index a11cb11da..ac01ece2e 100644 --- a/internal/controller/cephfscg/cephfscg_suite_test.go +++ b/internal/controller/cephfscg/cephfscg_suite_test.go @@ -9,8 +9,8 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" "github.com/go-logr/logr" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumegroupsnapshot/v1alpha1" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/onsi/gomega/format" diff --git a/internal/controller/cephfscg/cghandler.go b/internal/controller/cephfscg/cghandler.go index da8a72153..e778bd098 100644 --- a/internal/controller/cephfscg/cghandler.go +++ b/internal/controller/cephfscg/cghandler.go @@ -6,7 +6,7 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" "github.com/go-logr/logr" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/internal/controller/util" "github.com/ramendr/ramen/internal/controller/volsync" @@ -95,7 +95,7 @@ func (c *cgHandler) CreateOrUpdateReplicationGroupDestination( replicationGroupDestinationName, replicationGroupDestinationNamespace string, rdSpecsInGroup []ramendrv1alpha1.VolSyncReplicationDestinationSpec, ) (*ramendrv1alpha1.ReplicationGroupDestination, error) { - replicationGroupDestinationName += c.cgName + replicationGroupDestinationName = util.TrimToK8sResourceNameLength(replicationGroupDestinationName + c.cgName) log := c.logger.WithName("CreateOrUpdateReplicationGroupDestination"). WithValues("ReplicationGroupDestinationName", replicationGroupDestinationName, @@ -144,7 +144,7 @@ func (c *cgHandler) CreateOrUpdateReplicationGroupSource( replicationGroupSourceName, replicationGroupSourceNamespace string, runFinalSync bool, ) (*ramendrv1alpha1.ReplicationGroupSource, bool, error) { - replicationGroupSourceName += c.cgName + replicationGroupSourceName = util.TrimToK8sResourceNameLength(replicationGroupSourceName + c.cgName) log := c.logger.WithName("CreateOrUpdateReplicationGroupSource"). WithValues("ReplicationGroupSourceName", replicationGroupSourceName, diff --git a/internal/controller/cephfscg/cghandler_test.go b/internal/controller/cephfscg/cghandler_test.go index e91a67691..cfa2228fb 100644 --- a/internal/controller/cephfscg/cghandler_test.go +++ b/internal/controller/cephfscg/cghandler_test.go @@ -4,8 +4,8 @@ import ( "context" volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumegroupsnapshot/v1alpha1" - vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" diff --git a/internal/controller/cephfscg/replicationgroupdestination_test.go b/internal/controller/cephfscg/replicationgroupdestination_test.go index 7a37882fe..8b8d67d93 100644 --- a/internal/controller/cephfscg/replicationgroupdestination_test.go +++ b/internal/controller/cephfscg/replicationgroupdestination_test.go @@ -6,7 +6,7 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" "github.com/backube/volsync/controllers/statemachine" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" diff --git a/internal/controller/cephfscg/volumegroupsourcehandler.go b/internal/controller/cephfscg/volumegroupsourcehandler.go index 054379fee..940b92e2c 100644 --- a/internal/controller/cephfscg/volumegroupsourcehandler.go +++ b/internal/controller/cephfscg/volumegroupsourcehandler.go @@ -7,8 +7,8 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" "github.com/go-logr/logr" - vgsv1alphfa1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumegroupsnapshot/v1alpha1" - vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + vgsv1alphfa1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/internal/controller/util" "github.com/ramendr/ramen/internal/controller/volsync" @@ -84,16 +84,18 @@ func NewVolumeGroupSourceHandler( ) VolumeGroupSourceHandler { vrgName := rgs.GetLabels()[volsync.VRGOwnerNameLabel] + vgsName := util.TrimToK8sResourceNameLength(fmt.Sprintf(VolumeGroupSnapshotNameFormat, rgs.Name)) + return &volumeGroupSourceHandler{ Client: client, - VolumeGroupSnapshotName: fmt.Sprintf(VolumeGroupSnapshotNameFormat, rgs.Name), + VolumeGroupSnapshotName: vgsName, VolumeGroupSnapshotNamespace: rgs.Namespace, VolumeGroupSnapshotClassName: rgs.Spec.VolumeGroupSnapshotClassName, VolumeGroupLabel: rgs.Spec.VolumeGroupSnapshotSource, VolsyncKeySecretName: volsync.GetVolSyncPSKSecretNameFromVRGName(vrgName), DefaultCephFSCSIDriverName: defaultCephFSCSIDriverName, Logger: logger.WithName("VolumeGroupSourceHandler"). - WithValues("VolumeGroupSnapshotName", fmt.Sprintf(VolumeGroupSnapshotNameFormat, rgs.Name)). + WithValues("VolumeGroupSnapshotName", vgsName). 
WithValues("VolumeGroupSnapshotNamespace", rgs.Namespace), } } @@ -136,7 +138,7 @@ func (h *volumeGroupSourceHandler) CreateOrUpdateVolumeGroupSnapshot( return err } - logger.Info("VolumeGroupSnapshot successfully be created or updated", "operation", op) + logger.Info("VolumeGroupSnapshot successfully created or updated", "operation", op) return nil } @@ -166,20 +168,21 @@ func (h *volumeGroupSourceHandler) CleanVolumeGroupSnapshot( } if volumeGroupSnapshot.Status != nil { - for _, vsRef := range volumeGroupSnapshot.Status.VolumeSnapshotRefList { + for _, pvcVSRef := range volumeGroupSnapshot.Status.PVCVolumeSnapshotRefList { logger.Info("Get PVCName from volume snapshot", - "VolumeSnapshotName", vsRef.Name, "VolumeSnapshotNamespace", vsRef.Namespace) + "vsName", pvcVSRef.VolumeSnapshotRef.Name, "vsNamespace", volumeGroupSnapshot.Namespace) - pvc, err := GetPVCFromVolumeSnapshot(ctx, h.Client, vsRef.Name, vsRef.Namespace, volumeGroupSnapshot) + pvc, err := util.GetPVC(ctx, h.Client, + types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: volumeGroupSnapshot.Namespace}) if err != nil { logger.Error(err, "Failed to get PVC name from volume snapshot", - "VolumeSnapshotName", vsRef.Name, "VolumeSnapshotNamespace", vsRef.Namespace) + "pvcName", pvcVSRef.PersistentVolumeClaimRef.Name, "vsNamespace", volumeGroupSnapshot.Namespace) return err } restoredPVCName := fmt.Sprintf(RestorePVCinCGNameFormat, pvc.Name) - restoredPVCNamespace := vsRef.Namespace + restoredPVCNamespace := pvc.Namespace logger.Info("Delete restored PVCs", "PVCName", restoredPVCName, "PVCNamespace", restoredPVCNamespace) @@ -229,28 +232,32 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( restoredPVCs := []RestoredPVC{} - for _, vsRef := range volumeGroupSnapshot.Status.VolumeSnapshotRefList { + for _, pvcVSRef := range volumeGroupSnapshot.Status.PVCVolumeSnapshotRefList { logger.Info("Get PVCName from volume snapshot", - "VolumeSnapshotName", vsRef.Name, "VolumeSnapshotNamespace", vsRef.Namespace) + "PVCName", pvcVSRef.PersistentVolumeClaimRef.Name, "VolumeSnapshotName", pvcVSRef.VolumeSnapshotRef.Name) - pvc, err := GetPVCFromVolumeSnapshot(ctx, h.Client, vsRef.Name, vsRef.Namespace, volumeGroupSnapshot) + pvc, err := util.GetPVC(ctx, h.Client, + types.NamespacedName{Name: pvcVSRef.PersistentVolumeClaimRef.Name, Namespace: volumeGroupSnapshot.Namespace}) if err != nil { - return nil, fmt.Errorf("failed to get PVC name from volume snapshot %s: %w", vsRef.Namespace+"/"+vsRef.Name, err) + return nil, fmt.Errorf("failed to get PVC from VGS %s: %w", + volumeGroupSnapshot.Namespace+"/"+pvcVSRef.PersistentVolumeClaimRef.Name, err) } restoreStorageClass, err := GetRestoreStorageClass(ctx, h.Client, *pvc.Spec.StorageClassName, h.DefaultCephFSCSIDriverName) if err != nil { - return nil, fmt.Errorf("failed to get Restore Storage Class from PVC %s: %w", pvc.Name+"/"+vsRef.Namespace, err) + return nil, fmt.Errorf("failed to get Restore Storage Class from PVC %s: %w", pvc.Name+"/"+pvc.Namespace, err) } RestoredPVCNamespacedName := types.NamespacedName{ - Namespace: vsRef.Namespace, + Namespace: pvc.Namespace, Name: fmt.Sprintf(RestorePVCinCGNameFormat, pvc.Name), } if err := h.RestoreVolumesFromSnapshot( - ctx, vsRef, pvc, RestoredPVCNamespacedName, restoreStorageClass.GetName(), owner); err != nil { - return nil, fmt.Errorf("failed to restore volumes from snapshot %s: %w", vsRef.Name+"/"+vsRef.Namespace, err) + ctx, pvcVSRef.VolumeSnapshotRef.Name, pvc, RestoredPVCNamespacedName, + 
restoreStorageClass.GetName(), owner); err != nil { + return nil, fmt.Errorf("failed to restore volumes from snapshot %s: %w", + pvcVSRef.VolumeSnapshotRef.Name+"/"+pvc.Namespace, err) } logger.Info("Successfully restore volumes from snapshot", @@ -259,7 +266,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( restoredPVCs = append(restoredPVCs, RestoredPVC{ SourcePVCName: pvc.Name, RestoredPVCName: RestoredPVCNamespacedName.Name, - VolumeSnapshotName: vsRef.Name, + VolumeSnapshotName: pvcVSRef.VolumeSnapshotRef.Name, }) } @@ -273,7 +280,7 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromVolumeGroupSnapshot( //nolint:funlen,gocognit,cyclop func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot( ctx context.Context, - vsRef corev1.ObjectReference, + vsName string, pvc *corev1.PersistentVolumeClaim, restoredPVCNamespacedname types.NamespacedName, restoreStorageClassName string, @@ -285,13 +292,13 @@ func (h *volumeGroupSourceHandler) RestoreVolumesFromSnapshot( volumeSnapshot := &vsv1.VolumeSnapshot{} if err := h.Client.Get(ctx, - types.NamespacedName{Name: vsRef.Name, Namespace: vsRef.Namespace}, + types.NamespacedName{Name: vsName, Namespace: pvc.Namespace}, volumeSnapshot, ); err != nil { return fmt.Errorf("failed to get volume snapshot: %w", err) } - snapshotRef := corev1.TypedLocalObjectReference{Name: vsRef.Name, APIGroup: &SnapshotGroup, Kind: SnapshotGroupKind} + snapshotRef := corev1.TypedLocalObjectReference{Name: vsName, APIGroup: &SnapshotGroup, Kind: SnapshotGroupKind} restoredPVC := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: restoredPVCNamespacedname.Name, @@ -497,61 +504,6 @@ func (h *volumeGroupSourceHandler) CheckReplicationSourceForRestoredPVCsComplete return true, nil } -var GetPVCFromVolumeSnapshot func( - ctx context.Context, k8sClient client.Client, vsName string, - vsNamespace string, vgs *vgsv1alphfa1.VolumeGroupSnapshot, -) (*corev1.PersistentVolumeClaim, error) - -func init() { - GetPVCFromVolumeSnapshot = FakeGetPVCFromVolumeSnapshot -} - -// TODO(wangyouhang): https://github.com/kubernetes-csi/external-snapshotter/issues/969 -// Fake func, need to be changed -func FakeGetPVCFromVolumeSnapshot( - ctx context.Context, k8sClient client.Client, vsName string, - vsNamespace string, vgs *vgsv1alphfa1.VolumeGroupSnapshot, -) (*corev1.PersistentVolumeClaim, error) { - if vgs.Status.BoundVolumeGroupSnapshotContentName == nil { - return nil, fmt.Errorf("BoundVolumeGroupSnapshotContentName is nil") - } - - // get vs index in vgs - var index int - - for i, VolumeSnapshotRef := range vgs.Status.VolumeSnapshotRefList { - if VolumeSnapshotRef.Name == vsName && VolumeSnapshotRef.Namespace == vsNamespace { - index = i - } - } - - // get storageHandle based on index - vgsc := &vgsv1alphfa1.VolumeGroupSnapshotContent{} - - err := k8sClient.Get(ctx, - types.NamespacedName{ - Name: *vgs.Status.BoundVolumeGroupSnapshotContentName, - Namespace: vgs.GetNamespace(), - }, - vgsc) - if err != nil { - return nil, err - } - - if len(vgs.Status.VolumeSnapshotRefList) != len(vgsc.Spec.Source.VolumeHandles) { - return nil, fmt.Errorf("len of vgs.Status.VolumeSnapshotRefList != len of vgsc.Spec.Source.VolumeHandles") - } - - storageHandle := vgsc.Spec.Source.VolumeHandles[index] - - pvc, err := GetPVCfromStorageHandle(ctx, k8sClient, storageHandle) - if err != nil { - return nil, fmt.Errorf("PVC is not found with storageHandle %s: %w", storageHandle, err) - } - - return pvc, nil -} - func GetPVCfromStorageHandle( ctx 
context.Context, k8sClient client.Client, diff --git a/internal/controller/cephfscg/volumegroupsourcehandler_test.go b/internal/controller/cephfscg/volumegroupsourcehandler_test.go index a3428688d..a3caefb7b 100644 --- a/internal/controller/cephfscg/volumegroupsourcehandler_test.go +++ b/internal/controller/cephfscg/volumegroupsourcehandler_test.go @@ -5,8 +5,8 @@ import ( "fmt" volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" - vgsv1alphfa1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumegroupsnapshot/v1alpha1" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + vgsv1alphfa1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -25,6 +25,19 @@ import ( "github.com/ramendr/ramen/internal/controller/volsync" ) +var ( + vgsName = "vgs" + vgscName = "vgsc" + vsName = "vs" + anotherVSName = "another-vs" + vgsLabel = map[string]string{"test": "test"} + scName = "sc" + appPVCName = "apppvc" + anotherAppPVCName = "another-apppvc" + rsName = "rs" + manualString = "manual" +) + var _ = Describe("Volumegroupsourcehandler", func() { var volumeGroupSourceHandler cephfscg.VolumeGroupSourceHandler @@ -34,6 +47,9 @@ var _ = Describe("Volumegroupsourcehandler", func() { volumeGroupSourceHandler = cephfscg.NewVolumeGroupSourceHandler( k8sClient, rgs, internalController.DefaultCephFSCSIDriverName, testLogger, ) + + CreatePVC(appPVCName) + CreatePVC(anotherAppPVCName) }) Describe("CreateOrUpdateVolumeGroupSnapshot", func() { It("Should be successful", func() { @@ -69,11 +85,9 @@ var _ = Describe("Volumegroupsourcehandler", func() { }) Context("Restored PVC exist", func() { BeforeEach(func() { - EnableFakeGetPVCFromVolumeSnapshot() - err := volumeGroupSourceHandler.CreateOrUpdateVolumeGroupSnapshot(context.TODO(), rgs) Expect(err).To(BeNil()) - UpdateVGS(rgs, vsName) + UpdateVGS(rgs, vsName, appPVCName) CreateRestoredPVC(vsName) }) @@ -113,10 +127,9 @@ var _ = Describe("Volumegroupsourcehandler", func() { }) Context("VolumeGroupSnapshot is ready", func() { BeforeEach(func() { - EnableFakeGetPVCFromVolumeSnapshot() CreateStorageClass() - CreateVS(vsName + "0") - UpdateVGS(rgs, vsName+"0") + CreateVS(anotherVSName) + UpdateVGS(rgs, anotherVSName, anotherAppPVCName) }) It("Should be failed", func() { restoredPVCs, err := volumeGroupSourceHandler.RestoreVolumesFromVolumeGroupSnapshot(context.Background(), rgs) @@ -175,17 +188,6 @@ var _ = Describe("Volumegroupsourcehandler", func() { }) }) -var ( - vgsName = "vgs" - vgscName = "vgsc" - vsName = "vs" - vgsLabel = map[string]string{"test": "test"} - scName = "sc" - appPVCName = "apppvc" - rsName = "rs" - manualString = "manual" -) - func CreateRS(rsName string) { rs := &volsyncv1alpha1.ReplicationSource{ ObjectMeta: metav1.ObjectMeta{ @@ -244,32 +246,7 @@ func GenerateReplicationGroupSource( } } -func EnableFakeGetPVCFromVolumeSnapshot() { - cephfscg.GetPVCFromVolumeSnapshot = func( - ctx context.Context, k8sClient client.Client, vsName string, - vsNamespace string, vgs *vgsv1alphfa1.VolumeGroupSnapshot, - ) (*corev1.PersistentVolumeClaim, error) { - return &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: vsName, - Namespace: vsNamespace, - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: &scName, - Resources: 
corev1.VolumeResourceRequirements{ - Limits: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI), - }, - Requests: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI), - }, - }, - }, - }, nil - } -} - -func UpdateVGS(rgs *v1alpha1.ReplicationGroupSource, vsName string) { +func UpdateVGS(rgs *v1alpha1.ReplicationGroupSource, vsName, pvcName string) { retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error { volumeGroupSnapshot := &vgsv1alphfa1.VolumeGroupSnapshot{} err := k8sClient.Get(context.TODO(), types.NamespacedName{ @@ -282,11 +259,9 @@ func UpdateVGS(rgs *v1alpha1.ReplicationGroupSource, vsName string) { ready := true volumeGroupSnapshot.Status = &vgsv1alphfa1.VolumeGroupSnapshotStatus{ ReadyToUse: &ready, - VolumeSnapshotRefList: []corev1.ObjectReference{{ - Name: vsName, - Namespace: "default", - Kind: "VolumeSnapshot", - APIVersion: "v1", + PVCVolumeSnapshotRefList: []vgsv1alphfa1.PVCVolumeSnapshotPair{{ + VolumeSnapshotRef: corev1.LocalObjectReference{Name: vsName}, + PersistentVolumeClaimRef: corev1.LocalObjectReference{Name: pvcName}, }}, } diff --git a/internal/controller/controllers_utils_test.go b/internal/controller/controllers_utils_test.go index 34c0f5c15..cb5882367 100644 --- a/internal/controller/controllers_utils_test.go +++ b/internal/controller/controllers_utils_test.go @@ -11,8 +11,8 @@ import ( . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" gomegaTypes "github.com/onsi/gomega/types" - ocmv1 "github.com/open-cluster-management/api/cluster/v1" - workv1 "github.com/open-cluster-management/api/work/v1" + ocmv1 "open-cluster-management.io/api/cluster/v1" + workv1 "open-cluster-management.io/api/work/v1" "sigs.k8s.io/controller-runtime/pkg/client" ramen "github.com/ramendr/ramen/api/v1alpha1" diff --git a/internal/controller/drcluster_controller.go b/internal/controller/drcluster_controller.go index df7a27443..a1311f7b6 100644 --- a/internal/controller/drcluster_controller.go +++ b/internal/controller/drcluster_controller.go @@ -11,7 +11,6 @@ import ( "strings" "github.com/google/uuid" - ocmworkv1 "github.com/open-cluster-management/api/work/v1" viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -20,6 +19,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + ocmworkv1 "open-cluster-management.io/api/work/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -43,7 +43,7 @@ type DRClusterReconciler struct { Scheme *runtime.Scheme MCVGetter util.ManagedClusterViewGetter ObjectStoreGetter ObjectStoreGetter - RateLimiter *workqueue.RateLimiter + RateLimiter *workqueue.TypedRateLimiter[reconcile.Request] } // DRCluster condition reasons @@ -676,6 +676,10 @@ func (u *drclusterInstance) generateDRClusterConfig() (*ramen.DRClusterConfig, e added := map[string]bool{} for idx := range drpolicies.Items { + if util.ResourceIsDeleted(&drpolicies.Items[idx]) { + continue + } + if drpolicies.Items[idx].Spec.SchedulingInterval == "" { continue } diff --git a/internal/controller/drcluster_controller_test.go b/internal/controller/drcluster_controller_test.go index 69a1b0262..5d39200a8 100644 --- a/internal/controller/drcluster_controller_test.go +++ 
b/internal/controller/drcluster_controller_test.go @@ -10,7 +10,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" - workv1 "github.com/open-cluster-management/api/work/v1" ramen "github.com/ramendr/ramen/api/v1alpha1" controllers "github.com/ramendr/ramen/internal/controller" "github.com/ramendr/ramen/internal/controller/util" @@ -19,6 +18,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + workv1 "open-cluster-management.io/api/work/v1" ctrl "sigs.k8s.io/controller-runtime" ) diff --git a/internal/controller/drcluster_drcconfig_test.go b/internal/controller/drcluster_drcconfig_test.go index 1a65354bb..2cbb89f27 100644 --- a/internal/controller/drcluster_drcconfig_test.go +++ b/internal/controller/drcluster_drcconfig_test.go @@ -12,11 +12,12 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" - ocmv1 "github.com/open-cluster-management/api/cluster/v1" + ocmv1 "open-cluster-management.io/api/cluster/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,6 +25,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/util/workqueue" config "k8s.io/component-base/config/v1alpha1" + "k8s.io/utils/ptr" ramen "github.com/ramendr/ramen/api/v1alpha1" ramencontrollers "github.com/ramendr/ramen/internal/controller" @@ -112,6 +114,7 @@ var _ = Describe("DRCluster-DRClusterConfigTests", Ordered, func() { By("starting the DRCluster reconciler") options := manager.Options{Scheme: scheme.Scheme} + options.Controller.SkipNameValidation = ptr.To(true) ramencontrollers.LoadControllerOptions(&options, ramenConfig) k8sManager, err := ctrl.NewManager(cfg, options) @@ -119,8 +122,8 @@ var _ = Describe("DRCluster-DRClusterConfigTests", Ordered, func() { apiReader = k8sManager.GetAPIReader() - rateLimiter := workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(10*time.Millisecond, 100*time.Millisecond), + rateLimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](10*time.Millisecond, 100*time.Millisecond), ) Expect((&ramencontrollers.DRClusterReconciler{ diff --git a/internal/controller/drcluster_mmode.go b/internal/controller/drcluster_mmode.go index ce5129468..6f6f0140c 100644 --- a/internal/controller/drcluster_mmode.go +++ b/internal/controller/drcluster_mmode.go @@ -5,10 +5,10 @@ package controllers import ( "github.com/go-logr/logr" - ocmworkv1 "github.com/open-cluster-management/api/work/v1" viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ocmworkv1 "open-cluster-management.io/api/work/v1" ramen "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/internal/controller/util" diff --git a/internal/controller/drcluster_mmode_test.go b/internal/controller/drcluster_mmode_test.go index d256deea2..623714706 100644 --- a/internal/controller/drcluster_mmode_test.go +++ b/internal/controller/drcluster_mmode_test.go @@ -11,12 +11,13 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - ocmworkv1 "github.com/open-cluster-management/api/work/v1" plrv1 "github.com/stolostron/multicloud-operators-placementrule/pkg/apis/apps/v1" + ocmworkv1 "open-cluster-management.io/api/work/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,6 +26,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/util/workqueue" config "k8s.io/component-base/config/v1alpha1" + "k8s.io/utils/ptr" rmn "github.com/ramendr/ramen/api/v1alpha1" ramencontrollers "github.com/ramendr/ramen/internal/controller" @@ -146,6 +148,7 @@ var _ = Describe("DRClusterMModeTests", Ordered, func() { By("starting the DRCluster reconciler") options := manager.Options{Scheme: scheme.Scheme} + options.Controller.SkipNameValidation = ptr.To(true) ramencontrollers.LoadControllerOptions(&options, ramenConfig) Expect(err).NotTo(HaveOccurred()) @@ -153,8 +156,8 @@ var _ = Describe("DRClusterMModeTests", Ordered, func() { k8sManager, err := ctrl.NewManager(cfg, options) Expect(err).ToNot(HaveOccurred()) - rateLimiter := workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(10*time.Millisecond, 100*time.Millisecond), + rateLimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](10*time.Millisecond, 100*time.Millisecond), ) Expect((&ramencontrollers.DRClusterReconciler{ diff --git a/internal/controller/drclusterconfig_controller.go b/internal/controller/drclusterconfig_controller.go index 6cdfb7d32..2cb7ea7ce 100644 --- a/internal/controller/drclusterconfig_controller.go +++ b/internal/controller/drclusterconfig_controller.go @@ -5,45 +5,377 @@ package controllers import ( "context" + "fmt" + "slices" + "time" + volrep "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" + "golang.org/x/time/rate" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" + ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" - ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" + "github.com/go-logr/logr" + "github.com/google/uuid" + ramen "github.com/ramendr/ramen/api/v1alpha1" + "github.com/ramendr/ramen/internal/controller/util" +) + +const ( + drCConfigFinalizerName = "drclusterconfigs.ramendr.openshift.io/finalizer" + drCConfigOwnerLabel = "drclusterconfigs.ramendr.openshift.io/owner" + drCConfigOwnerName = "ramen" + + maxReconcileBackoff = 5 * time.Minute + + // Prefixes for various ClusterClaims + ccSCPrefix = "storage.class" + ccVSCPrefix = "snapshot.class" + ccVRCPrefix = "replication.class" ) // DRClusterConfigReconciler reconciles a DRClusterConfig object type DRClusterConfigReconciler struct { client.Client - Scheme *runtime.Scheme -} - 
-//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the DRClusterConfig object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.16.3/pkg/reconcile + Scheme *runtime.Scheme + Log logr.Logger + RateLimiter *workqueue.TypedRateLimiter[reconcile.Request] +} + +//nolint:lll +// +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/finalizers,verbs=update +// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumereplicationclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.open-cluster-management.io,resources=clusterclaims,verbs=get;list;watch;create;update;delete + func (r *DRClusterConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) + log := r.Log.WithValues("name", req.NamespacedName.Name, "rid", uuid.New()) + log.Info("reconcile enter") + + defer log.Info("reconcile exit") + + drCConfig := &ramen.DRClusterConfig{} + if err := r.Client.Get(ctx, req.NamespacedName, drCConfig); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{}, client.IgnoreNotFound(fmt.Errorf("get: %w", err)) + } + + // Ensure there is only one DRClusterConfig for the cluster + if _, err := r.GetDRClusterConfig(ctx); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{}, err + } + + if util.ResourceIsDeleted(drCConfig) { + return r.processDeletion(ctx, log, drCConfig) + } + + return r.processCreateOrUpdate(ctx, log, drCConfig) +} + +func (r *DRClusterConfigReconciler) GetDRClusterConfig(ctx context.Context) (*ramen.DRClusterConfig, error) { + drcConfigs := &ramen.DRClusterConfigList{} + if err := r.Client.List(ctx, drcConfigs); err != nil { + return nil, fmt.Errorf("failed to list DRClusterConfig, %w", err) + } + + if len(drcConfigs.Items) == 0 { + return nil, fmt.Errorf("failed to find DRClusterConfig") + } + + if len(drcConfigs.Items) > 1 { + return nil, fmt.Errorf("multiple DRClusterConfigs found") + } + + return &drcConfigs.Items[0], nil +} + +// processDeletion ensures all cluster claims created by drClusterConfig are deleted, before removing the finalizer on +// the resource itself +func (r *DRClusterConfigReconciler) processDeletion( + ctx context.Context, + log logr.Logger, + drCConfig *ramen.DRClusterConfig, +) (ctrl.Result, error) { + if err := r.pruneClusterClaims(ctx, log, []string{}); err != nil {
log.Info("Reconcile error", "error", err) + + return ctrl.Result{Requeue: true}, err + } - // TODO(user): your logic here + if err := util.NewResourceUpdater(drCConfig). + RemoveFinalizer(drCConfigFinalizerName). + Update(ctx, r.Client); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{Requeue: true}, + fmt.Errorf("failed to remove finalizer for DRClusterConfig resource, %w", err) + } return ctrl.Result{}, nil } +// pruneClusterClaims will prune all ClusterClaims created by drClusterConfig that are not in the +// passed in survivor list +func (r *DRClusterConfigReconciler) pruneClusterClaims(ctx context.Context, log logr.Logger, survivors []string) error { + matchLabels := map[string]string{ + drCConfigOwnerLabel: drCConfigOwnerName, + } + + listOptions := []client.ListOption{ + client.MatchingLabels(matchLabels), + } + + claims := &clusterv1alpha1.ClusterClaimList{} + if err := r.Client.List(ctx, claims, listOptions...); err != nil { + return fmt.Errorf("failed to list ClusterClaims, %w", err) + } + + for idx := range claims.Items { + if slices.Contains(survivors, claims.Items[idx].GetName()) { + continue + } + + if err := r.Client.Delete(ctx, &claims.Items[idx]); err != nil { + return fmt.Errorf("failed to delete ClusterClaim %s, %w", claims.Items[idx].GetName(), err) + } + + log.Info("Pruned ClusterClaim", "claimName", claims.Items[idx].GetName()) + } + + return nil +} + +// processCreateOrUpdate protects the resource with a finalizer and creates ClusterClaims for various storage related +// classes in the cluster. It would finally prune stale ClusterClaims from previous reconciliations. +func (r *DRClusterConfigReconciler) processCreateOrUpdate( + ctx context.Context, + log logr.Logger, + drCConfig *ramen.DRClusterConfig, +) (ctrl.Result, error) { + if err := util.NewResourceUpdater(drCConfig). + AddFinalizer(drCConfigFinalizerName). + Update(ctx, r.Client); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{Requeue: true}, fmt.Errorf("failed to add finalizer for DRClusterConfig resource, %w", err) + } + + allSurvivors, err := r.CreateClassClaims(ctx, log) + if err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{Requeue: true}, err + } + + if err := r.pruneClusterClaims(ctx, log, allSurvivors); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{Requeue: true}, err + } + + return ctrl.Result{}, nil +} + +// CreateClassClaims creates cluster claims for various storage related classes of interest +func (r *DRClusterConfigReconciler) CreateClassClaims(ctx context.Context, log logr.Logger) ([]string, error) { + allSurvivors := []string{} + + survivors, err := r.createSCClusterClaims(ctx, log) + if err != nil { + return nil, err + } + + allSurvivors = append(allSurvivors, survivors...) + + survivors, err = r.createVSCClusterClaims(ctx, log) + if err != nil { + return nil, err + } + + allSurvivors = append(allSurvivors, survivors...) + + survivors, err = r.createVRCClusterClaims(ctx, log) + if err != nil { + return nil, err + } + + allSurvivors = append(allSurvivors, survivors...) 
+ + return allSurvivors, nil +} + +// createSCClusterClaims lists StorageClasses and creates ClusterClaims for ones marked for ramen +func (r *DRClusterConfigReconciler) createSCClusterClaims( + ctx context.Context, log logr.Logger, +) ([]string, error) { + claims := []string{} + + sClasses := &storagev1.StorageClassList{} + if err := r.Client.List(ctx, sClasses); err != nil { + return nil, fmt.Errorf("failed to list StorageClasses, %w", err) + } + + for i := range sClasses.Items { + if !util.HasLabel(&sClasses.Items[i], StorageIDLabel) { + continue + } + + if err := r.ensureClusterClaim(ctx, log, ccSCPrefix, sClasses.Items[i].GetName()); err != nil { + return nil, err + } + + claims = append(claims, claimName(ccSCPrefix, sClasses.Items[i].GetName())) + } + + return claims, nil +} + +// createVSCClusterClaims lists VolumeSnapshotClasses and creates ClusterClaims for ones marked for ramen +func (r *DRClusterConfigReconciler) createVSCClusterClaims( + ctx context.Context, log logr.Logger, +) ([]string, error) { + claims := []string{} + + vsClasses := &snapv1.VolumeSnapshotClassList{} + if err := r.Client.List(ctx, vsClasses); err != nil { + return nil, fmt.Errorf("failed to list VolumeSnapshotClasses, %w", err) + } + + for i := range vsClasses.Items { + if !util.HasLabel(&vsClasses.Items[i], StorageIDLabel) { + continue + } + + if err := r.ensureClusterClaim(ctx, log, ccVSCPrefix, vsClasses.Items[i].GetName()); err != nil { + return nil, err + } + + claims = append(claims, claimName(ccVSCPrefix, vsClasses.Items[i].GetName())) + } + + return claims, nil +} + +// createVRCClusterClaims lists VolumeReplicationClasses and creates ClusterClaims for ones marked for ramen +func (r *DRClusterConfigReconciler) createVRCClusterClaims( + ctx context.Context, log logr.Logger, +) ([]string, error) { + claims := []string{} + + vrClasses := &volrep.VolumeReplicationClassList{} + if err := r.Client.List(ctx, vrClasses); err != nil { + return nil, fmt.Errorf("failed to list VolumeReplicationClasses, %w", err) + } + + for i := range vrClasses.Items { + if !util.HasLabel(&vrClasses.Items[i], VolumeReplicationIDLabel) { + continue + } + + if err := r.ensureClusterClaim(ctx, log, ccVRCPrefix, vrClasses.Items[i].GetName()); err != nil { + return nil, err + } + + claims = append(claims, claimName(ccVRCPrefix, vrClasses.Items[i].GetName())) + } + + return claims, nil +} + +// ensureClusterClaim is a generic ClusterClaim creation function, that creates a claim named "prefix.name", with +// the passed in name as the ClusterClaim spec.Value +func (r *DRClusterConfigReconciler) ensureClusterClaim( + ctx context.Context, + log logr.Logger, + prefix, name string, +) error { + cc := &clusterv1alpha1.ClusterClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: claimName(prefix, name), + }, + } + + if _, err := ctrl.CreateOrUpdate(ctx, r.Client, cc, func() error { + util.NewResourceUpdater(cc).AddLabel(drCConfigOwnerLabel, drCConfigOwnerName) + + cc.Spec.Value = name + + return nil + }); err != nil { + return fmt.Errorf("failed to create or update ClusterClaim %s, %w", claimName(prefix, name), err) + } + + log.Info("Created ClusterClaim", "claimName", cc.GetName()) + + return nil +} + +func claimName(prefix, name string) string { + return prefix + "." + name +} + // SetupWithManager sets up the controller with the Manager. func (r *DRClusterConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&ramendrv1alpha1.DRClusterConfig{}). 
+ drccMapFn := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( + func(ctx context.Context, obj client.Object) []reconcile.Request { + drcConfig, err := r.GetDRClusterConfig(ctx) + if err != nil { + ctrl.Log.Info(fmt.Sprintf("failed processing DRClusterConfig mapping, %v", err)) + + return []ctrl.Request{} + } + + return []ctrl.Request{ + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: drcConfig.GetName(), + }, + }, + } + }), + ) + + drccPredFn := builder.WithPredicates(predicate.NewPredicateFuncs( + func(object client.Object) bool { + return true + }), + ) + + rateLimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](1*time.Second, maxReconcileBackoff), + // defaults from client-go + //nolint: gomnd + &workqueue.TypedBucketRateLimiter[reconcile.Request]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ) + + if r.RateLimiter != nil { + rateLimiter = *r.RateLimiter + } + + controller := ctrl.NewControllerManagedBy(mgr) + + return controller.WithOptions(ctrlcontroller.Options{ + RateLimiter: rateLimiter, + }).For(&ramen.DRClusterConfig{}). + Watches(&storagev1.StorageClass{}, drccMapFn, drccPredFn). + Watches(&snapv1.VolumeSnapshotClass{}, drccMapFn, drccPredFn). + Watches(&volrep.VolumeReplicationClass{}, drccMapFn, drccPredFn). Complete(r) } diff --git a/internal/controller/drclusterconfig_controller_test.go b/internal/controller/drclusterconfig_controller_test.go index 571d56b26..5b2985b43 100644 --- a/internal/controller/drclusterconfig_controller_test.go +++ b/internal/controller/drclusterconfig_controller_test.go @@ -5,68 +5,381 @@ package controllers_test import ( "context" + "fmt" + "os" + "path/filepath" + "time" + volrep "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/workqueue" + config "k8s.io/component-base/config/v1alpha1" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" + ramen "github.com/ramendr/ramen/api/v1alpha1" ramencontrollers "github.com/ramendr/ramen/internal/controller" ) -var _ = Describe("DRClusterConfig Controller", func() { - Context("When reconciling a resource", func() { - const resourceName = "test-resource" +func ensureClaimCount(apiReader client.Reader, count int) { + Eventually(func() bool { + claims := &clusterv1alpha1.ClusterClaimList{} + + err := apiReader.List(context.TODO(), claims) + if err != nil { + return false + } + + return len(claims.Items) == count + }, timeout, interval).Should(BeTrue()) +} + +func ensureClusterClaim(apiReader client.Reader, class, name string) { + Eventually(func() error { + ccName := types.NamespacedName{ + Name: class + "." 
+ name, + } + + cc := &clusterv1alpha1.ClusterClaim{} + err := apiReader.Get(context.TODO(), ccName, cc) + if err != nil { + return err + } + + if cc.Spec.Value != name { + return fmt.Errorf("mismatched spec.value in ClusterClaim, expected %s, got %s", + name, cc.Spec.Value) + } + + return nil + }, timeout, interval).Should(BeNil()) +} + +var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { + var ( + ctx context.Context + cancel context.CancelFunc + cfg *rest.Config + testEnv *envtest.Environment + k8sClient client.Client + apiReader client.Reader + drCConfig *ramen.DRClusterConfig + baseSC, sc1, sc2 *storagev1.StorageClass + baseVSC, vsc1, vsc2 *snapv1.VolumeSnapshotClass + baseVRC, vrc1, vrc2 *volrep.VolumeReplicationClass + claimCount int + ) + + BeforeAll(func() { + By("bootstrapping test environment") + + Expect(os.Setenv("POD_NAMESPACE", ramenNamespace)).To(Succeed()) + + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "config", "crd", "bases"), + filepath.Join("..", "..", "hack", "test"), + }, + } + + if testEnv.UseExistingCluster != nil && *testEnv.UseExistingCluster == true { + namespaceDeletionSupported = true + } + + var err error + done := make(chan interface{}) + go func() { + defer GinkgoRecover() + cfg, err = testEnv.Start() + close(done) + }() + Eventually(done).WithTimeout(time.Minute).Should(BeClosed()) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) - ctx := context.Background() + By("starting the DRClusterConfig reconciler") - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed + ramenConfig := &ramen.RamenConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "RamenConfig", + APIVersion: ramen.GroupVersion.String(), + }, + LeaderElection: &config.LeaderElectionConfiguration{ + LeaderElect: new(bool), + ResourceName: ramencontrollers.HubLeaderElectionResourceName, + }, + Metrics: ramen.ControllerMetrics{ + BindAddress: "0", // Disable metrics + }, } - drclusterconfig := &ramendrv1alpha1.DRClusterConfig{} - - BeforeEach(func() { - By("creating the custom resource for the Kind DRClusterConfig") - err := k8sClient.Get(ctx, typeNamespacedName, drclusterconfig) - if err != nil && errors.IsNotFound(err) { - resource := &ramendrv1alpha1.DRClusterConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: "default", - }, - // TODO(user): Specify other spec details if needed. 
- } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) - } + + options := manager.Options{Scheme: scheme.Scheme} + ramencontrollers.LoadControllerOptions(&options, ramenConfig) + + k8sManager, err := ctrl.NewManager(cfg, options) + Expect(err).ToNot(HaveOccurred()) + apiReader = k8sManager.GetAPIReader() + Expect(apiReader).ToNot(BeNil()) + + rateLimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request]( + 10*time.Millisecond, + 100*time.Millisecond), + ) + + Expect((&ramencontrollers.DRClusterConfigReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("DRClusterConfig"), + RateLimiter: &rateLimiter, + }).SetupWithManager(k8sManager)).To(Succeed()) + + ctx, cancel = context.WithCancel(context.TODO()) + go func() { + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred()) + }() + + By("Creating a DClusterConfig") + + drCConfig = &ramen.DRClusterConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "local"}, + Spec: ramen.DRClusterConfigSpec{}, + } + Expect(k8sClient.Create(context.TODO(), drCConfig)).To(Succeed()) + + By("Defining basic Classes") + + baseSC = &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baseSC", + Labels: map[string]string{ + ramencontrollers.StorageIDLabel: "fake", + }, + }, + Provisioner: "fake.ramen.com", + } + + baseVSC = &snapv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baseVSC", + Labels: map[string]string{ + ramencontrollers.StorageIDLabel: "fake", + }, + }, + Driver: "fake.ramen.com", + DeletionPolicy: snapv1.VolumeSnapshotContentDelete, + } + + baseVRC = &volrep.VolumeReplicationClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baseVRC", + Labels: map[string]string{ + ramencontrollers.VolumeReplicationIDLabel: "fake", + }, + }, + Spec: volrep.VolumeReplicationClassSpec{ + Provisioner: "fake.ramen.com", + }, + } + }) + + AfterAll(func() { + By("deleting the DRClusterConfig") + Expect(k8sClient.Delete(context.TODO(), drCConfig)).To(Succeed()) + Eventually(func() bool { + err := k8sClient.Get(context.TODO(), types.NamespacedName{ + Name: "local", + }, drCConfig) + + return errors.IsNotFound(err) + }, timeout, interval).Should(BeTrue()) + + By("ensuring claim count is 0 post deletion") + ensureClaimCount(apiReader, 0) + + cancel() // Stop the reconciler + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) + }) + + Describe("ClusterClaims", Ordered, func() { + Context("Given DRClusterConfig resource", func() { + When("there is a StorageClass created with required labels", func() { + It("creates a ClusterClaim", func() { + By("creating a StorageClass") + + sc1 = baseSC.DeepCopy() + sc1.Name = "sc1" + Expect(k8sClient.Create(context.TODO(), sc1)).To(Succeed()) + + claimCount++ + ensureClusterClaim(apiReader, "storage.class", "sc1") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a StorageClass with required labels is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a StorageClass") + + Expect(k8sClient.Delete(context.TODO(), sc1)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + }) + }) + When("there are multiple StorageClass created with required labels", func() { + It("creates ClusterClaims", func() { + By("creating a StorageClass") + + sc1 = baseSC.DeepCopy() + sc1.Name = "sc1" + Expect(k8sClient.Create(context.TODO(), sc1)).To(Succeed()) + + sc2 = baseSC.DeepCopy() 
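// Editor's sketch, not part of the patch: outside envtest, a StorageClass opts into claim creation
// the same way baseSC above does, by carrying the StorageID label. The client wiring, function name,
// and class name here are assumptions for illustration; only ramencontrollers.StorageIDLabel comes
// from the code under test.
package sketch

import (
	"context"

	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	ramencontrollers "github.com/ramendr/ramen/internal/controller"
)

// labelStorageClassForRamen adds the StorageID label to an existing StorageClass so the
// DRClusterConfig reconciler creates a "storage.class.<name>" ClusterClaim for it.
func labelStorageClassForRamen(ctx context.Context, c client.Client, name, storageID string) error {
	sc := &storagev1.StorageClass{}
	if err := c.Get(ctx, types.NamespacedName{Name: name}, sc); err != nil {
		return err
	}

	if sc.Labels == nil {
		sc.Labels = map[string]string{}
	}

	// The reconciler in this patch only checks for the label's presence (util.HasLabel),
	// so any storage ID value is sufficient for claim creation itself.
	sc.Labels[ramencontrollers.StorageIDLabel] = storageID

	return c.Update(ctx, sc)
}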
+ sc2.Name = "sc2" + Expect(k8sClient.Create(context.TODO(), sc2)).To(Succeed()) + + claimCount += 2 + ensureClusterClaim(apiReader, "storage.class", "sc1") + ensureClusterClaim(apiReader, "storage.class", "sc2") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a StorageClass label is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a StorageClass label") + + sc1.Labels = map[string]string{} + Expect(k8sClient.Update(context.TODO(), sc1)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + ensureClusterClaim(apiReader, "storage.class", "sc2") + }) + }) }) + When("there is a SnapshotCLass created with required labels", func() { + It("creates a ClusterClaim", func() { + By("creating a SnapshotClass") - AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. - resource := &ramendrv1alpha1.DRClusterConfig{} - err := apiReader.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) + vsc1 = baseVSC.DeepCopy() + vsc1.Name = "vsc1" + Expect(k8sClient.Create(context.TODO(), vsc1)).To(Succeed()) - By("Cleanup the specific resource instance DRClusterConfig") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + claimCount++ + ensureClusterClaim(apiReader, "snapshot.class", "vsc1") + ensureClaimCount(apiReader, claimCount) + }) }) - It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - controllerReconciler := &ramencontrollers.DRClusterConfigReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - } - - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, + When("a SnapshotClass with required labels is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a SnapshotClass") + + Expect(k8sClient.Delete(context.TODO(), vsc1)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + }) + }) + When("there are multiple SnapshotClass created with required labels", func() { + It("creates ClusterClaims", func() { + By("creating a SnapshotClass") + + vsc1 = baseVSC.DeepCopy() + vsc1.Name = "vsc1" + Expect(k8sClient.Create(context.TODO(), vsc1)).To(Succeed()) + + vsc2 = baseVSC.DeepCopy() + vsc2.Name = "vsc2" + Expect(k8sClient.Create(context.TODO(), vsc2)).To(Succeed()) + + claimCount += 2 + ensureClusterClaim(apiReader, "snapshot.class", "vsc1") + ensureClusterClaim(apiReader, "snapshot.class", "vsc2") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a SnapshotClass label is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a SnapshotClass label") + + vsc2.Labels = map[string]string{} + Expect(k8sClient.Update(context.TODO(), vsc2)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + ensureClusterClaim(apiReader, "snapshot.class", "vsc1") + }) + }) + When("there is a VolumeReplicationCLass created with required labels", func() { + It("creates a ClusterClaim", func() { + By("creating a VolumeReplicationClass") + + vrc1 = baseVRC.DeepCopy() + vrc1.Name = "vrc1" + Expect(k8sClient.Create(context.TODO(), vrc1)).To(Succeed()) + + claimCount++ + ensureClusterClaim(apiReader, "replication.class", "vrc1") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a VolumeReplicationClass with required labels is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a VolumeReplicationClass") + + 
Expect(k8sClient.Delete(context.TODO(), vrc1)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + }) + }) + When("there are multiple VolumeReplicationClass created with required labels", func() { + It("creates ClusterClaims", func() { + By("creating a VolumeReplicationClass") + + vrc1 = baseVRC.DeepCopy() + vrc1.Name = "vrc1" + Expect(k8sClient.Create(context.TODO(), vrc1)).To(Succeed()) + + vrc2 = baseVRC.DeepCopy() + vrc2.Name = "vrc2" + Expect(k8sClient.Create(context.TODO(), vrc2)).To(Succeed()) + + claimCount += 2 + ensureClusterClaim(apiReader, "replication.class", "vrc1") + ensureClusterClaim(apiReader, "replication.class", "vrc2") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a VolumeReplicationClass label is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a VolumeReplicationClass label") + + vrc2.Labels = map[string]string{} + Expect(k8sClient.Update(context.TODO(), vrc2)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + ensureClusterClaim(apiReader, "replication.class", "vrc1") }) - Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. }) }) }) diff --git a/internal/controller/drplacementcontrol.go b/internal/controller/drplacementcontrol.go index 8eee6d53b..4ce206d5a 100644 --- a/internal/controller/drplacementcontrol.go +++ b/internal/controller/drplacementcontrol.go @@ -10,12 +10,12 @@ import ( "time" "github.com/go-logr/logr" - clrapiv1beta1 "github.com/open-cluster-management-io/api/cluster/v1beta1" errorswrapper "github.com/pkg/errors" "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clrapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" rmn "github.com/ramendr/ramen/api/v1alpha1" rmnutil "github.com/ramendr/ramen/internal/controller/util" @@ -825,6 +825,14 @@ func (d *DRPCInstance) RunRelocate() (bool, error) { const done = true + if d.reconciler.numClustersQueriedSuccessfully != len(d.drPolicy.Spec.DRClusters) { + d.log.Info("Can't progress with relocation -- Not all clusters are reachable", + "numClustersQueriedSuccessfully", d.reconciler.numClustersQueriedSuccessfully, + "NumOfClusters", len(d.drPolicy.Spec.DRClusters)) + + return !done, nil + } + preferredCluster := d.instance.Spec.PreferredCluster preferredClusterNamespace := d.instance.Spec.PreferredCluster @@ -1842,7 +1850,8 @@ func (d *DRPCInstance) ensureVRGManifestWorkOnClusterDeleted(clusterName string) } if d.ensureVRGIsSecondaryOnCluster(clusterName) { - err := d.mwu.DeleteManifestWorksForCluster(clusterName) + // delete VRG manifest work + err = d.mwu.DeleteManifestWork(d.mwu.BuildManifestWorkName(rmnutil.MWTypeVRG), clusterName) if err != nil { return !done, fmt.Errorf("%w", err) } diff --git a/internal/controller/drplacementcontrol_controller.go b/internal/controller/drplacementcontrol_controller.go index adab0b1cd..227b2af7c 100644 --- a/internal/controller/drplacementcontrol_controller.go +++ b/internal/controller/drplacementcontrol_controller.go @@ -13,9 +13,7 @@ import ( "github.com/go-logr/logr" "github.com/google/uuid" - ocmworkv1 "github.com/open-cluster-management/api/work/v1" errorswrapper "github.com/pkg/errors" - viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1" plrv1 
"github.com/stolostron/multicloud-operators-placementrule/pkg/apis/apps/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -25,20 +23,15 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - clrapiv1beta1 "github.com/open-cluster-management-io/api/cluster/v1beta1" rmn "github.com/ramendr/ramen/api/v1alpha1" argocdv1alpha1hack "github.com/ramendr/ramen/internal/controller/argocd" rmnutil "github.com/ramendr/ramen/internal/controller/util" "github.com/ramendr/ramen/internal/controller/volsync" + clrapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" ) const ( @@ -75,581 +68,23 @@ type ProgressCallback func(string, string) // DRPlacementControlReconciler reconciles a DRPlacementControl object type DRPlacementControlReconciler struct { client.Client - APIReader client.Reader - Log logr.Logger - MCVGetter rmnutil.ManagedClusterViewGetter - Scheme *runtime.Scheme - Callback ProgressCallback - eventRecorder *rmnutil.EventReporter - savedInstanceStatus rmn.DRPlacementControlStatus - ObjStoreGetter ObjectStoreGetter - RateLimiter *workqueue.RateLimiter -} - -func ManifestWorkPredicateFunc() predicate.Funcs { - mwPredicate := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return false - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return false - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - UpdateFunc: func(e event.UpdateEvent) bool { - log := ctrl.Log.WithName("Predicate").WithName("ManifestWork") - - oldMW, ok := e.ObjectOld.DeepCopyObject().(*ocmworkv1.ManifestWork) - if !ok { - log.Info("Failed to deep copy older ManifestWork") - - return false - } - newMW, ok := e.ObjectNew.DeepCopyObject().(*ocmworkv1.ManifestWork) - if !ok { - log.Info("Failed to deep copy newer ManifestWork") - - return false - } - - log.Info(fmt.Sprintf("Update event for MW %s/%s", oldMW.Name, oldMW.Namespace)) - - return !reflect.DeepEqual(oldMW.Status, newMW.Status) - }, - } - - return mwPredicate -} - -func filterMW(mw *ocmworkv1.ManifestWork) []ctrl.Request { - if mw.Annotations[DRPCNameAnnotation] == "" || - mw.Annotations[DRPCNamespaceAnnotation] == "" { - return []ctrl.Request{} - } - - return []ctrl.Request{ - reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: mw.Annotations[DRPCNameAnnotation], - Namespace: mw.Annotations[DRPCNamespaceAnnotation], - }, - }, - } -} - -func ManagedClusterViewPredicateFunc() predicate.Funcs { - log := ctrl.Log.WithName("Predicate").WithName("MCV") - mcvPredicate := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return false - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return false - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - UpdateFunc: func(e event.UpdateEvent) bool { - oldMCV, ok := e.ObjectOld.DeepCopyObject().(*viewv1beta1.ManagedClusterView) - if !ok { - log.Info("Failed to deep copy older MCV") - - return false - } - newMCV, ok := e.ObjectNew.DeepCopyObject().(*viewv1beta1.ManagedClusterView) - if !ok { - log.Info("Failed to deep copy newer MCV") - - 
return false - } - - log.Info(fmt.Sprintf("Update event for MCV %s/%s", oldMCV.Name, oldMCV.Namespace)) - - return !reflect.DeepEqual(oldMCV.Status, newMCV.Status) - }, - } - - return mcvPredicate -} - -func filterMCV(mcv *viewv1beta1.ManagedClusterView) []ctrl.Request { - if mcv.Annotations[DRPCNameAnnotation] == "" || - mcv.Annotations[DRPCNamespaceAnnotation] == "" { - return []ctrl.Request{} - } - - return []ctrl.Request{ - reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: mcv.Annotations[DRPCNameAnnotation], - Namespace: mcv.Annotations[DRPCNamespaceAnnotation], - }, - }, - } -} - -func PlacementRulePredicateFunc() predicate.Funcs { - log := ctrl.Log.WithName("DRPCPredicate").WithName("UserPlRule") - usrPlRulePredicate := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return false - }, - UpdateFunc: func(e event.UpdateEvent) bool { - return false - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - DeleteFunc: func(e event.DeleteEvent) bool { - log.Info("Delete event") - - return true - }, - } - - return usrPlRulePredicate -} - -func filterUsrPlRule(usrPlRule *plrv1.PlacementRule) []ctrl.Request { - if usrPlRule.Annotations[DRPCNameAnnotation] == "" || - usrPlRule.Annotations[DRPCNamespaceAnnotation] == "" { - return []ctrl.Request{} - } - - return []ctrl.Request{ - reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: usrPlRule.Annotations[DRPCNameAnnotation], - Namespace: usrPlRule.Annotations[DRPCNamespaceAnnotation], - }, - }, - } -} - -func PlacementPredicateFunc() predicate.Funcs { - log := ctrl.Log.WithName("DRPCPredicate").WithName("UserPlmnt") - usrPlmntPredicate := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return false - }, - UpdateFunc: func(e event.UpdateEvent) bool { - return false - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - DeleteFunc: func(e event.DeleteEvent) bool { - log.Info("Delete event") - - return true - }, - } - - return usrPlmntPredicate -} - -func filterUsrPlmnt(usrPlmnt *clrapiv1beta1.Placement) []ctrl.Request { - if usrPlmnt.Annotations[DRPCNameAnnotation] == "" || - usrPlmnt.Annotations[DRPCNamespaceAnnotation] == "" { - return []ctrl.Request{} - } - - return []ctrl.Request{ - reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: usrPlmnt.Annotations[DRPCNameAnnotation], - Namespace: usrPlmnt.Annotations[DRPCNamespaceAnnotation], - }, - }, - } -} - -func DRClusterPredicateFunc() predicate.Funcs { - log := ctrl.Log.WithName("DRPCPredicate").WithName("DRCluster") - drClusterPredicate := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return false - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return false - }, - GenericFunc: func(e event.GenericEvent) bool { - return false - }, - UpdateFunc: func(e event.UpdateEvent) bool { - log.Info("Update event") - - return DRClusterUpdateOfInterest(e.ObjectOld.(*rmn.DRCluster), e.ObjectNew.(*rmn.DRCluster)) - }, - } - - return drClusterPredicate -} - -// DRClusterUpdateOfInterest checks if the new DRCluster resource as compared to the older version -// requires any attention, it checks for the following updates: -// - If any maintenance mode is reported as activated -// - If drcluster was marked for deletion -// -// TODO: Needs some logs for easier troubleshooting -func DRClusterUpdateOfInterest(oldDRCluster, newDRCluster *rmn.DRCluster) bool { - for _, mModeNew := range newDRCluster.Status.MaintenanceModes { - // Check if new conditions have failover 
activated, if not this maintenance mode is NOT of interest - conditionNew := getFailoverActivatedCondition(mModeNew) - if conditionNew == nil || - conditionNew.Status == metav1.ConditionFalse || - conditionNew.Status == metav1.ConditionUnknown { - continue - } - - // Check if failover maintenance mode was already activated as part of an older update to DRCluster, if NOT - // this change is of interest - if activated := checkFailoverActivation(oldDRCluster, mModeNew.StorageProvisioner, mModeNew.TargetID); !activated { - return true - } - } - - // Exhausted all failover activation checks, the only interesting update is deleting a drcluster. - return rmnutil.ResourceIsDeleted(newDRCluster) -} - -// checkFailoverActivation checks if provided provisioner and storage instance is activated as per the -// passed in DRCluster resource status. It currently only checks for: -// - Failover activation condition -func checkFailoverActivation(drcluster *rmn.DRCluster, provisioner string, targetID string) bool { - for _, mMode := range drcluster.Status.MaintenanceModes { - if !(mMode.StorageProvisioner == provisioner && mMode.TargetID == targetID) { - continue - } - - condition := getFailoverActivatedCondition(mMode) - if condition == nil || - condition.Status == metav1.ConditionFalse || - condition.Status == metav1.ConditionUnknown { - return false - } - - return true - } - - return false -} - -// getFailoverActivatedCondition is a helper routine that returns the FailoverActivated condition -// from a given ClusterMaintenanceMode if found, or nil otherwise -func getFailoverActivatedCondition(mMode rmn.ClusterMaintenanceMode) *metav1.Condition { - for _, condition := range mMode.Conditions { - if condition.Type != string(rmn.MModeConditionFailoverActivated) { - continue - } - - return &condition - } - - return nil -} - -// FilterDRCluster filters for DRPC resources that should be reconciled due to a DRCluster watch event -func (r *DRPlacementControlReconciler) FilterDRCluster(drcluster *rmn.DRCluster) []ctrl.Request { - log := ctrl.Log.WithName("DRPCFilter").WithName("DRCluster").WithValues("cluster", drcluster) - - var drpcCollections []DRPCAndPolicy - - var err error - - if rmnutil.ResourceIsDeleted(drcluster) { - drpcCollections, err = DRPCsUsingDRCluster(r.Client, log, drcluster) - } else { - drpcCollections, err = DRPCsFailingOverToCluster(r.Client, log, drcluster.GetName()) - } - - if err != nil { - log.Info("Failed to process filter") - - return nil - } - - requests := make([]reconcile.Request, 0) - for idx := range drpcCollections { - requests = append(requests, - reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: drpcCollections[idx].drpc.GetName(), - Namespace: drpcCollections[idx].drpc.GetNamespace(), - }, - }) - } - - return requests -} - -type DRPCAndPolicy struct { - drpc *rmn.DRPlacementControl - drPolicy *rmn.DRPolicy -} - -// DRPCsUsingDRCluster finds DRPC resources using the DRcluster. 
-func DRPCsUsingDRCluster(k8sclient client.Client, log logr.Logger, drcluster *rmn.DRCluster) ([]DRPCAndPolicy, error) { - drpolicies := &rmn.DRPolicyList{} - if err := k8sclient.List(context.TODO(), drpolicies); err != nil { - log.Error(err, "Failed to list DRPolicies", "drcluster", drcluster.GetName()) - - return nil, err - } - - found := []DRPCAndPolicy{} - - for i := range drpolicies.Items { - drpolicy := &drpolicies.Items[i] - - if rmnutil.DrpolicyContainsDrcluster(drpolicy, drcluster.GetName()) { - log.Info("Found DRPolicy referencing DRCluster", "drpolicy", drpolicy.GetName()) - - drpcs, err := DRPCsUsingDRPolicy(k8sclient, log, drpolicy) - if err != nil { - return nil, err - } - - for _, drpc := range drpcs { - found = append(found, DRPCAndPolicy{drpc: drpc, drPolicy: drpolicy}) - } - } - } - - return found, nil -} - -// DRPCsUsingDRPolicy finds DRPC resources that reference the DRPolicy. -func DRPCsUsingDRPolicy( - k8sclient client.Client, - log logr.Logger, - drpolicy *rmn.DRPolicy, -) ([]*rmn.DRPlacementControl, error) { - drpcs := &rmn.DRPlacementControlList{} - if err := k8sclient.List(context.TODO(), drpcs); err != nil { - log.Error(err, "Failed to list DRPCs", "drpolicy", drpolicy.GetName()) - - return nil, err - } - - found := []*rmn.DRPlacementControl{} - - for i := range drpcs.Items { - drpc := &drpcs.Items[i] - - if drpc.Spec.DRPolicyRef.Name != drpolicy.GetName() { - continue - } - - log.Info("Found DRPC referencing drpolicy", - "name", drpc.GetName(), - "namespace", drpc.GetNamespace(), - "drpolicy", drpolicy.GetName()) - - found = append(found, drpc) - } - - return found, nil -} - -// DRPCsFailingOverToCluster lists DRPC resources that are failing over to the passed in drcluster -// -//nolint:gocognit -func DRPCsFailingOverToCluster(k8sclient client.Client, log logr.Logger, drcluster string) ([]DRPCAndPolicy, error) { - drpolicies := &rmn.DRPolicyList{} - if err := k8sclient.List(context.TODO(), drpolicies); err != nil { - // TODO: If we get errors, do we still get an event later and/or for all changes from where we - // processed the last DRCluster update? 
- log.Error(err, "Failed to list DRPolicies") - - return nil, err - } - - drpcCollections := make([]DRPCAndPolicy, 0) - - for drpolicyIdx := range drpolicies.Items { - drpolicy := &drpolicies.Items[drpolicyIdx] - - if rmnutil.DrpolicyContainsDrcluster(drpolicy, drcluster) { - drClusters, err := GetDRClusters(context.TODO(), k8sclient, drpolicy) - if err != nil || len(drClusters) <= 1 { - log.Error(err, "Failed to get DRClusters") - - return nil, err - } - - // Skip if policy is of type metro, fake the from and to cluster - if metro, _ := dRPolicySupportsMetro(drpolicy, drClusters); metro { - log.Info("Sync DRPolicy detected, skipping!") - - break - } - - log.Info("Processing DRPolicy referencing DRCluster", "drpolicy", drpolicy.GetName()) - - drpcs, err := DRPCsFailingOverToClusterForPolicy(k8sclient, log, drpolicy, drcluster) - if err != nil { - return nil, err - } - - for idx := range drpcs { - dprcCollection := DRPCAndPolicy{ - drpc: drpcs[idx], - drPolicy: drpolicy, - } - - drpcCollections = append(drpcCollections, dprcCollection) - } - } - } - - return drpcCollections, nil -} - -// DRPCsFailingOverToClusterForPolicy filters DRPC resources that reference the DRPolicy and are failing over -// to the target cluster passed in -// -//nolint:gocognit -func DRPCsFailingOverToClusterForPolicy( - k8sclient client.Client, - log logr.Logger, - drpolicy *rmn.DRPolicy, - drcluster string, -) ([]*rmn.DRPlacementControl, error) { - drpcs := &rmn.DRPlacementControlList{} - if err := k8sclient.List(context.TODO(), drpcs); err != nil { - log.Error(err, "Failed to list DRPCs", "drpolicy", drpolicy.GetName()) - - return nil, err - } - - filteredDRPCs := make([]*rmn.DRPlacementControl, 0) - - for idx := range drpcs.Items { - drpc := &drpcs.Items[idx] - - if drpc.Spec.DRPolicyRef.Name != drpolicy.GetName() { - continue - } - - if rmnutil.ResourceIsDeleted(drpc) { - continue - } - - if !(drpc.Spec.Action == rmn.ActionFailover && drpc.Spec.FailoverCluster == drcluster) { - continue - } - - if condition := meta.FindStatusCondition(drpc.Status.Conditions, rmn.ConditionAvailable); condition != nil && - condition.Status == metav1.ConditionTrue && - condition.ObservedGeneration == drpc.Generation { - continue - } - - log.Info("DRPC detected as failing over to cluster", - "name", drpc.GetName(), - "namespace", drpc.GetNamespace(), - "drpolicy", drpolicy.GetName()) - - filteredDRPCs = append(filteredDRPCs, drpc) - } - - return filteredDRPCs, nil + APIReader client.Reader + Log logr.Logger + MCVGetter rmnutil.ManagedClusterViewGetter + Scheme *runtime.Scheme + Callback ProgressCallback + eventRecorder *rmnutil.EventReporter + savedInstanceStatus rmn.DRPlacementControlStatus + ObjStoreGetter ObjectStoreGetter + RateLimiter *workqueue.TypedRateLimiter[reconcile.Request] + numClustersQueriedSuccessfully int } // SetupWithManager sets up the controller with the Manager. 
// //nolint:funlen func (r *DRPlacementControlReconciler) SetupWithManager(mgr ctrl.Manager) error { - mwPred := ManifestWorkPredicateFunc() - - mwMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( - func(ctx context.Context, obj client.Object) []reconcile.Request { - mw, ok := obj.(*ocmworkv1.ManifestWork) - if !ok { - return []reconcile.Request{} - } - - ctrl.Log.Info(fmt.Sprintf("DRPC: Filtering ManifestWork (%s/%s)", mw.Name, mw.Namespace)) - - return filterMW(mw) - })) - - mcvPred := ManagedClusterViewPredicateFunc() - - mcvMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( - func(ctx context.Context, obj client.Object) []reconcile.Request { - mcv, ok := obj.(*viewv1beta1.ManagedClusterView) - if !ok { - return []reconcile.Request{} - } - - ctrl.Log.Info(fmt.Sprintf("DRPC: Filtering MCV (%s/%s)", mcv.Name, mcv.Namespace)) - - return filterMCV(mcv) - })) - - usrPlRulePred := PlacementRulePredicateFunc() - - usrPlRuleMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( - func(ctx context.Context, obj client.Object) []reconcile.Request { - usrPlRule, ok := obj.(*plrv1.PlacementRule) - if !ok { - return []reconcile.Request{} - } - - ctrl.Log.Info(fmt.Sprintf("DRPC: Filtering User PlacementRule (%s/%s)", usrPlRule.Name, usrPlRule.Namespace)) - - return filterUsrPlRule(usrPlRule) - })) - - usrPlmntPred := PlacementPredicateFunc() - - usrPlmntMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( - func(ctx context.Context, obj client.Object) []reconcile.Request { - usrPlmnt, ok := obj.(*clrapiv1beta1.Placement) - if !ok { - return []reconcile.Request{} - } - - ctrl.Log.Info(fmt.Sprintf("DRPC: Filtering User Placement (%s/%s)", usrPlmnt.Name, usrPlmnt.Namespace)) - - return filterUsrPlmnt(usrPlmnt) - })) - - drClusterPred := DRClusterPredicateFunc() - - drClusterMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( - func(ctx context.Context, obj client.Object) []reconcile.Request { - drCluster, ok := obj.(*rmn.DRCluster) - if !ok { - return []reconcile.Request{} - } - - ctrl.Log.Info(fmt.Sprintf("DRPC Map: Filtering DRCluster (%s)", drCluster.Name)) - - return r.FilterDRCluster(drCluster) - })) - - r.eventRecorder = rmnutil.NewEventReporter(mgr.GetEventRecorderFor("controller_DRPlacementControl")) - - options := ctrlcontroller.Options{ - MaxConcurrentReconciles: getMaxConcurrentReconciles(ctrl.Log), - } - if r.RateLimiter != nil { - options.RateLimiter = *r.RateLimiter - } - - return ctrl.NewControllerManagedBy(mgr). - WithOptions(options). - For(&rmn.DRPlacementControl{}). - Watches(&ocmworkv1.ManifestWork{}, mwMapFun, builder.WithPredicates(mwPred)). - Watches(&viewv1beta1.ManagedClusterView{}, mcvMapFun, builder.WithPredicates(mcvPred)). - Watches(&plrv1.PlacementRule{}, usrPlRuleMapFun, builder.WithPredicates(usrPlRulePred)). - Watches(&clrapiv1beta1.Placement{}, usrPlmntMapFun, builder.WithPredicates(usrPlmntPred)). - Watches(&rmn.DRCluster{}, drClusterMapFun, builder.WithPredicates(drClusterPred)). 
- Complete(r) + return r.setupWithManagerAndAddWatchers(mgr) } //nolint:lll @@ -939,11 +374,13 @@ func (r *DRPlacementControlReconciler) createDRPCInstance( return nil, err } - vrgs, _, _, err := getVRGsFromManagedClusters(r.MCVGetter, drpc, drClusters, vrgNamespace, log) + vrgs, cqs, _, err := getVRGsFromManagedClusters(r.MCVGetter, drpc, drClusters, vrgNamespace, log) if err != nil { return nil, err } + r.numClustersQueriedSuccessfully = cqs + d := &DRPCInstance{ reconciler: r, ctx: ctx, @@ -1206,18 +643,11 @@ func (r *DRPlacementControlReconciler) finalizeDRPC(ctx context.Context, drpc *r clonedPlRuleName := fmt.Sprintf(ClonedPlacementRuleNameFormat, drpc.Name, drpc.Namespace) // delete cloned placementrule, if one created. if drpc.Spec.PreferredCluster == "" { - err := r.deleteClonedPlacementRule(ctx, clonedPlRuleName, drpc.Namespace, log) - if err != nil { + if err := r.deleteClonedPlacementRule(ctx, clonedPlRuleName, drpc.Namespace, log); err != nil { return err } } - // Cleanup volsync secret-related resources (policy/plrule/binding) - err := volsync.CleanupSecretPropagation(ctx, r.Client, drpc, r.Log) - if err != nil { - return fmt.Errorf("failed to clean up volsync secret-related resources (%w)", err) - } - vrgNamespace, err := selectVRGNamespace(r.Client, r.Log, drpc, placementObj) if err != nil { return err @@ -1237,6 +667,51 @@ func (r *DRPlacementControlReconciler) finalizeDRPC(ctx context.Context, drpc *r return fmt.Errorf("failed to get DRPolicy while finalizing DRPC (%w)", err) } + // Cleanup volsync secret-related resources (policy/plrule/binding) + if err := volsync.CleanupSecretPropagation(ctx, r.Client, drpc, r.Log); err != nil { + return fmt.Errorf("failed to clean up volsync secret-related resources (%w)", err) + } + + // cleanup for VRG artifacts + if err = r.cleanupVRGs(ctx, drPolicy, log, mwu, drpc, vrgNamespace); err != nil { + return err + } + + // delete namespace manifestwork + for _, drClusterName := range rmnutil.DRPolicyClusterNames(drPolicy) { + annotations := make(map[string]string) + annotations[DRPCNameAnnotation] = drpc.Name + annotations[DRPCNamespaceAnnotation] = drpc.Namespace + + if err := mwu.DeleteNamespaceManifestWork(drClusterName, annotations); err != nil { + return err + } + } + + // delete metrics if matching labels are found + syncTimeMetricLabels := SyncTimeMetricLabels(drPolicy, drpc) + DeleteSyncTimeMetric(syncTimeMetricLabels) + + syncDurationMetricLabels := SyncDurationMetricLabels(drPolicy, drpc) + DeleteSyncDurationMetric(syncDurationMetricLabels) + + syncDataBytesMetricLabels := SyncDataBytesMetricLabels(drPolicy, drpc) + DeleteSyncDataBytesMetric(syncDataBytesMetricLabels) + + workloadProtectionLabels := WorkloadProtectionStatusLabels(drpc) + DeleteWorkloadProtectionStatusMetric(workloadProtectionLabels) + + return nil +} + +func (r *DRPlacementControlReconciler) cleanupVRGs( + ctx context.Context, + drPolicy *rmn.DRPolicy, + log logr.Logger, + mwu rmnutil.MWUtil, + drpc *rmn.DRPlacementControl, + vrgNamespace string, +) error { drClusters, err := GetDRClusters(ctx, r.Client, drPolicy) if err != nil { return fmt.Errorf("failed to get drclusters. 
Error (%w)", err) @@ -1252,10 +727,9 @@ func (r *DRPlacementControlReconciler) finalizeDRPC(ctx context.Context, drpc *r return fmt.Errorf("VRG adoption in progress") } - // delete manifestworks (VRGs) + // delete VRG manifestwork for _, drClusterName := range rmnutil.DRPolicyClusterNames(drPolicy) { - err := mwu.DeleteManifestWorksForCluster(drClusterName) - if err != nil { + if err := mwu.DeleteManifestWork(mwu.BuildManifestWorkName(rmnutil.MWTypeVRG), drClusterName); err != nil { return fmt.Errorf("%w", err) } } @@ -1264,24 +738,11 @@ func (r *DRPlacementControlReconciler) finalizeDRPC(ctx context.Context, drpc *r return fmt.Errorf("waiting for VRGs count to go to zero") } - // delete MCVs used in the previous call + // delete MCVs if err := r.deleteAllManagedClusterViews(drpc, rmnutil.DRPolicyClusterNames(drPolicy)); err != nil { return fmt.Errorf("error in deleting MCV (%w)", err) } - // delete metrics if matching labels are found - syncTimeMetricLabels := SyncTimeMetricLabels(drPolicy, drpc) - DeleteSyncTimeMetric(syncTimeMetricLabels) - - syncDurationMetricLabels := SyncDurationMetricLabels(drPolicy, drpc) - DeleteSyncDurationMetric(syncDurationMetricLabels) - - syncDataBytesMetricLabels := SyncDataBytesMetricLabels(drPolicy, drpc) - DeleteSyncDataBytesMetric(syncDataBytesMetricLabels) - - workloadProtectionLabels := WorkloadProtectionStatusLabels(drpc) - DeleteWorkloadProtectionStatusMetric(workloadProtectionLabels) - return nil } @@ -1639,7 +1100,7 @@ func getVRGsFromManagedClusters( annotations[DRPCNameAnnotation] = drpc.Name annotations[DRPCNamespaceAnnotation] = drpc.Namespace - var clustersQueriedSuccessfully int + var numClustersQueriedSuccessfully int var failedCluster string @@ -1651,7 +1112,7 @@ func getVRGsFromManagedClusters( // Only NotFound error is accepted if errors.IsNotFound(err) { log.Info(fmt.Sprintf("VRG not found on %q", drCluster.Name)) - clustersQueriedSuccessfully++ + numClustersQueriedSuccessfully++ continue } @@ -1663,7 +1124,7 @@ func getVRGsFromManagedClusters( continue } - clustersQueriedSuccessfully++ + numClustersQueriedSuccessfully++ if rmnutil.ResourceIsDeleted(drCluster) { log.Info("Skipping VRG on deleted drcluster", "drcluster", drCluster.Name, "vrg", vrg.Name) @@ -1677,15 +1138,15 @@ func getVRGsFromManagedClusters( } // We are done if we successfully queried all drClusters - if clustersQueriedSuccessfully == len(drClusters) { - return vrgs, clustersQueriedSuccessfully, "", nil + if numClustersQueriedSuccessfully == len(drClusters) { + return vrgs, numClustersQueriedSuccessfully, "", nil } - if clustersQueriedSuccessfully == 0 { + if numClustersQueriedSuccessfully == 0 { return vrgs, 0, "", fmt.Errorf("failed to retrieve VRGs from clusters") } - return vrgs, clustersQueriedSuccessfully, failedCluster, nil + return vrgs, numClustersQueriedSuccessfully, failedCluster, nil } func (r *DRPlacementControlReconciler) deleteClonedPlacementRule(ctx context.Context, @@ -2145,6 +1606,13 @@ func (r *DRPlacementControlReconciler) createOrUpdatePlacementDecision(ctx conte if plDecision, err = r.createPlacementDecision(ctx, placement); err != nil { return err } + } else if plDecision.GetLabels()[rmnutil.ExcludeFromVeleroBackup] != "true" { + err = rmnutil.NewResourceUpdater(plDecision). + AddLabel(rmnutil.ExcludeFromVeleroBackup, "true"). 
+ Update(ctx, r.Client) + if err != nil { + return err + } } plDecision.Status = clrapiv1beta1.PlacementDecisionStatus{ @@ -2195,7 +1663,7 @@ func (r *DRPlacementControlReconciler) createPlacementDecision(ctx context.Conte plDecision.ObjectMeta.Labels = map[string]string{ clrapiv1beta1.PlacementLabel: placement.GetName(), - "velero.io/exclude-from-backup": "true", + rmnutil.ExcludeFromVeleroBackup: "true", } owner := metav1.NewControllerRef(placement, clrapiv1beta1.GroupVersion.WithKind("Placement")) diff --git a/internal/controller/drplacementcontrol_controller_test.go b/internal/controller/drplacementcontrol_controller_test.go index 84c807c4a..d65596972 100644 --- a/internal/controller/drplacementcontrol_controller_test.go +++ b/internal/controller/drplacementcontrol_controller_test.go @@ -26,11 +26,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - spokeClusterV1 "github.com/open-cluster-management/api/cluster/v1" - ocmworkv1 "github.com/open-cluster-management/api/work/v1" viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1" + spokeClusterV1 "open-cluster-management.io/api/cluster/v1" + ocmworkv1 "open-cluster-management.io/api/work/v1" - clrapiv1beta1 "github.com/open-cluster-management-io/api/cluster/v1beta1" rmn "github.com/ramendr/ramen/api/v1alpha1" controllers "github.com/ramendr/ramen/internal/controller" argocdv1alpha1hack "github.com/ramendr/ramen/internal/controller/argocd" @@ -38,6 +37,7 @@ import ( plrv1 "github.com/stolostron/multicloud-operators-placementrule/pkg/apis/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + clrapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" gppv1 "open-cluster-management.io/governance-policy-propagator/api/v1" ) @@ -172,6 +172,18 @@ var ( }, }, } + + placementDecision = &clrapiv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(controllers.PlacementDecisionName, UserPlacementName, 1), + Namespace: DefaultDRPCNamespace, + Labels: map[string]string{ + "cluster.open-cluster-management.io/decision-group-index": "0", + "cluster.open-cluster-management.io/decision-group-name": "", + "cluster.open-cluster-management.io/placement": UserPlacementName, + }, + }, + } ) func getSyncDRPolicy() *rmn.DRPolicy { @@ -691,8 +703,8 @@ func deleteUserPlacementRule(name, namespace string) { Expect(k8sClient.Delete(context.TODO(), userPlacementRule)).Should(Succeed()) } -func deleteUserPlacement(name, namespace string) { - userPlacement := getLatestUserPlacement(name, namespace) +func deleteUserPlacement() { + userPlacement := getLatestUserPlacement(UserPlacementName, DefaultDRPCNamespace) Expect(k8sClient.Delete(context.TODO(), userPlacement)).Should(Succeed()) } @@ -701,7 +713,7 @@ func deleteDRPC() { Expect(k8sClient.Delete(context.TODO(), drpc)).Should(Succeed()) } -func deleteNamespaceMWsFromAllClusters(namespace string) { +func ensureNamespaceMWsDeletedFromAllClusters(namespace string) { foundMW := &ocmworkv1.ManifestWork{} mwName := fmt.Sprintf(rmnutil.ManifestWorkNameFormat, DRPCCommonName, namespace, rmnutil.MWTypeNS) err := k8sClient.Get(context.TODO(), @@ -709,14 +721,14 @@ func deleteNamespaceMWsFromAllClusters(namespace string) { foundMW) if err == nil { - Expect(k8sClient.Delete(context.TODO(), foundMW)).Should(Succeed()) + Expect(foundMW).To(BeNil()) } err = k8sClient.Get(context.TODO(), types.NamespacedName{Name: mwName, Namespace: West1ManagedCluster}, foundMW) if err == nil { - 
Expect(k8sClient.Delete(context.TODO(), foundMW)).Should(Succeed()) + Expect(foundMW).To(BeNil()) } } @@ -884,6 +896,14 @@ func createDRClusters(inClusters []*spokeClusterV1.ManagedCluster) { } } +func createPlacementDecision() { + deletePlacementDecision() + + plDecision := placementDecision.DeepCopy() + err := k8sClient.Create(context.TODO(), plDecision) + Expect(err).NotTo(HaveOccurred()) +} + func createDRClustersAsync() { createDRClusters(asyncClusters) } @@ -1123,6 +1143,7 @@ func InitialDeploymentAsync(namespace, placementName, homeCluster string, plType createManagedClusters(asyncClusters) createDRClustersAsync() createDRPolicyAsync() + createPlacementDecision() return CreatePlacementAndDRPC(namespace, placementName, homeCluster, plType) } @@ -1235,7 +1256,7 @@ func getManifestWorkCount(homeClusterNamespace string) int { return len(manifestWorkList.Items) - 1 } -func verifyNSManifestWorkBackupLabelNotExist(resourceName, namespaceString, managedCluster string) { +func verifyNSManifestWork(resourceName, namespaceString, managedCluster string) { mw := &ocmworkv1.ManifestWork{} mwName := fmt.Sprintf(rmnutil.ManifestWorkNameFormat, resourceName, namespaceString, rmnutil.MWTypeNS) err := k8sClient.Get(context.TODO(), @@ -1245,6 +1266,7 @@ func verifyNSManifestWorkBackupLabelNotExist(resourceName, namespaceString, mana Expect(err).NotTo(HaveOccurred()) Expect(mw).ToNot(BeNil()) + Expect(mw.Spec.DeleteOption).ToNot(BeNil()) Expect(mw.Labels[rmnutil.OCMBackupLabelKey]).To(Equal("")) } @@ -1643,6 +1665,20 @@ func deleteDRPolicySync() { Expect(k8sClient.Delete(context.TODO(), getSyncDRPolicy())).To(Succeed()) } +func deletePlacementDecision() { + err := k8sClient.Delete(context.TODO(), placementDecision) + Expect(client.IgnoreNotFound(err)).To(Succeed()) + + Eventually(func() bool { + resource := &clrapiv1beta1.PlacementDecision{} + + return errors.IsNotFound(apiReader.Get(context.TODO(), types.NamespacedName{ + Namespace: placementDecision.Namespace, + Name: placementDecision.Name, + }, resource)) + }, timeout, interval).Should(BeTrue()) +} + func fenceCluster(cluster string, manual bool) { latestDRCluster := getLatestDRCluster(cluster) if manual { @@ -1709,7 +1745,7 @@ func verifyInitialDRPCDeployment(userPlacement client.Object, preferredCluster s Expect(latestDRPC.GetAnnotations()[controllers.DRPCAppNamespace]). 
To(Equal(getVRGNamespace(userPlacement.GetNamespace()))) - verifyNSManifestWorkBackupLabelNotExist(latestDRPC.Name, getVRGNamespace(latestDRPC.Namespace), + verifyNSManifestWork(latestDRPC.Name, getVRGNamespace(latestDRPC.Namespace), East1ManagedCluster) } @@ -1748,7 +1784,7 @@ func verifyFailoverToSecondary(placementObj client.Object, toCluster string, func verifyActionResultForPlacement(placement *clrapiv1beta1.Placement, homeCluster string, plType PlacementType) { placementDecision := getPlacementDecision(placement.GetName(), placement.GetNamespace()) Expect(placementDecision).ShouldNot(BeNil()) - Expect(placementDecision.GetLabels()["velero.io/exclude-from-backup"]).Should(Equal("true")) + Expect(placementDecision.GetLabels()[rmnutil.ExcludeFromVeleroBackup]).Should(Equal("true")) Expect(placementDecision.Status.Decisions[0].ClusterName).Should(Equal(homeCluster)) vrg, err := getVRGFromManifestWork(homeCluster, placement.GetNamespace()) Expect(err).NotTo(HaveOccurred()) @@ -1978,9 +2014,9 @@ var _ = Describe("DRPlacementControl Reconciler", func() { Expect(getManifestWorkCount(East1ManagedCluster)).Should(BeElementOf(3, 4)) // DRCluster + VRG MW deleteDRPC() waitForCompletion("deleted") - Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(2)) // DRCluster + NS MW only + Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(1)) // DRCluster Expect(getManagedClusterViewCount(East1ManagedCluster)).Should(Equal(0)) // NS + VRG MCV - deleteNamespaceMWsFromAllClusters(DefaultDRPCNamespace) + ensureNamespaceMWsDeletedFromAllClusters(DefaultDRPCNamespace) }) It("should delete the DRPC causing its referenced drpolicy to be deleted"+ " by drpolicy controller since no DRPCs reference it anymore", func() { @@ -2041,7 +2077,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { }) When("Deleting user Placement", func() { It("Should cleanup DRPC", func() { - deleteUserPlacement(UserPlacementName, DefaultDRPCNamespace) + deleteUserPlacement() drpc := getLatestDRPC(DefaultDRPCNamespace) _, condition := getDRPCCondition(&drpc.Status, rmn.ConditionPeerReady) Expect(condition).NotTo(BeNil()) @@ -2052,9 +2088,9 @@ var _ = Describe("DRPlacementControl Reconciler", func() { Expect(getManifestWorkCount(East1ManagedCluster)).Should(BeElementOf(3, 4)) // DRCluster + VRG + NS MW deleteDRPC() waitForCompletion("deleted") - Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(2)) // DRCluster + NS MW only + Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(1)) // DRCluster Expect(getManagedClusterViewCount(East1ManagedCluster)).Should(Equal(0)) // NS + VRG MCV - deleteNamespaceMWsFromAllClusters(DefaultDRPCNamespace) + ensureNamespaceMWsDeletedFromAllClusters(DefaultDRPCNamespace) }) It("should delete the DRPC causing its referenced drpolicy to be deleted"+ " by drpolicy controller since no DRPCs reference it anymore", func() { @@ -2129,7 +2165,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { }) When("Deleting user Placement", func() { It("Should cleanup DRPC", func() { - deleteUserPlacement(UserPlacementName, DefaultDRPCNamespace) + deleteUserPlacement() drpc := getLatestDRPC(DefaultDRPCNamespace) _, condition := getDRPCCondition(&drpc.Status, rmn.ConditionPeerReady) Expect(condition).NotTo(BeNil()) @@ -2140,9 +2176,9 @@ var _ = Describe("DRPlacementControl Reconciler", func() { Expect(getManifestWorkCount(East1ManagedCluster)).Should(BeElementOf(3, 4)) // DRCluster + VRG + NS MW deleteDRPC() waitForCompletion("deleted") - 
Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(2)) // DRCluster + NS MW only + Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(1)) // DRCluster Expect(getManagedClusterViewCount(East1ManagedCluster)).Should(Equal(0)) // NS + VRG MCV - deleteNamespaceMWsFromAllClusters(ApplicationNamespace) + ensureNamespaceMWsDeletedFromAllClusters(ApplicationNamespace) deleteAppSet() UseApplicationSet = false }) @@ -2224,7 +2260,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { By("\n\n*** DELETE DRPC ***\n\n") deleteDRPC() waitForCompletion("deleted") - Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(2)) // DRCluster+NS MW + Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(1)) // DRCluster deleteDRPolicySync() deleteDRClustersSync() }) @@ -2295,17 +2331,16 @@ var _ = Describe("DRPlacementControl Reconciler", func() { Expect(getManifestWorkCount(East1ManagedCluster)).Should(BeElementOf(3, 4)) // DRCluster + NS + VRG MW deleteDRPC() waitForCompletion("deleted") - Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(2)) // DRCluster + NS MW + Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(1)) // DRCluster deleteDRPolicySync() deleteDRClustersSync() - deleteNamespaceMWsFromAllClusters(DefaultDRPCNamespace) + ensureNamespaceMWsDeletedFromAllClusters(DefaultDRPCNamespace) }) }) }) Context("DRPlacementControl Reconciler HubRecovery (Subscription)", func() { var userPlacementRule1 *plrv1.PlacementRule - var drpc1 *rmn.DRPlacementControl Specify("DRClusters", func() { populateDRClusters() @@ -2318,7 +2353,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { createDRPolicyAsync() var placementObj client.Object - placementObj, drpc1 = CreatePlacementAndDRPC( + placementObj, _ = CreatePlacementAndDRPC( DefaultDRPCNamespace, UserPlacementRuleName, East1ManagedCluster, UsePlacementRule) userPlacementRule1 = placementObj.(*plrv1.PlacementRule) Expect(userPlacementRule1).NotTo(BeNil()) @@ -2444,8 +2479,8 @@ var _ = Describe("DRPlacementControl Reconciler", func() { clearFakeUserPlacementRuleStatus(UserPlacementRuleName, DefaultDRPCNamespace) clearDRPCStatus() expectedAction := rmn.ActionRelocate - expectedPhase := rmn.Relocated - exptectedPorgression := rmn.ProgressionCleaningUp + expectedPhase := rmn.DRState("") + exptectedPorgression := rmn.ProgressionStatus("") verifyDRPCStateAndProgression(expectedAction, expectedPhase, exptectedPorgression) // User intervention is required (simulate user intervention) @@ -2508,8 +2543,9 @@ var _ = Describe("DRPlacementControl Reconciler", func() { When("Deleting DRPC", func() { It("Should delete all VRGs", func() { - Expect(k8sClient.Delete(context.TODO(), drpc1)).Should(Succeed()) - deleteNamespaceMWsFromAllClusters(DefaultDRPCNamespace) + deleteDRPC() + waitForCompletion("deleted") + ensureNamespaceMWsDeletedFromAllClusters(DefaultDRPCNamespace) }) }) Specify("delete drclusters", func() { @@ -2519,7 +2555,6 @@ var _ = Describe("DRPlacementControl Reconciler", func() { Context("DRPlacementControl Reconciler HubRecovery VRG Adoption (Subscription)", func() { var userPlacementRule1 *plrv1.PlacementRule - var drpc1 *rmn.DRPlacementControl Specify("DRClusters", func() { populateDRClusters() @@ -2538,7 +2573,7 @@ var _ = Describe("DRPlacementControl Reconciler", func() { createVRGMW(DRPCCommonName, DefaultDRPCNamespace, East1ManagedCluster) var placementObj client.Object - placementObj, drpc1 = CreatePlacementAndDRPC( + placementObj, _ = CreatePlacementAndDRPC( 
DefaultDRPCNamespace, UserPlacementRuleName, East1ManagedCluster, UsePlacementRule) userPlacementRule1 = placementObj.(*plrv1.PlacementRule) Expect(userPlacementRule1).NotTo(BeNil()) @@ -2564,8 +2599,9 @@ var _ = Describe("DRPlacementControl Reconciler", func() { When("Deleting DRPC", func() { It("Should delete all VRGs", func() { - Expect(k8sClient.Delete(context.TODO(), drpc1)).Should(Succeed()) - deleteNamespaceMWsFromAllClusters(DefaultDRPCNamespace) + deleteDRPC() + waitForCompletion("deleted") + ensureNamespaceMWsDeletedFromAllClusters(DefaultDRPCNamespace) }) }) @@ -2604,12 +2640,12 @@ var _ = Describe("DRPlacementControl Reconciler", func() { }) }) Specify("Cleanup after tests", func() { - deleteUserPlacement(UserPlacementName, DefaultDRPCNamespace) + deleteUserPlacement() deleteDRPC() waitForCompletion("deleted") - Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(2)) // DRCluster + NS MW only + Expect(getManifestWorkCount(East1ManagedCluster)).Should(Equal(1)) // DRCluster Expect(getManagedClusterViewCount(East1ManagedCluster)).Should(Equal(0)) // NS + VRG MCV - deleteNamespaceMWsFromAllClusters(DefaultDRPCNamespace) + ensureNamespaceMWsDeletedFromAllClusters(DefaultDRPCNamespace) deleteDRPolicyAsync() ensureDRPolicyIsDeleted(drpc.Spec.DRPolicyRef.Name) deleteDRClustersAsync() diff --git a/internal/controller/drplacementcontrol_watcher.go b/internal/controller/drplacementcontrol_watcher.go new file mode 100644 index 000000000..3abc009d1 --- /dev/null +++ b/internal/controller/drplacementcontrol_watcher.go @@ -0,0 +1,673 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package controllers + +import ( + "context" + "fmt" + "reflect" + + "github.com/go-logr/logr" + viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1" + plrv1 "github.com/stolostron/multicloud-operators-placementrule/pkg/apis/apps/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ocmworkv1 "open-cluster-management.io/api/work/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + rmn "github.com/ramendr/ramen/api/v1alpha1" + rmnutil "github.com/ramendr/ramen/internal/controller/util" + clrapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +func ManifestWorkPredicateFunc() predicate.Funcs { + mwPredicate := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + log := ctrl.Log.WithName("Predicate").WithName("ManifestWork") + + oldMW, ok := e.ObjectOld.DeepCopyObject().(*ocmworkv1.ManifestWork) + if !ok { + log.Info("Failed to deep copy older ManifestWork") + + return false + } + newMW, ok := e.ObjectNew.DeepCopyObject().(*ocmworkv1.ManifestWork) + if !ok { + log.Info("Failed to deep copy newer ManifestWork") + + return false + } + + log.Info(fmt.Sprintf("Update event for MW %s/%s", oldMW.Name, oldMW.Namespace)) + + return !reflect.DeepEqual(oldMW.Status, newMW.Status) + }, + } + + return 
mwPredicate +} + +func filterMW(mw *ocmworkv1.ManifestWork) []ctrl.Request { + if mw.Annotations[DRPCNameAnnotation] == "" || + mw.Annotations[DRPCNamespaceAnnotation] == "" { + return []ctrl.Request{} + } + + return []ctrl.Request{ + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: mw.Annotations[DRPCNameAnnotation], + Namespace: mw.Annotations[DRPCNamespaceAnnotation], + }, + }, + } +} + +func ManagedClusterViewPredicateFunc() predicate.Funcs { + log := ctrl.Log.WithName("Predicate").WithName("MCV") + mcvPredicate := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldMCV, ok := e.ObjectOld.DeepCopyObject().(*viewv1beta1.ManagedClusterView) + if !ok { + log.Info("Failed to deep copy older MCV") + + return false + } + newMCV, ok := e.ObjectNew.DeepCopyObject().(*viewv1beta1.ManagedClusterView) + if !ok { + log.Info("Failed to deep copy newer MCV") + + return false + } + + log.Info(fmt.Sprintf("Update event for MCV %s/%s", oldMCV.Name, oldMCV.Namespace)) + + return !reflect.DeepEqual(oldMCV.Status, newMCV.Status) + }, + } + + return mcvPredicate +} + +func filterMCV(mcv *viewv1beta1.ManagedClusterView) []ctrl.Request { + if mcv.Annotations[DRPCNameAnnotation] == "" || + mcv.Annotations[DRPCNamespaceAnnotation] == "" { + return []ctrl.Request{} + } + + return []ctrl.Request{ + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: mcv.Annotations[DRPCNameAnnotation], + Namespace: mcv.Annotations[DRPCNamespaceAnnotation], + }, + }, + } +} + +func PlacementRulePredicateFunc() predicate.Funcs { + log := ctrl.Log.WithName("DRPCPredicate").WithName("UserPlRule") + usrPlRulePredicate := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + log.Info("Delete event") + + return true + }, + } + + return usrPlRulePredicate +} + +func filterUsrPlRule(usrPlRule *plrv1.PlacementRule) []ctrl.Request { + if usrPlRule.Annotations[DRPCNameAnnotation] == "" || + usrPlRule.Annotations[DRPCNamespaceAnnotation] == "" { + return []ctrl.Request{} + } + + return []ctrl.Request{ + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: usrPlRule.Annotations[DRPCNameAnnotation], + Namespace: usrPlRule.Annotations[DRPCNamespaceAnnotation], + }, + }, + } +} + +func PlacementPredicateFunc() predicate.Funcs { + log := ctrl.Log.WithName("DRPCPredicate").WithName("UserPlmnt") + usrPlmntPredicate := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + log.Info("Delete event") + + return true + }, + } + + return usrPlmntPredicate +} + +func filterUsrPlmnt(usrPlmnt *clrapiv1beta1.Placement) []ctrl.Request { + if usrPlmnt.Annotations[DRPCNameAnnotation] == "" || + usrPlmnt.Annotations[DRPCNamespaceAnnotation] == "" { + return []ctrl.Request{} + } + + return []ctrl.Request{ + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: usrPlmnt.Annotations[DRPCNameAnnotation], + Namespace: usrPlmnt.Annotations[DRPCNamespaceAnnotation], + }, + 
}, + } +} + +func DRClusterPredicateFunc() predicate.Funcs { + log := ctrl.Log.WithName("DRPCPredicate").WithName("DRCluster") + drClusterPredicate := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + log.Info("Update event") + + return DRClusterUpdateOfInterest(e.ObjectOld.(*rmn.DRCluster), e.ObjectNew.(*rmn.DRCluster)) + }, + } + + return drClusterPredicate +} + +func DRPolicyPredicateFunc() predicate.Funcs { + log := ctrl.Log.WithName("DRPCPredicate").WithName("DRPolicy") + drPolicyPredicate := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + log.Info("Update event") + + return RequiresDRPCReconciliation(e.ObjectOld.(*rmn.DRPolicy), e.ObjectNew.(*rmn.DRPolicy)) + }, + } + + return drPolicyPredicate +} + +// DRClusterUpdateOfInterest checks if the new DRCluster resource as compared to the older version +// requires any attention, it checks for the following updates: +// - If any maintenance mode is reported as activated +// - If drcluster was marked for deletion +// +// TODO: Needs some logs for easier troubleshooting +func DRClusterUpdateOfInterest(oldDRCluster, newDRCluster *rmn.DRCluster) bool { + for _, mModeNew := range newDRCluster.Status.MaintenanceModes { + // Check if new conditions have failover activated, if not this maintenance mode is NOT of interest + conditionNew := getFailoverActivatedCondition(mModeNew) + if conditionNew == nil || + conditionNew.Status == metav1.ConditionFalse || + conditionNew.Status == metav1.ConditionUnknown { + continue + } + + // Check if failover maintenance mode was already activated as part of an older update to DRCluster, if NOT + // this change is of interest + if activated := checkFailoverActivation(oldDRCluster, mModeNew.StorageProvisioner, mModeNew.TargetID); !activated { + return true + } + } + + // Exhausted all failover activation checks, the only interesting update is deleting a drcluster. + return rmnutil.ResourceIsDeleted(newDRCluster) +} + +// RequiresDRPCReconciliation determines if the updated DRPolicy resource, compared to the previous version, +// requires reconciliation of the DRPCs. Reconciliation is needed if the DRPolicy has been newly activated. +// This check helps avoid delays in reconciliation by ensuring timely updates when necessary. +func RequiresDRPCReconciliation(oldDRPolicy, newDRPolicy *rmn.DRPolicy) bool { + err1 := rmnutil.DrpolicyValidated(oldDRPolicy) + err2 := rmnutil.DrpolicyValidated(newDRPolicy) + + return err1 != err2 +} + +// checkFailoverActivation checks if provided provisioner and storage instance is activated as per the +// passed in DRCluster resource status. 
It currently only checks for: +// - Failover activation condition +func checkFailoverActivation(drcluster *rmn.DRCluster, provisioner string, targetID string) bool { + for _, mMode := range drcluster.Status.MaintenanceModes { + if !(mMode.StorageProvisioner == provisioner && mMode.TargetID == targetID) { + continue + } + + condition := getFailoverActivatedCondition(mMode) + if condition == nil || + condition.Status == metav1.ConditionFalse || + condition.Status == metav1.ConditionUnknown { + return false + } + + return true + } + + return false +} + +// getFailoverActivatedCondition is a helper routine that returns the FailoverActivated condition +// from a given ClusterMaintenanceMode if found, or nil otherwise +func getFailoverActivatedCondition(mMode rmn.ClusterMaintenanceMode) *metav1.Condition { + for _, condition := range mMode.Conditions { + if condition.Type != string(rmn.MModeConditionFailoverActivated) { + continue + } + + return &condition + } + + return nil +} + +// FilterDRCluster filters for DRPC resources that should be reconciled due to a DRCluster watch event +func (r *DRPlacementControlReconciler) FilterDRCluster(drcluster *rmn.DRCluster) []ctrl.Request { + log := ctrl.Log.WithName("DRPCFilter").WithName("DRCluster").WithValues("cluster", drcluster) + + var drpcCollections []DRPCAndPolicy + + var err error + + if rmnutil.ResourceIsDeleted(drcluster) { + drpcCollections, err = DRPCsUsingDRCluster(r.Client, log, drcluster) + } else { + drpcCollections, err = DRPCsFailingOverToCluster(r.Client, log, drcluster.GetName()) + } + + if err != nil { + log.Info("Failed to process filter") + + return nil + } + + requests := make([]reconcile.Request, 0) + for idx := range drpcCollections { + requests = append(requests, + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: drpcCollections[idx].drpc.GetName(), + Namespace: drpcCollections[idx].drpc.GetNamespace(), + }, + }) + } + + return requests +} + +type DRPCAndPolicy struct { + drpc *rmn.DRPlacementControl + drPolicy *rmn.DRPolicy +} + +// DRPCsUsingDRCluster finds DRPC resources using the DRcluster. +func DRPCsUsingDRCluster(k8sclient client.Client, log logr.Logger, drcluster *rmn.DRCluster) ([]DRPCAndPolicy, error) { + drpolicies := &rmn.DRPolicyList{} + if err := k8sclient.List(context.TODO(), drpolicies); err != nil { + log.Error(err, "Failed to list DRPolicies", "drcluster", drcluster.GetName()) + + return nil, err + } + + found := []DRPCAndPolicy{} + + for i := range drpolicies.Items { + drpolicy := &drpolicies.Items[i] + + if rmnutil.DrpolicyContainsDrcluster(drpolicy, drcluster.GetName()) { + log.Info("Found DRPolicy referencing DRCluster", "drpolicy", drpolicy.GetName()) + + drpcs, err := DRPCsUsingDRPolicy(k8sclient, log, drpolicy) + if err != nil { + return nil, err + } + + for _, drpc := range drpcs { + found = append(found, DRPCAndPolicy{drpc: drpc, drPolicy: drpolicy}) + } + } + } + + return found, nil +} + +// DRPCsUsingDRPolicy finds DRPC resources that reference the DRPolicy. 
+func DRPCsUsingDRPolicy( + k8sclient client.Client, + log logr.Logger, + drpolicy *rmn.DRPolicy, +) ([]*rmn.DRPlacementControl, error) { + drpcs := &rmn.DRPlacementControlList{} + if err := k8sclient.List(context.TODO(), drpcs); err != nil { + log.Error(err, "Failed to list DRPCs", "drpolicy", drpolicy.GetName()) + + return nil, err + } + + found := []*rmn.DRPlacementControl{} + + for i := range drpcs.Items { + drpc := &drpcs.Items[i] + + if drpc.Spec.DRPolicyRef.Name != drpolicy.GetName() { + continue + } + + log.Info("Found DRPC referencing drpolicy", + "name", drpc.GetName(), + "namespace", drpc.GetNamespace(), + "drpolicy", drpolicy.GetName()) + + found = append(found, drpc) + } + + return found, nil +} + +// DRPCsFailingOverToCluster lists DRPC resources that are failing over to the passed in drcluster +// +//nolint:gocognit +func DRPCsFailingOverToCluster(k8sclient client.Client, log logr.Logger, drcluster string) ([]DRPCAndPolicy, error) { + drpolicies := &rmn.DRPolicyList{} + if err := k8sclient.List(context.TODO(), drpolicies); err != nil { + // TODO: If we get errors, do we still get an event later and/or for all changes from where we + // processed the last DRCluster update? + log.Error(err, "Failed to list DRPolicies") + + return nil, err + } + + drpcCollections := make([]DRPCAndPolicy, 0) + + for drpolicyIdx := range drpolicies.Items { + drpolicy := &drpolicies.Items[drpolicyIdx] + + if rmnutil.DrpolicyContainsDrcluster(drpolicy, drcluster) { + drClusters, err := GetDRClusters(context.TODO(), k8sclient, drpolicy) + if err != nil || len(drClusters) <= 1 { + log.Error(err, "Failed to get DRClusters") + + return nil, err + } + + // Skip if policy is of type metro, fake the from and to cluster + if metro, _ := dRPolicySupportsMetro(drpolicy, drClusters); metro { + log.Info("Sync DRPolicy detected, skipping!") + + break + } + + log.Info("Processing DRPolicy referencing DRCluster", "drpolicy", drpolicy.GetName()) + + drpcs, err := DRPCsFailingOverToClusterForPolicy(k8sclient, log, drpolicy, drcluster) + if err != nil { + return nil, err + } + + for idx := range drpcs { + dprcCollection := DRPCAndPolicy{ + drpc: drpcs[idx], + drPolicy: drpolicy, + } + + drpcCollections = append(drpcCollections, dprcCollection) + } + } + } + + return drpcCollections, nil +} + +// DRPCsFailingOverToClusterForPolicy filters DRPC resources that reference the DRPolicy and are failing over +// to the target cluster passed in +// +//nolint:gocognit +func DRPCsFailingOverToClusterForPolicy( + k8sclient client.Client, + log logr.Logger, + drpolicy *rmn.DRPolicy, + drcluster string, +) ([]*rmn.DRPlacementControl, error) { + drpcs := &rmn.DRPlacementControlList{} + if err := k8sclient.List(context.TODO(), drpcs); err != nil { + log.Error(err, "Failed to list DRPCs", "drpolicy", drpolicy.GetName()) + + return nil, err + } + + filteredDRPCs := make([]*rmn.DRPlacementControl, 0) + + for idx := range drpcs.Items { + drpc := &drpcs.Items[idx] + + if drpc.Spec.DRPolicyRef.Name != drpolicy.GetName() { + continue + } + + if rmnutil.ResourceIsDeleted(drpc) { + continue + } + + if !(drpc.Spec.Action == rmn.ActionFailover && drpc.Spec.FailoverCluster == drcluster) { + continue + } + + if condition := meta.FindStatusCondition(drpc.Status.Conditions, rmn.ConditionAvailable); condition != nil && + condition.Status == metav1.ConditionTrue && + condition.ObservedGeneration == drpc.Generation { + continue + } + + log.Info("DRPC detected as failing over to cluster", + "name", drpc.GetName(), + "namespace", drpc.GetNamespace(), 
+ "drpolicy", drpolicy.GetName()) + + filteredDRPCs = append(filteredDRPCs, drpc) + } + + return filteredDRPCs, nil +} + +// FilterDRPCsForDRPolicyUpdate filters and returns the DRPC resources that need reconciliation +// in response to a DRPolicy update event. This ensures that only relevant DRPCs are processed +// based on the changes in the associated DRPolicy. +func (r *DRPlacementControlReconciler) FilterDRPCsForDRPolicyUpdate(drpolicy *rmn.DRPolicy) []ctrl.Request { + log := ctrl.Log.WithName("DRPCFilter").WithName("DRPolicy").WithValues("policy", drpolicy) + + drpcs := &rmn.DRPlacementControlList{} + + err := r.List(context.TODO(), drpcs) + if err != nil { + log.Info("Failed to process DRPolicy filter") + + return []ctrl.Request{} + } + + requests := make([]reconcile.Request, 0) + + for _, drpc := range drpcs.Items { + if drpc.Spec.DRPolicyRef.Name == drpolicy.GetName() { + requests = append(requests, + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: drpc.GetName(), + Namespace: drpc.GetNamespace(), + }, + }) + } + } + + return requests +} + +//nolint:funlen +func (r *DRPlacementControlReconciler) setupWithManagerAndAddWatchers(mgr ctrl.Manager) error { + mwPred := ManifestWorkPredicateFunc() + + mwMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( + func(ctx context.Context, obj client.Object) []reconcile.Request { + mw, ok := obj.(*ocmworkv1.ManifestWork) + if !ok { + return []reconcile.Request{} + } + + ctrl.Log.Info(fmt.Sprintf("DRPC: Filtering ManifestWork (%s/%s)", mw.Name, mw.Namespace)) + + return filterMW(mw) + })) + + mcvPred := ManagedClusterViewPredicateFunc() + + mcvMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( + func(ctx context.Context, obj client.Object) []reconcile.Request { + mcv, ok := obj.(*viewv1beta1.ManagedClusterView) + if !ok { + return []reconcile.Request{} + } + + ctrl.Log.Info(fmt.Sprintf("DRPC: Filtering MCV (%s/%s)", mcv.Name, mcv.Namespace)) + + return filterMCV(mcv) + })) + + usrPlRulePred := PlacementRulePredicateFunc() + + usrPlRuleMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( + func(ctx context.Context, obj client.Object) []reconcile.Request { + usrPlRule, ok := obj.(*plrv1.PlacementRule) + if !ok { + return []reconcile.Request{} + } + + ctrl.Log.Info(fmt.Sprintf("DRPC: Filtering User PlacementRule (%s/%s)", usrPlRule.Name, usrPlRule.Namespace)) + + return filterUsrPlRule(usrPlRule) + })) + + usrPlmntPred := PlacementPredicateFunc() + + usrPlmntMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( + func(ctx context.Context, obj client.Object) []reconcile.Request { + usrPlmnt, ok := obj.(*clrapiv1beta1.Placement) + if !ok { + return []reconcile.Request{} + } + + ctrl.Log.Info(fmt.Sprintf("DRPC: Filtering User Placement (%s/%s)", usrPlmnt.Name, usrPlmnt.Namespace)) + + return filterUsrPlmnt(usrPlmnt) + })) + + drClusterPred := DRClusterPredicateFunc() + + drClusterMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( + func(ctx context.Context, obj client.Object) []reconcile.Request { + drCluster, ok := obj.(*rmn.DRCluster) + if !ok { + return []reconcile.Request{} + } + + ctrl.Log.Info(fmt.Sprintf("DRPC Map: Filtering DRCluster (%s)", drCluster.Name)) + + return r.FilterDRCluster(drCluster) + })) + + drPolicyPred := DRPolicyPredicateFunc() + + drPolicyMapFun := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( + func(ctx context.Context, obj client.Object) []reconcile.Request { + drPolicy, ok := obj.(*rmn.DRPolicy) + if !ok { + return []reconcile.Request{} + } + + 
ctrl.Log.Info(fmt.Sprintf("DRPC Map: Filtering DRPolicy (%s)", drPolicy.Name)) + + return r.FilterDRPCsForDRPolicyUpdate(drPolicy) + })) + + r.eventRecorder = rmnutil.NewEventReporter(mgr.GetEventRecorderFor("controller_DRPlacementControl")) + + options := ctrlcontroller.Options{ + MaxConcurrentReconciles: getMaxConcurrentReconciles(ctrl.Log), + } + if r.RateLimiter != nil { + options.RateLimiter = *r.RateLimiter + } + + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(&rmn.DRPlacementControl{}). + Watches(&ocmworkv1.ManifestWork{}, mwMapFun, builder.WithPredicates(mwPred)). + Watches(&viewv1beta1.ManagedClusterView{}, mcvMapFun, builder.WithPredicates(mcvPred)). + Watches(&plrv1.PlacementRule{}, usrPlRuleMapFun, builder.WithPredicates(usrPlRulePred)). + Watches(&clrapiv1beta1.Placement{}, usrPlmntMapFun, builder.WithPredicates(usrPlmntPred)). + Watches(&rmn.DRCluster{}, drClusterMapFun, builder.WithPredicates(drClusterPred)). + Watches(&rmn.DRPolicy{}, drPolicyMapFun, builder.WithPredicates(drPolicyPred)). + Complete(r) +} diff --git a/internal/controller/drplacementcontrolvolsync.go b/internal/controller/drplacementcontrolvolsync.go index 792fc8a1c..6e5b6624d 100644 --- a/internal/controller/drplacementcontrolvolsync.go +++ b/internal/controller/drplacementcontrolvolsync.go @@ -44,16 +44,16 @@ func (d *DRPCInstance) ensureVolSyncReplicationCommon(srcCluster string) error { // Make sure we have Source and Destination VRGs - Source should already have been created at this point d.setProgression(rmn.ProgressionEnsuringVolSyncSetup) + // Create or update the destination VRG + err := d.createVolSyncDestManifestWork(srcCluster) + if err != nil { + return err + } + vrgMWCount := d.mwu.GetVRGManifestWorkCount(rmnutil.DRPolicyClusterNames(d.drPolicy)) const maxNumberOfVRGs = 2 if len(d.vrgs) != maxNumberOfVRGs || vrgMWCount != maxNumberOfVRGs { - // Create the destination VRG - err := d.createVolSyncDestManifestWork(srcCluster) - if err != nil { - return err - } - return WaitForVolSyncManifestWorkCreation } diff --git a/internal/controller/drpolicy_controller.go b/internal/controller/drpolicy_controller.go index de28aa5aa..a51fb896c 100644 --- a/internal/controller/drpolicy_controller.go +++ b/internal/controller/drpolicy_controller.go @@ -36,7 +36,7 @@ type DRPolicyReconciler struct { Log logr.Logger Scheme *runtime.Scheme ObjectStoreGetter ObjectStoreGetter - RateLimiter *workqueue.RateLimiter + RateLimiter *workqueue.TypedRateLimiter[reconcile.Request] } // ReasonValidationFailed is set when the DRPolicy could not be validated or is not valid diff --git a/internal/controller/kubeobjects/requests.go b/internal/controller/kubeobjects/requests.go index 3454fe2bc..f889790c3 100644 --- a/internal/controller/kubeobjects/requests.go +++ b/internal/controller/kubeobjects/requests.go @@ -93,10 +93,10 @@ type HookSpec struct { Type string `json:"type,omitempty"` - Command []string `json:"command,omitempty"` + Command string `json:"command,omitempty"` //+optional - Timeout *metav1.Duration `json:"timeout,omitempty"` + Timeout int `json:"timeout,omitempty"` //+optional Container *string `json:"container,omitempty"` diff --git a/internal/controller/kubeobjects/velero/requests.go b/internal/controller/kubeobjects/velero/requests.go index a0dbbfb30..d5067e2ea 100644 --- a/internal/controller/kubeobjects/velero/requests.go +++ b/internal/controller/kubeobjects/velero/requests.go @@ -13,6 +13,7 @@ package velero import ( "context" "errors" + "time" "github.com/go-logr/logr" pkgerrors 
"github.com/pkg/errors" @@ -371,7 +372,8 @@ func getBackupSpecFromObjectsSpec(objectsSpec kubeobjects.Spec) velero.BackupSpe IncludedResources: objectsSpec.IncludedResources, // exclude VRs from Backup so VRG can create them: see https://github.com/RamenDR/ramen/issues/884 ExcludedResources: append(objectsSpec.ExcludedResources, "volumereplications.replication.storage.openshift.io", - "replicationsources.volsync.backube", "replicationdestinations.volsync.backube"), + "replicationsources.volsync.backube", "replicationdestinations.volsync.backube", + "PersistentVolumeClaims", "PersistentVolumes"), LabelSelector: objectsSpec.LabelSelector, OrLabelSelectors: objectsSpec.OrLabelSelectors, TTL: metav1.Duration{}, // TODO: set default here @@ -397,8 +399,8 @@ func getBackupHooks(hooks []kubeobjects.HookSpec) velero.BackupHooks { { Exec: &velero.ExecHook{ Container: dereferenceOrZeroValueIfNil(hook.Container), - Timeout: dereferenceOrZeroValueIfNil(hook.Timeout), - Command: hook.Command, + Timeout: metav1.Duration{Duration: time.Duration(hook.Timeout)}, + Command: []string{hook.Command}, }, }, }, diff --git a/internal/controller/protectedvolumereplicationgrouplist_controller.go b/internal/controller/protectedvolumereplicationgrouplist_controller.go index e443ae31d..4728012e3 100644 --- a/internal/controller/protectedvolumereplicationgrouplist_controller.go +++ b/internal/controller/protectedvolumereplicationgrouplist_controller.go @@ -15,6 +15,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/reconcile" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,7 +29,7 @@ type ProtectedVolumeReplicationGroupListReconciler struct { APIReader client.Reader ObjStoreGetter ObjectStoreGetter Scheme *runtime.Scheme - RateLimiter *workqueue.RateLimiter + RateLimiter *workqueue.TypedRateLimiter[reconcile.Request] } type ProtectedVolumeReplicationGroupListInstance struct { diff --git a/internal/controller/ramenconfig_test.go b/internal/controller/ramenconfig_test.go index 03401ffe8..9ca218075 100644 --- a/internal/controller/ramenconfig_test.go +++ b/internal/controller/ramenconfig_test.go @@ -6,11 +6,11 @@ package controllers_test import ( "context" - . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ramen "github.com/ramendr/ramen/api/v1alpha1" controllers "github.com/ramendr/ramen/internal/controller" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" @@ -26,10 +26,30 @@ func configMapCreate(ramenConfig *ramen.RamenConfig) { configMap, err := controllers.ConfigMapNew(ramenNamespace, configMapName, ramenConfig) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient.Create(context.TODO(), configMap)).To(Succeed()) - DeferCleanup(k8sClient.Delete, context.TODO(), configMap) } } +func configMapDelete() error { + for _, configMapName := range configMapNames { + cm := &corev1.ConfigMap{} + + err := k8sClient.Get(context.TODO(), types.NamespacedName{ + Namespace: ramenNamespace, + Name: configMapName, + }, cm) + if err != nil && !errors.IsNotFound(err) { + return err + } + + err = k8sClient.Delete(context.TODO(), cm) + if err != nil && !errors.IsNotFound(err) { + return err + } + } + + return nil +} + func configMapUpdate() { ramenConfigYaml, err := yaml.Marshal(ramenConfig) Expect(err).NotTo(HaveOccurred()) diff --git a/internal/controller/replicationgroupdestination_controller.go b/internal/controller/replicationgroupdestination_controller.go index cff0d5390..461ceabae 100644 --- a/internal/controller/replicationgroupdestination_controller.go +++ b/internal/controller/replicationgroupdestination_controller.go @@ -8,7 +8,7 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" "github.com/backube/volsync/controllers/statemachine" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/internal/controller/cephfscg" "github.com/ramendr/ramen/internal/controller/util" diff --git a/internal/controller/replicationgroupsource_controller.go b/internal/controller/replicationgroupsource_controller.go index d92cc89fe..81e1abd54 100644 --- a/internal/controller/replicationgroupsource_controller.go +++ b/internal/controller/replicationgroupsource_controller.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - vgsv1alphfa1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumegroupsnapshot/v1alpha1" + vgsv1alphfa1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/errors" diff --git a/internal/controller/status.go b/internal/controller/status.go index 0c4d84dfc..31547c9d2 100644 --- a/internal/controller/status.go +++ b/internal/controller/status.go @@ -50,25 +50,26 @@ const ( // VRG condition reasons const ( - VRGConditionReasonUnused = "Unused" - VRGConditionReasonInitializing = "Initializing" - VRGConditionReasonReplicating = "Replicating" - VRGConditionReasonReplicated = "Replicated" - VRGConditionReasonReady = "Ready" - VRGConditionReasonDataProtected = "DataProtected" - VRGConditionReasonProgressing = "Progressing" - VRGConditionReasonClusterDataRestored = "Restored" - VRGConditionReasonError = "Error" - VRGConditionReasonErrorUnknown = "UnknownError" - VRGConditionReasonUploading = "Uploading" - VRGConditionReasonUploaded = "Uploaded" - VRGConditionReasonUploadError = "UploadError" - VRGConditionReasonVolSyncRepSourceInited = "SourceInitialized" - 
VRGConditionReasonVolSyncRepDestInited = "DestinationInitialized" - VRGConditionReasonVolSyncPVsRestored = "Restored" - VRGConditionReasonVolSyncFinalSyncInProgress = "Syncing" - VRGConditionReasonVolSyncFinalSyncComplete = "Synced" - VRGConditionReasonClusterDataAnnotationFailed = "AnnotationFailed" + VRGConditionReasonUnused = "Unused" + VRGConditionReasonInitializing = "Initializing" + VRGConditionReasonReplicating = "Replicating" + VRGConditionReasonReplicated = "Replicated" + VRGConditionReasonReady = "Ready" + VRGConditionReasonDataProtected = "DataProtected" + VRGConditionReasonProgressing = "Progressing" + VRGConditionReasonClusterDataRestored = "Restored" + VRGConditionReasonError = "Error" + VRGConditionReasonErrorUnknown = "UnknownError" + VRGConditionReasonUploading = "Uploading" + VRGConditionReasonUploaded = "Uploaded" + VRGConditionReasonUploadError = "UploadError" + VRGConditionReasonVolSyncRepSourceInited = "SourceInitialized" + VRGConditionReasonVolSyncRepDestInited = "DestinationInitialized" + VRGConditionReasonVolSyncPVsRestored = "Restored" + VRGConditionReasonVolSyncFinalSyncInProgress = "Syncing" + VRGConditionReasonVolSyncFinalSyncComplete = "Synced" + VRGConditionReasonClusterDataAnnotationFailed = "AnnotationFailed" + VRGConditionReasonPVOrPVCOwnedByDifferentOwner = "PVOrPVCOwnerDifferentError" ) const clusterDataProtectedTrueMessage = "Kube objects protected" diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index ed7329a25..7f26d6645 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -28,19 +28,19 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" volrep "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumegroupsnapshot/v1alpha1" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" - ocmclv1 "github.com/open-cluster-management/api/cluster/v1" - ocmworkv1 "github.com/open-cluster-management/api/work/v1" + groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1" plrv1 "github.com/stolostron/multicloud-operators-placementrule/pkg/apis/apps/v1" + ocmclv1 "open-cluster-management.io/api/cluster/v1" + ocmworkv1 "open-cluster-management.io/api/work/v1" cpcv1 "open-cluster-management.io/config-policy-controller/api/v1" gppv1 "open-cluster-management.io/governance-policy-propagator/api/v1" - clrapiv1beta1 "github.com/open-cluster-management-io/api/cluster/v1beta1" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" ramencontrollers "github.com/ramendr/ramen/internal/controller" argocdv1alpha1hack "github.com/ramendr/ramen/internal/controller/argocd" @@ -48,6 +48,8 @@ import ( Recipe "github.com/ramendr/recipe/api/v1alpha1" velero "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + clrapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" // +kubebuilder:scaffold:imports ) @@ -211,6 
+213,9 @@ var _ = BeforeSuite(func() { err = clrapiv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = clusterv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = argocdv1alpha1hack.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) @@ -242,6 +247,7 @@ var _ = BeforeSuite(func() { ramenConfig.DrClusterOperator.S3SecretDistributionEnabled = true ramenConfig.MultiNamespace.FeatureEnabled = true configMapCreate(ramenConfig) + DeferCleanup(configMapDelete) s3Secrets[0] = corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Namespace: ramenNamespace, Name: "s3secret0"}, @@ -323,8 +329,8 @@ var _ = BeforeSuite(func() { err = util.IndexFieldsForVSHandler(context.TODO(), k8sManager.GetFieldIndexer()) Expect(err).ToNot(HaveOccurred()) - rateLimiter := workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(10*time.Millisecond, 100*time.Millisecond), + rateLimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](10*time.Millisecond, 100*time.Millisecond), ) Expect((&ramencontrollers.DRClusterReconciler{ diff --git a/internal/controller/util/cephfs_cg.go b/internal/controller/util/cephfs_cg.go index 2f7672631..2cceaced4 100644 --- a/internal/controller/util/cephfs_cg.go +++ b/internal/controller/util/cephfs_cg.go @@ -7,8 +7,8 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" ramenutils "github.com/backube/volsync/controllers/utils" "github.com/go-logr/logr" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumegroupsnapshot/v1alpha1" - vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" diff --git a/internal/controller/util/cephfs_cg_test.go b/internal/controller/util/cephfs_cg_test.go index 5ee2635db..dd83e962c 100644 --- a/internal/controller/util/cephfs_cg_test.go +++ b/internal/controller/util/cephfs_cg_test.go @@ -5,8 +5,8 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" ramenutils "github.com/backube/volsync/controllers/utils" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumegroupsnapshot/v1alpha1" - vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/ramendr/ramen/api/v1alpha1" diff --git a/internal/controller/util/labels.go b/internal/controller/util/labels.go index 19e42043f..a1bcaa9b9 100644 --- a/internal/controller/util/labels.go +++ b/internal/controller/util/labels.go @@ -12,7 +12,8 @@ const ( labelOwnerNamespaceName = "ramendr.openshift.io/owner-namespace-name" labelOwnerName = "ramendr.openshift.io/owner-name" - MModesLabel = "ramendr.openshift.io/maintenancemodes" + MModesLabel = "ramendr.openshift.io/maintenancemodes" + ExcludeFromVeleroBackup = "velero.io/exclude-from-backup" ) type Labels map[string]string @@ -69,3 +70,25 @@ func OwnerNamespacedName(owner metav1.Object) types.NamespacedName { Name: ownerName, } } + +func DoesObjectOwnerLabelsMatch(object, owner metav1.Object) bool { + objectLabels := object.GetLabels() + pvcOwnerNS, okNS := objectLabels[labelOwnerNamespaceName] + pvcOwnerName, okName := objectLabels[labelOwnerName] + if (!okNS && okName) || (okNS && !okName) { + return false + } else if !okNS && !okName { + return true + } + ownerLabels := OwnerLabels(owner) + vrgNS := ownerLabels[labelOwnerNamespaceName] + vrgName := ownerLabels[labelOwnerName] + if vrgNS != pvcOwnerNS { + return false + } + + if vrgName != pvcOwnerName { + return false + } + return true +} diff --git a/internal/controller/util/managedcluster.go b/internal/controller/util/managedcluster.go index 1027fe31e..0a13c214c 100644 --- a/internal/controller/util/managedcluster.go +++ b/internal/controller/util/managedcluster.go @@ -7,9 +7,9 @@ import ( "context" "fmt" - ocmv1 "github.com/open-cluster-management/api/cluster/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + ocmv1 "open-cluster-management.io/api/cluster/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/internal/controller/util/misc.go b/internal/controller/util/misc.go index cd152d502..da2dab237 100644 --- a/internal/controller/util/misc.go +++ b/internal/controller/util/misc.go @@ -93,7 +93,7 @@ func AddLabel(obj client.Object, key, value string) bool { labels = map[string]string{} } - if _, ok := labels[key]; !ok { + if v, ok := labels[key]; !ok || v != value { labels[key] = value obj.SetLabels(labels) @@ -103,6 +103,26 @@ func AddLabel(obj client.Object, key, value string) bool { return !labelAdded } +func UpdateLabel(obj client.Object, key, newValue string) bool { + const labelUpdated = true + + labels := obj.GetLabels() + if labels == nil { + return !labelUpdated + } + + if currValue, ok := labels[key]; ok { + if currValue != newValue { + labels[key] = newValue + obj.SetLabels(labels) + + return labelUpdated + } + } + + return !labelUpdated +} + func HasLabel(obj client.Object, key string) bool { labels := obj.GetLabels() for k := range labels { @@ -222,3 +242,12 @@ func IsCGEnabled(annotations map[string]string) bool { return annotations[IsCGEnabledAnnotation] == "true" } + +func TrimToK8sResourceNameLength(name string) string { + const maxLength = 63 + if len(name) > maxLength { + return name[:maxLength] + } + + return name +} diff --git a/internal/controller/util/mw_util.go b/internal/controller/util/mw_util.go index 381279842..55b7a04d9 100644 --- a/internal/controller/util/mw_util.go +++ b/internal/controller/util/mw_util.go @@ -10,7 +10,6 @@ import ( "reflect" "github.com/go-logr/logr" - ocmworkv1 "github.com/open-cluster-management/api/work/v1" errorswrapper "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -21,6 +20,7 @@ import ( 
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" + ocmworkv1 "open-cluster-management.io/api/work/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -354,6 +354,10 @@ func (mwu *MWUtil) CreateOrUpdateNamespaceManifest( manifests, annotations) + manifestWork.Spec.DeleteOption = &ocmworkv1.DeleteOption{ + PropagationPolicy: ocmworkv1.DeletePropagationPolicyTypeOrphan, + } + return mwu.createOrUpdateManifestWork(manifestWork, managedClusterNamespace) } @@ -634,25 +638,42 @@ func (mwu *MWUtil) GetVRGManifestWorkCount(drClusters []string) int { return count } -func (mwu *MWUtil) DeleteManifestWorksForCluster(clusterName string) error { - // VRG - err := mwu.deleteManifestWorkWrapper(clusterName, MWTypeVRG) +func (mwu *MWUtil) DeleteNamespaceManifestWork(clusterName string, annotations map[string]string) error { + mwName := mwu.BuildManifestWorkName(MWTypeNS) + mw := &ocmworkv1.ManifestWork{} + + err := mwu.Client.Get(mwu.Ctx, types.NamespacedName{Name: mwName, Namespace: clusterName}, mw) if err != nil { - mwu.Log.Error(err, "failed to delete MW for VRG") + if errors.IsNotFound(err) { + return nil + } - return fmt.Errorf("failed to delete ManifestWork for VRG in namespace %s (%w)", clusterName, err) + return fmt.Errorf("failed to retrieve manifestwork for type: %s. Error: %w", mwName, err) } - // The ManifestWork that created a Namespace is intentionally left on the server + // check if the mw already has a delete Timestamp, the mw is already deleted. + if ResourceIsDeleted(mw) { + return nil + } - return nil -} + // check if the manifestwork has delete Option set + // if not set, call CreateOrUpdateNamespaceManifest such that it is + // updated with the delete option + if mw.Spec.DeleteOption == nil { + err = mwu.CreateOrUpdateNamespaceManifest(mwu.InstName, mwu.TargetNamespace, clusterName, annotations) + if err != nil { + mwu.Log.Info("error creating namespace via ManifestWork", "error", err, "cluster", clusterName) -func (mwu *MWUtil) deleteManifestWorkWrapper(fromCluster string, mwType string) error { - mwName := mwu.BuildManifestWorkName(mwType) - mwNamespace := fromCluster + return err + } + } - return mwu.DeleteManifestWork(mwName, mwNamespace) + err = mwu.DeleteManifestWork(mwName, clusterName) + if err != nil { + return fmt.Errorf("%w", err) + } + + return nil } func (mwu *MWUtil) DeleteManifestWork(mwName, mwNamespace string) error { diff --git a/internal/controller/util/mw_util_test.go b/internal/controller/util/mw_util_test.go index 1ea9b29c7..c64280e05 100644 --- a/internal/controller/util/mw_util_test.go +++ b/internal/controller/util/mw_util_test.go @@ -8,9 +8,9 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - ocmworkv1 "github.com/open-cluster-management/api/work/v1" rmnutil "github.com/ramendr/ramen/internal/controller/util" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ocmworkv1 "open-cluster-management.io/api/work/v1" ) var _ = Describe("IsManifestInAppliedState", func() { diff --git a/internal/controller/util/nslock.go b/internal/controller/util/nslock.go new file mode 100644 index 000000000..1471217d4 --- /dev/null +++ b/internal/controller/util/nslock.go @@ -0,0 +1,64 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "sync" +) + +// NamespaceLock implements atomic operation for namespace. It will have the namespace +// having multiple vrgs in which VRGs are being processed. 
+type NamespaceLock struct {
+	nslock map[string]*sync.Mutex
+}
+
+// NewNamespaceLock returns a new NamespaceLock.
+func NewNamespaceLock() *NamespaceLock {
+	return &NamespaceLock{
+		nslock: make(map[string]*sync.Mutex),
+	}
+}
+
+// TryToAcquireLock tries to acquire the lock for processing a VRG in a namespace
+// holding multiple VRGs and returns true if successful.
+// If processing has already begun in the namespace, it returns false; the caller is
+// expected to requeue the request and retry, and to call Release once its own
+// processing completes.
+func (nl *NamespaceLock) TryToAcquireLock(namespace string) bool {
+	// Reuse the namespace's mutex if it already exists, otherwise create and lock a new one.
+	if mux, ok := nl.nslock[namespace]; ok && mux != nil {
+		return mux.TryLock()
+	}
+
+	nl.nslock[namespace] = new(sync.Mutex)
+	nl.nslock[namespace].Lock()
+
+	return true
+}
+
+// Release removes the lock on the namespace.
+func (nl *NamespaceLock) Release(namespace string) {
+	nl.nslock[namespace].Unlock()
+	delete(nl.nslock, namespace)
+}
diff --git a/internal/controller/util/nslock_test.go b/internal/controller/util/nslock_test.go
new file mode 100644
index 000000000..4bb648f3c
--- /dev/null
+++ b/internal/controller/util/nslock_test.go
@@ -0,0 +1,18 @@
+// SPDX-FileCopyrightText: The RamenDR authors
+// SPDX-License-Identifier: Apache-2.0
+
+package util_test
+
+import (
+	. "github.com/onsi/ginkgo/v2"
+	. 
"github.com/onsi/gomega" + "github.com/ramendr/ramen/internal/controller/util" +) + +var _ = Describe("Testing Locks", func() { + nsLock := util.NewNamespaceLock() + Expect(nsLock.TryToAcquireLock("test")).To(BeTrue()) + Expect(nsLock.TryToAcquireLock("test")).To(BeFalse()) + nsLock.Release("test") + Expect(nsLock.TryToAcquireLock("test")).To(BeTrue()) +}) diff --git a/internal/controller/util/pvcs_util.go b/internal/controller/util/pvcs_util.go index f4f513672..98ed4a1a6 100644 --- a/internal/controller/util/pvcs_util.go +++ b/internal/controller/util/pvcs_util.go @@ -230,6 +230,18 @@ func isPodReady(podConditions []corev1.PodCondition) bool { return false } +func GetPVC(ctx context.Context, k8sClient client.Client, pvcNamespacedName types.NamespacedName, +) (*corev1.PersistentVolumeClaim, error) { + pvc := &corev1.PersistentVolumeClaim{} + + err := k8sClient.Get(ctx, pvcNamespacedName, pvc) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + return pvc, nil +} + func DeletePVC(ctx context.Context, k8sClient client.Client, pvcName, namespace string, diff --git a/internal/controller/util/util_suite_test.go b/internal/controller/util/util_suite_test.go index 8d04be472..25bfa00db 100644 --- a/internal/controller/util/util_suite_test.go +++ b/internal/controller/util/util_suite_test.go @@ -12,8 +12,8 @@ import ( volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" "github.com/go-logr/logr" - groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumegroupsnapshot/v1alpha1" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + groupsnapv1alpha1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/onsi/gomega/format" diff --git a/internal/controller/volsync/secret_propagator.go b/internal/controller/volsync/secret_propagator.go index 1c6971cfc..ff88a1e8c 100644 --- a/internal/controller/volsync/secret_propagator.go +++ b/internal/controller/volsync/secret_propagator.go @@ -174,7 +174,7 @@ func (sp *secretPropagator) reconcileSecretPropagationPolicy() error { return fmt.Errorf("%w", err) } - util.AddLabel(policy, "velero.io/exclude-from-backup", "true") + util.AddLabel(policy, util.ExcludeFromVeleroBackup, "true") policy.Spec = policyv1.PolicySpec{ Disabled: false, diff --git a/internal/controller/volsync/secret_propagator_test.go b/internal/controller/volsync/secret_propagator_test.go index 2685d3228..c83dcc6c1 100644 --- a/internal/controller/volsync/secret_propagator_test.go +++ b/internal/controller/volsync/secret_propagator_test.go @@ -214,7 +214,7 @@ var _ = Describe("Secret_propagator", func() { Expect(plBindingSubject.APIGroup).To(Equal("policy.open-cluster-management.io")) Expect(plBindingSubject.Kind).To(Equal("Policy")) Expect(plBindingSubject.Name).To(Equal(createdPolicy.GetName())) - Expect(createdPolicy.GetLabels()["velero.io/exclude-from-backup"]).Should(Equal("true")) + Expect(createdPolicy.GetLabels()[util.ExcludeFromVeleroBackup]).Should(Equal("true")) }) Context("When Policy name combined with namespace is longer than 62 characters", func() { diff --git a/internal/controller/volsync/volsync_suite_test.go b/internal/controller/volsync/volsync_suite_test.go index 5f567aae4..cee30b858 100644 --- a/internal/controller/volsync/volsync_suite_test.go +++ b/internal/controller/volsync/volsync_suite_test.go @@ -13,7 +13,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" plrulev1 "github.com/stolostron/multicloud-operators-placementrule/pkg/apis/apps/v1" "go.uber.org/zap/zapcore" storagev1 "k8s.io/api/storage/v1" diff --git a/internal/controller/volsync/vshandler.go b/internal/controller/volsync/vshandler.go index a33cd7b1a..6457c9355 100644 --- a/internal/controller/volsync/vshandler.go +++ b/internal/controller/volsync/vshandler.go @@ -10,7 +10,7 @@ import ( "strings" "github.com/go-logr/logr" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" diff --git a/internal/controller/volsync/vshandler_test.go b/internal/controller/volsync/vshandler_test.go index 9f9c00102..8de0105cd 100644 --- a/internal/controller/volsync/vshandler_test.go +++ b/internal/controller/volsync/vshandler_test.go @@ -9,7 +9,7 @@ import ( "strings" "time" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -1216,14 +1216,12 @@ var _ = Describe("VolSync_Handler", func() { pvc := &corev1.PersistentVolumeClaim{} JustBeforeEach(func() { // Common checks for everything in this context - pvc should be created with correct spec - Expect(ensurePVCErr).NotTo(HaveOccurred()) - Eventually(func() error { return k8sClient.Get(ctx, types.NamespacedName{ Name: pvcName, Namespace: testNamespace.GetName(), }, pvc) - }, maxWait, interval).Should(Succeed()) + }, maxWait, interval).Should(Succeed(), fmt.Sprintf("Original error %v", ensurePVCErr)) Expect(pvc.GetName()).To(Equal(pvcName)) Expect(pvc.Spec.AccessModes).To(Equal([]corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce})) diff --git a/internal/controller/volumereplicationgroup_controller.go b/internal/controller/volumereplicationgroup_controller.go index 73d772695..71517ade4 100644 --- a/internal/controller/volumereplicationgroup_controller.go +++ b/internal/controller/volumereplicationgroup_controller.go @@ -53,8 +53,9 @@ type VolumeReplicationGroupReconciler struct { Scheme *runtime.Scheme eventRecorder *rmnutil.EventReporter kubeObjects kubeobjects.RequestsManager - RateLimiter *workqueue.RateLimiter + RateLimiter *workqueue.TypedRateLimiter[reconcile.Request] veleroCRsAreWatched bool + locks *rmnutil.NamespaceLock } // SetupWithManager sets up the controller with the Manager. @@ -65,11 +66,11 @@ func (r *VolumeReplicationGroupReconciler) SetupWithManager( r.Log.Info("Adding VolumeReplicationGroup controller") - rateLimiter := workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 1*time.Minute), + rateLimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](1*time.Second, 1*time.Minute), // defaults from client-go //nolint: gomnd - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + &workqueue.TypedBucketRateLimiter[reconcile.Request]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) if r.RateLimiter != nil { rateLimiter = *r.RateLimiter @@ -115,6 +116,8 @@ func (r *VolumeReplicationGroupReconciler) SetupWithManager( r.Log.Info("Kube object protection disabled; don't watch kube objects requests") } + r.locks = rmnutil.NewNamespaceLock() + return ctrlBuilder.Complete(r) } @@ -437,6 +440,13 @@ func (r *VolumeReplicationGroupReconciler) Reconcile(ctx context.Context, req ct "Please install velero/oadp and restart the operator", v.instance.Namespace, v.instance.Name) } + lockedns, err := v.vrgParallelProcessingCheck(adminNamespaceVRG) + + if err != nil { + // Requeue in order to get the lock and try vrg processing again + return ctrl.Result{Requeue: true}, nil + } + v.volSyncHandler = volsync.NewVSHandler(ctx, r.Client, log, v.instance, v.instance.Spec.Async, cephFSCSIDriverNameOrDefault(v.ramenConfig), volSyncDestinationCopyMethodOrDefault(v.ramenConfig), adminNamespaceVRG) @@ -451,6 +461,11 @@ func (r *VolumeReplicationGroupReconciler) Reconcile(ctx context.Context, req ct "Initializing VolumeReplicationGroup") res := v.processVRG() + if lockedns != "" { + v.reconciler.locks.Release(lockedns) + v.log.Info("****ASN, released lock ", "ns=", lockedns, " vrg name=", v.instance.Name) + } + // Test for RBD(setup exists), CephFS delayResetIfRequeueTrue(&res, v.log) log.Info("Reconcile return", "result", res, "VolRep count", len(v.volRepPVCs), "VolSync count", len(v.volSyncPVCs)) @@ -1339,6 +1354,7 @@ func (v *VRGInstance) updateVRGConditions() { volSyncDataProtected, 
v.aggregateVolRepDataProtectedCondition(), ) + // TODO: Check for possible cases here. logAndSet(VRGConditionTypeClusterDataProtected, volSyncClusterDataProtected, v.aggregateVolRepClusterDataProtectedCondition(), @@ -1626,3 +1642,37 @@ func (r *VolumeReplicationGroupReconciler) addKubeObjectsOwnsAndWatches(ctrlBuil return ctrlBuilder } + +func (v *VRGInstance) vrgParallelProcessingCheck(adminNamespaceVRG bool) (string, error) { + ns := v.instance.Namespace + + if !adminNamespaceVRG { + vrgList := &ramendrv1alpha1.VolumeReplicationGroupList{} + listOps := &client.ListOptions{ + Namespace: ns, + } + err := v.reconciler.APIReader.List(context.Background(), vrgList, listOps) + + if err != nil { + v.log.Error(err, "Unable to list the VRGs in the", " namespace ", ns) + + return "", err + } + + // if the number of vrgs in the ns is more than 1, lock is needed. + if len(vrgList.Items) > 1 { + isLockAcquired := v.reconciler.locks.TryToAcquireLock(ns) + v.log.Info("****ASN, checking lock ", "ns=", ns, " isLockAcquired=", isLockAcquired, " vrg name=", v.instance.Name) + if !isLockAcquired { + // Acquiring lock failed, VRG reconcile should be requeued + return "", fmt.Errorf("error aquiring lock on the namespace %s", ns) + } else { + return ns, nil + } + } else { + return "", nil + } + } else { + return "", nil + } +} diff --git a/internal/controller/vrg_kubeobjects.go b/internal/controller/vrg_kubeobjects.go index f13c1e106..ea02e3ab9 100644 --- a/internal/controller/vrg_kubeobjects.go +++ b/internal/controller/vrg_kubeobjects.go @@ -23,6 +23,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" ) +var ErrWorkflowNotFound = fmt.Errorf("backup or restore workflow not found") + func kubeObjectsCaptureInterval(kubeObjectProtectionSpec *ramen.KubeObjectProtectionSpec) time.Duration { if kubeObjectProtectionSpec.CaptureInterval == nil { return ramen.KubeObjectProtectionCaptureIntervalDefault @@ -709,7 +711,20 @@ func kubeObjectsRequestsWatch( } func getCaptureGroups(recipe Recipe.Recipe) ([]kubeobjects.CaptureSpec, error) { - workflow := recipe.Spec.CaptureWorkflow + var workflow *Recipe.Workflow + + for _, w := range recipe.Spec.Workflows { + if w.Name == Recipe.BackupWorkflowName { + workflow = w + + break + } + } + + if workflow == nil { + return nil, ErrWorkflowNotFound + } + resources := make([]kubeobjects.CaptureSpec, len(workflow.Sequence)) for index, resource := range workflow.Sequence { @@ -729,7 +744,20 @@ func getCaptureGroups(recipe Recipe.Recipe) ([]kubeobjects.CaptureSpec, error) { } func getRecoverGroups(recipe Recipe.Recipe) ([]kubeobjects.RecoverSpec, error) { - workflow := recipe.Spec.RecoverWorkflow + var workflow *Recipe.Workflow + + for _, w := range recipe.Spec.Workflows { + if w.Name == Recipe.RestoreWorkflowName { + workflow = w + + break + } + } + + if workflow == nil { + return nil, ErrWorkflowNotFound + } + resources := make([]kubeobjects.RecoverSpec, len(workflow.Sequence)) for index, resource := range workflow.Sequence { diff --git a/internal/controller/vrg_kubeobjects_test.go b/internal/controller/vrg_kubeobjects_test.go index 2e32086cd..373ccf5e2 100644 --- a/internal/controller/vrg_kubeobjects_test.go +++ b/internal/controller/vrg_kubeobjects_test.go @@ -5,8 +5,6 @@ package controllers //nolint: testpackage import ( - "time" - . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -25,9 +23,7 @@ var _ = Describe("VRG_KubeObjectProtection", func() { var group *Recipe.Group BeforeEach(func() { - duration, err := time.ParseDuration("30s") - - Expect(err).ToNot(HaveOccurred()) + duration := 30 hook = &Recipe.Hook{ Namespace: namespaceName, @@ -43,8 +39,8 @@ var _ = Describe("VRG_KubeObjectProtection", func() { { Name: "checkpoint", Container: "main", - Timeout: &metav1.Duration{Duration: duration}, - Command: []string{"bash", "/scripts/checkpoint.sh"}, + Timeout: duration, + Command: "bash /scripts/checkpoint.sh", }, }, Chks: []*Recipe.Check{}, diff --git a/internal/controller/vrg_pvc_selector_test.go b/internal/controller/vrg_pvc_selector_test.go index abf5b12c6..318f8f9fa 100644 --- a/internal/controller/vrg_pvc_selector_test.go +++ b/internal/controller/vrg_pvc_selector_test.go @@ -201,9 +201,7 @@ func getVRGDefinitionWithKubeObjectProtection(hasPVCSelectorLabels bool, namespa } func getTestHook() *Recipe.Hook { - duration, err := time.ParseDuration("30s") - - Expect(err).ToNot(HaveOccurred()) + duration := 30 return &Recipe.Hook{ Name: "hook-single", @@ -218,8 +216,8 @@ func getTestHook() *Recipe.Hook { { Name: "checkpoint", Container: "main", - Timeout: &metav1.Duration{Duration: duration}, - Command: []string{"bash", "/scripts/checkpoint.sh"}, + Timeout: duration, + Command: "bash /scripts/checkpoint.sh", }, }, Chks: []*Recipe.Check{}, @@ -270,16 +268,19 @@ func getRecipeDefinition(namespace string) *Recipe.Recipe { Groups: []*Recipe.Group{getTestGroup()}, Volumes: getTestVolumeGroup(), Hooks: []*Recipe.Hook{getTestHook()}, - CaptureWorkflow: &Recipe.Workflow{ - Sequence: []map[string]string{ - { - "group": "test-group-volume", - }, - { - "group": "test-group", - }, - { - "hook": "test-hook", + Workflows: []*Recipe.Workflow{ + { + Name: "backup", + Sequence: []map[string]string{ + { + "group": "test-group-volume", + }, + { + "group": "test-group", + }, + { + "hook": "test-hook", + }, }, }, }, diff --git a/internal/controller/vrg_recipe.go b/internal/controller/vrg_recipe.go index 26de83f61..5c46a50fb 100644 --- a/internal/controller/vrg_recipe.go +++ b/internal/controller/vrg_recipe.go @@ -188,25 +188,25 @@ func recipeWorkflowsGet(recipe recipe.Recipe, recipeElements *RecipeElements, vr ) error { var err error - if recipe.Spec.CaptureWorkflow == nil { + recipeElements.CaptureWorkflow, err = getCaptureGroups(recipe) + if err != nil && err != ErrWorkflowNotFound { + return fmt.Errorf("failed to get groups from capture workflow: %w", err) + } + + if err != nil { recipeElements.CaptureWorkflow = captureWorkflowDefault(vrg, ramenConfig) - } else { - recipeElements.CaptureWorkflow, err = getCaptureGroups(recipe) - if err != nil { - return fmt.Errorf("failed to get groups from capture workflow: %w", err) - } } - if recipe.Spec.RecoverWorkflow == nil { + recipeElements.RecoverWorkflow, err = getRecoverGroups(recipe) + if err != nil && err != ErrWorkflowNotFound { + return fmt.Errorf("failed to get groups from recovery workflow: %w", err) + } + + if err != nil { recipeElements.RecoverWorkflow = recoverWorkflowDefault(vrg, ramenConfig) - } else { - recipeElements.RecoverWorkflow, err = getRecoverGroups(recipe) - if err != nil { - return fmt.Errorf("failed to get groups from recovery workflow: %w", err) - } } - return err + return nil } func recipeNamespacesValidate(recipeElements RecipeElements, vrg ramen.VolumeReplicationGroup, diff --git a/internal/controller/vrg_recipe_test.go b/internal/controller/vrg_recipe_test.go index 39454d200..692e3e0ca 
100644 --- a/internal/controller/vrg_recipe_test.go +++ b/internal/controller/vrg_recipe_test.go @@ -132,7 +132,7 @@ var _ = Describe("VolumeReplicationGroupRecipe", func() { Ops: []*recipe.Operation{ { Name: namespaceName, - Command: []string{namespaceName}, + Command: namespaceName, }, }, } @@ -145,6 +145,8 @@ var _ = Describe("VolumeReplicationGroupRecipe", func() { }, Spec: recipe.RecipeSpec{}, } + + r.Spec.Workflows = make([]*recipe.Workflow, 0) } recipeVolumesDefine := func(volumes *recipe.Group) { r.Spec.Volumes = volumes @@ -171,10 +173,12 @@ var _ = Describe("VolumeReplicationGroupRecipe", func() { } } recipeCaptureWorkflowDefine := func(workflow *recipe.Workflow) { - r.Spec.CaptureWorkflow = workflow + workflow.Name = recipe.BackupWorkflowName + r.Spec.Workflows = append(r.Spec.Workflows, workflow) } recipeRecoverWorkflowDefine := func(workflow *recipe.Workflow) { - r.Spec.RecoverWorkflow = workflow + workflow.Name = recipe.RestoreWorkflowName + r.Spec.Workflows = append(r.Spec.Workflows, workflow) } recipeCreate := func() { Expect(k8sClient.Create(ctx, r)).To(Succeed()) diff --git a/internal/controller/vrg_volrep.go b/internal/controller/vrg_volrep.go index 022056162..2cd00c57c 100644 --- a/internal/controller/vrg_volrep.go +++ b/internal/controller/vrg_volrep.go @@ -66,7 +66,7 @@ func (v *VRGInstance) reconcileVolRepsAsPrimary() { } // If VR did not reach primary state, it is fine to still upload the PV and continue processing - requeueResult, _, err := v.processVRAsPrimary(pvcNamespacedName, log) + requeueResult, _, err := v.processVRAsPrimary(pvcNamespacedName, pvc, log) if requeueResult { v.requeue() } @@ -158,7 +158,7 @@ func (v *VRGInstance) reconcileVRAsSecondary(pvc *corev1.PersistentVolumeClaim, pvcNamespacedName := types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace} - requeueResult, ready, err := v.processVRAsSecondary(pvcNamespacedName, log) + requeueResult, ready, err := v.processVRAsSecondary(pvcNamespacedName, pvc, log) if err != nil { log.Info("Failure in getting or creating VolumeReplication resource for PersistentVolumeClaim", "errorValue", err) @@ -331,7 +331,9 @@ func (v *VRGInstance) protectPVC(pvc *corev1.PersistentVolumeClaim, log logr.Log vrg := v.instance ownerAdded := false - switch comparison := rmnutil.ObjectOwnerSetIfNotAlready(pvc, vrg); comparison { + comparison := rmnutil.ObjectOwnerSetIfNotAlready(pvc, vrg) + log.Info("***ASN, pvc ownership comparision result ", "pvcName=", pvc.Name, "comparision= ", comparison) + switch comparison { case rmnutil.Absent: ownerAdded = true case rmnutil.Same: @@ -454,6 +456,8 @@ func (v *VRGInstance) preparePVCForVRDeletion(pvc *corev1.PersistentVolumeClaim, pvc.Spec.VolumeName, pvc.Namespace, pvc.Name, v.instance.Namespace, v.instance.Name, err) } + log.Info("Deleted ramen annotations from PersistentVolume", "pv", pv.Name) + ownerRemoved := rmnutil.ObjectOwnerUnsetIfSet(pvc, vrg) // Remove VR finalizer from PVC and the annotation (PVC maybe left behind, so remove the annotation) finalizerRemoved := controllerutil.RemoveFinalizer(pvc, PvcVRFinalizerProtected) @@ -470,8 +474,8 @@ func (v *VRGInstance) preparePVCForVRDeletion(pvc *corev1.PersistentVolumeClaim, pvc.Namespace, pvc.Name, v.instance.Namespace, v.instance.Name, err) } - log1.Info("PVC update for VR deletion", - "finalizers", pvc.GetFinalizers(), "labels", pvc.GetLabels(), "annotations", pvc.GetAnnotations()) + log1.Info("Deleted ramen annotations, labels, and finallizers from PersistentVolumeClaim", + "annotations", pvc.GetAnnotations(), 
"labels", pvc.GetLabels(), "finalizers", pvc.GetFinalizers()) return nil } @@ -530,37 +534,43 @@ func (v *VRGInstance) generateArchiveAnnotation(gen int64) string { return fmt.Sprintf("%s-%s", pvcVRAnnotationArchivedVersionV1, strconv.Itoa(int(gen))) } -func (v *VRGInstance) isArchivedAlready(pvc *corev1.PersistentVolumeClaim, log logr.Logger) bool { +func (v *VRGInstance) isArchivedAlready(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, bool) { pvHasAnnotation := false pvcHasAnnotation := false - + pvcOwnerLabelSame, pvOwnerLabelSame := false, false + vrg := v.instance pv, err := v.getPVFromPVC(pvc) if err != nil { log.Error(err, "Failed to get PV to check if archived") - return false + return false, false } - + // Check should also include owner label along with archived annotation so that VRG + // is not considering wrong (This PVC is already protected by someone else) pvcDesiredValue := v.generateArchiveAnnotation(pvc.Generation) if v, ok := pvc.ObjectMeta.Annotations[pvcVRAnnotationArchivedKey]; ok && (v == pvcDesiredValue) { + pvcOwnerLabelSame = rmnutil.DoesObjectOwnerLabelsMatch(pvc, vrg) pvcHasAnnotation = true } pvDesiredValue := v.generateArchiveAnnotation(pv.Generation) if v, ok := pv.ObjectMeta.Annotations[pvcVRAnnotationArchivedKey]; ok && (v == pvDesiredValue) { pvHasAnnotation = true + pvOwnerLabelSame = rmnutil.DoesObjectOwnerLabelsMatch(&pv, vrg) } - if !pvHasAnnotation || !pvcHasAnnotation { - return false - } - - return true + return !pvcHasAnnotation || !pvHasAnnotation, !pvcOwnerLabelSame || !pvOwnerLabelSame } // Upload PV to the list of S3 stores in the VRG spec func (v *VRGInstance) uploadPVandPVCtoS3Stores(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (err error) { - if v.isArchivedAlready(pvc, log) { + if hasAnnotation, hasSameOwner := v.isArchivedAlready(pvc, log); hasAnnotation { + log.Info("****ASN, in isAlreadyArchived ", " hasAnnotation=", hasAnnotation, " hasSameOwner=", hasSameOwner) + if !hasSameOwner { + msg := "pvc is already owned by different vrg" + v.updatePVCClusterDataProtectedCondition(pvc.Namespace, pvc.Name, VRGConditionReasonPVOrPVCOwnedByDifferentOwner, msg) + return errors.New(msg) + } msg := fmt.Sprintf("PV cluster data already protected for PVC %s", pvc.Name) v.updatePVCClusterDataProtectedCondition(pvc.Namespace, pvc.Name, VRGConditionReasonUploaded, msg) @@ -845,7 +855,7 @@ func (v *VRGInstance) reconcileVRForDeletion(pvc *corev1.PersistentVolumeClaim, return !requeue } } else { - requeueResult, ready, err := v.processVRAsPrimary(pvcNamespacedName, log) + requeueResult, ready, err := v.processVRAsPrimary(pvcNamespacedName, pvc, log) switch { case err != nil: log.Info("Requeuing due to failure in getting or creating VolumeReplication resource for PersistentVolumeClaim", @@ -886,14 +896,17 @@ func (v *VRGInstance) undoPVCFinalizersAndPVRetention(pvc *corev1.PersistentVolu // reconcileMissingVR determines if VR is missing, and if missing completes other steps required for // reconciliation during deletion. -// VR can be missing, -// - if no VR was created post initial processing, by when VRG was deleted. In this case -// no PV was also uploaded, as VR is created first before PV is uploaded. 
-// - if VR was deleted in a prior reconcile, during VRG deletion, but steps post VR deletion were not -// completed, at this point a deleted VR is also not processed further (its generation would have been updated) -// Returns 2 booleans, -// - the first indicating if VR is missing or not, to enable further VR processing if needed -// - the next indicating any required requeue of the request, due to errors in determining VR presence +// +// VR can be missing: +// - if no VR was created post initial processing, by when VRG was deleted. In this case no PV was also +// uploaded, as VR is created first before PV is uploaded. +// - if VR was deleted in a prior reconcile, during VRG deletion, but steps post VR deletion were not +// completed, at this point a deleted VR is also not processed further (its generation would have been +// updated) +// +// Returns 2 booleans: +// - the first indicating if VR is missing or not, to enable further VR processing if needed +// - the next indicating any required requeue of the request, due to errors in determining VR presence func (v *VRGInstance) reconcileMissingVR(pvc *corev1.PersistentVolumeClaim, log logr.Logger) (bool, bool) { const ( requeue = true @@ -910,7 +923,7 @@ func (v *VRGInstance) reconcileMissingVR(pvc *corev1.PersistentVolumeClaim, log err := v.reconciler.Get(v.ctx, vrNamespacedName, volRep) if err == nil { if rmnutil.ResourceIsDeleted(volRep) { - log.Info("Requeuing due to processing a VR under deletion") + log.Info("Requeuing due to processing a deleted VR") return !vrMissing, requeue } @@ -924,7 +937,7 @@ func (v *VRGInstance) reconcileMissingVR(pvc *corev1.PersistentVolumeClaim, log return !vrMissing, requeue } - log.Info("Preparing PVC as VR is detected as missing or deleted") + log.Info("Unprotecting PVC as VR is missing") if err := v.preparePVCForVRDeletion(pvc, log); err != nil { log.Info("Requeuing due to failure in preparing PersistentVolumeClaim for deletion", @@ -1009,9 +1022,11 @@ func (v *VRGInstance) s3StoreDo(do func(ObjectStorer) error, msg, s3ProfileName // - a boolean indicating if a reconcile requeue is required // - a boolean indicating if VR is already at the desired state // - any errors during processing -func (v *VRGInstance) processVRAsPrimary(vrNamespacedName types.NamespacedName, log logr.Logger) (bool, bool, error) { +func (v *VRGInstance) processVRAsPrimary(vrNamespacedName types.NamespacedName, + pvc *corev1.PersistentVolumeClaim, log logr.Logger, +) (bool, bool, error) { if v.instance.Spec.Async != nil { - return v.createOrUpdateVR(vrNamespacedName, volrep.Primary, log) + return v.createOrUpdateVR(vrNamespacedName, pvc, volrep.Primary, log) } // TODO: createOrUpdateVR does two things. It modifies the VR and also @@ -1041,9 +1056,11 @@ func (v *VRGInstance) processVRAsPrimary(vrNamespacedName types.NamespacedName, // - a boolean indicating if a reconcile requeue is required // - a boolean indicating if VR is already at the desired state // - any errors during processing -func (v *VRGInstance) processVRAsSecondary(vrNamespacedName types.NamespacedName, log logr.Logger) (bool, bool, error) { +func (v *VRGInstance) processVRAsSecondary(vrNamespacedName types.NamespacedName, + pvc *corev1.PersistentVolumeClaim, log logr.Logger, +) (bool, bool, error) { if v.instance.Spec.Async != nil { - return v.createOrUpdateVR(vrNamespacedName, volrep.Secondary, log) + return v.createOrUpdateVR(vrNamespacedName, pvc, volrep.Secondary, log) } // TODO: createOrUpdateVR does two things. 
It modifies the VR and also @@ -1080,7 +1097,7 @@ func (v *VRGInstance) processVRAsSecondary(vrNamespacedName types.NamespacedName // - a boolean indicating if VR is already at the desired state // - any errors during processing func (v *VRGInstance) createOrUpdateVR(vrNamespacedName types.NamespacedName, - state volrep.ReplicationState, log logr.Logger, + pvc *corev1.PersistentVolumeClaim, state volrep.ReplicationState, log logr.Logger, ) (bool, bool, error) { const requeue = true @@ -1124,7 +1141,7 @@ func (v *VRGInstance) createOrUpdateVR(vrNamespacedName types.NamespacedName, return !requeue, false, nil } - return v.updateVR(volRep, state, log) + return v.updateVR(pvc, volRep, state, log) } func (v *VRGInstance) autoResync(state volrep.ReplicationState) bool { @@ -1143,7 +1160,7 @@ func (v *VRGInstance) autoResync(state volrep.ReplicationState) bool { // - a boolean indicating if a reconcile requeue is required // - a boolean indicating if VR is already at the desired state // - any errors during the process of updating the resource -func (v *VRGInstance) updateVR(volRep *volrep.VolumeReplication, +func (v *VRGInstance) updateVR(pvc *corev1.PersistentVolumeClaim, volRep *volrep.VolumeReplication, state volrep.ReplicationState, log logr.Logger, ) (bool, bool, error) { const requeue = true @@ -1152,7 +1169,7 @@ func (v *VRGInstance) updateVR(volRep *volrep.VolumeReplication, if volRep.Spec.ReplicationState == state && volRep.Spec.AutoResync == v.autoResync(state) { log.Info("VolumeReplication and VolumeReplicationGroup state and autoresync match. Proceeding to status check") - return !requeue, v.checkVRStatus(volRep), nil + return !requeue, v.checkVRStatus(pvc, volRep), nil } volRep.Spec.ReplicationState = state @@ -1166,7 +1183,7 @@ func (v *VRGInstance) updateVR(volRep *volrep.VolumeReplication, rmnutil.EventReasonVRUpdateFailed, err.Error()) msg := "Failed to update VolumeReplication resource" - v.updatePVCDataReadyCondition(volRep.Namespace, volRep.Name, VRGConditionReasonError, msg) + v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg) return requeue, false, fmt.Errorf("failed to update VolumeReplication resource"+ " (%s/%s) as %s, belonging to VolumeReplicationGroup (%s/%s), %w", @@ -1178,7 +1195,7 @@ func (v *VRGInstance) updateVR(volRep *volrep.VolumeReplication, volRep.GetName(), volRep.GetNamespace(), state)) // Just updated the state of the VolRep. Mark it as progressing. msg := "Updated VolumeReplication resource for PVC" - v.updatePVCDataReadyCondition(volRep.Namespace, volRep.Name, VRGConditionReasonProgressing, msg) + v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonProgressing, msg) return !requeue, false, nil } @@ -1373,7 +1390,7 @@ func (v *VRGInstance) getStorageClass(namespacedName types.NamespacedName) (*sto // checkVRStatus checks if the VolumeReplication resource has the desired status for the // current generation and returns true if so, false otherwise -func (v *VRGInstance) checkVRStatus(volRep *volrep.VolumeReplication) bool { +func (v *VRGInstance) checkVRStatus(pvc *corev1.PersistentVolumeClaim, volRep *volrep.VolumeReplication) bool { // When the generation in the status is updated, VRG would get a reconcile // as it owns VolumeReplication resource. 
if volRep.GetGeneration() != volRep.Status.ObservedGeneration { @@ -1381,33 +1398,92 @@ func (v *VRGInstance) checkVRStatus(volRep *volrep.VolumeReplication) bool { volRep.GetName(), volRep.GetNamespace())) msg := "VolumeReplication generation not updated in status" - v.updatePVCDataReadyCondition(volRep.Namespace, volRep.Name, VRGConditionReasonProgressing, msg) + v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonProgressing, msg) return false } switch { case v.instance.Spec.ReplicationState == ramendrv1alpha1.Primary: - return v.validateVRStatus(volRep, ramendrv1alpha1.Primary) + return v.validateVRStatus(pvc, volRep, ramendrv1alpha1.Primary) case v.instance.Spec.ReplicationState == ramendrv1alpha1.Secondary: - return v.validateVRStatus(volRep, ramendrv1alpha1.Secondary) + return v.validateVRStatus(pvc, volRep, ramendrv1alpha1.Secondary) default: v.log.Info(fmt.Sprintf("invalid Replication State %s for VolumeReplicationGroup (%s:%s)", string(v.instance.Spec.ReplicationState), v.instance.Name, v.instance.Namespace)) msg := "VolumeReplicationGroup state invalid" - v.updatePVCDataReadyCondition(volRep.Namespace, volRep.Name, VRGConditionReasonError, msg) + v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg) return false } } // validateVRStatus validates if the VolumeReplication resource has the desired status for the -// current generation and returns true if so, false otherwise -// - When replication state is Primary, only Completed condition is checked. -// - When replication state is Secondary, all 3 conditions for Completed/Degraded/Resyncing is -// checked and ensured healthy. -func (v *VRGInstance) validateVRStatus(volRep *volrep.VolumeReplication, state ramendrv1alpha1.ReplicationState) bool { +// current generation, deletion status, and repliaction state. +// +// We handle 3 cases: +// - Primary deleted VRG: If Validated condition exists and false, the VR will never complete and can be +// deleted safely. Otherwise Completed condition is checked. +// - Primary VRG: Completed condition is checked. +// - Secondary VRG: Completed, Degraded and Resyncing conditions are checked and ensured healthy. +func (v *VRGInstance) validateVRStatus(pvc *corev1.PersistentVolumeClaim, volRep *volrep.VolumeReplication, + state ramendrv1alpha1.ReplicationState, +) bool { + // Check validated for primary during VRG deletion. + if state == ramendrv1alpha1.Primary && rmnutil.ResourceIsDeleted(v.instance) { + validated, ok := v.validateVRValidatedStatus(volRep) + if !validated && ok { + v.log.Info(fmt.Sprintf("VolumeReplication %s/%s failed validation and can be deleted", + volRep.GetName(), volRep.GetNamespace())) + + return true + } + } + + // Check completed for both primary and secondary. + if !v.validateVRCompletedStatus(pvc, volRep, state) { + return false + } + + // if primary, all checks are completed. 
+	if state == ramendrv1alpha1.Secondary {
+		return v.validateAdditionalVRStatusForSecondary(pvc, volRep)
+	}
+
+	msg := "PVC in the VolumeReplicationGroup is ready for use"
+	v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonReady, msg)
+	v.updatePVCDataProtectedCondition(pvc.Namespace, pvc.Name, VRGConditionReasonReady, msg)
+	v.updatePVCLastSyncTime(pvc.Namespace, pvc.Name, volRep.Status.LastSyncTime)
+	v.updatePVCLastSyncDuration(pvc.Namespace, pvc.Name, volRep.Status.LastSyncDuration)
+	v.updatePVCLastSyncBytes(pvc.Namespace, pvc.Name, volRep.Status.LastSyncBytes)
+	v.log.Info(fmt.Sprintf("VolumeReplication resource %s/%s is ready for use", volRep.GetName(),
+		volRep.GetNamespace()))
+
+	return true
+}
+
+// validateVRValidatedStatus validates that the VolumeReplication resource was validated.
+// Returns 2 booleans:
+//   - validated: true if the condition is true, otherwise false
+//   - ok: true if the check was successful, false if the condition is missing, stale, or unknown.
+func (v *VRGInstance) validateVRValidatedStatus(
+	volRep *volrep.VolumeReplication,
+) (bool, bool) {
+	conditionMet, errorMsg := isVRConditionMet(volRep, volrep.ConditionValidated, metav1.ConditionTrue)
+	if errorMsg != "" {
+		v.log.Info(fmt.Sprintf("%s (VolRep: %s/%s)", errorMsg, volRep.GetName(), volRep.GetNamespace()))
+	}
+
+	return conditionMet, errorMsg == ""
+}
+
+// validateVRCompletedStatus validates if the VolumeReplication resource Completed condition is met and updates
+// the PVC DataReady and Protected conditions.
+// Returns true if the condition is true, false if the condition is missing, stale, unknown, or false.
+func (v *VRGInstance) validateVRCompletedStatus(pvc *corev1.PersistentVolumeClaim, volRep *volrep.VolumeReplication,
+	state ramendrv1alpha1.ReplicationState,
+) bool {
 	var (
 		stateString string
 		action      string
@@ -1422,35 +1498,18 @@ func (v *VRGInstance) validateVRStatus(volRep *volrep.VolumeReplication, state r
 		action = "demoted"
 	}
 
-	// it should be completed
 	conditionMet, msg := isVRConditionMet(volRep, volrep.ConditionCompleted, metav1.ConditionTrue)
 	if !conditionMet {
 		defaultMsg := fmt.Sprintf("VolumeReplication resource for pvc not %s to %s", action, stateString)
-		v.updatePVCDataReadyConditionHelper(volRep.Namespace, volRep.Name, VRGConditionReasonError, msg,
+		v.updatePVCDataReadyConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg,
 			defaultMsg)
-
-		v.updatePVCDataProtectedConditionHelper(volRep.Namespace, volRep.Name, VRGConditionReasonError, msg,
+		v.updatePVCDataProtectedConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg,
 			defaultMsg)
-
 		v.log.Info(fmt.Sprintf("%s (VolRep: %s/%s)", defaultMsg, volRep.GetName(), volRep.GetNamespace()))
 
 		return false
 	}
 
-	// if primary, all checks are completed
-	if state == ramendrv1alpha1.Secondary {
-		return v.validateAdditionalVRStatusForSecondary(volRep)
-	}
-
-	msg = "PVC in the VolumeReplicationGroup is ready for use"
-	v.updatePVCDataReadyCondition(volRep.Namespace, volRep.Name, VRGConditionReasonReady, msg)
-	v.updatePVCDataProtectedCondition(volRep.Namespace, volRep.Name, VRGConditionReasonReady, msg)
-	v.updatePVCLastSyncTime(volRep.Namespace, volRep.Name, volRep.Status.LastSyncTime)
-	v.updatePVCLastSyncDuration(volRep.Namespace, volRep.Name, volRep.Status.LastSyncDuration)
-	v.updatePVCLastSyncBytes(volRep.Namespace, volRep.Name, volRep.Status.LastSyncBytes)
-	v.log.Info(fmt.Sprintf("VolumeReplication resource %s/%s is ready for use", volRep.GetName(),
-		volRep.GetNamespace()))
-
return true } @@ -1471,22 +1530,24 @@ func (v *VRGInstance) validateVRStatus(volRep *volrep.VolumeReplication, state r // With 2nd condition being met, // ProtectedPVC.Conditions[DataReady] = True // ProtectedPVC.Conditions[DataProtected] = True -func (v *VRGInstance) validateAdditionalVRStatusForSecondary(volRep *volrep.VolumeReplication) bool { - v.updatePVCLastSyncTime(volRep.Namespace, volRep.Name, nil) - v.updatePVCLastSyncDuration(volRep.Namespace, volRep.Name, nil) - v.updatePVCLastSyncBytes(volRep.Namespace, volRep.Name, nil) +func (v *VRGInstance) validateAdditionalVRStatusForSecondary(pvc *corev1.PersistentVolumeClaim, + volRep *volrep.VolumeReplication, +) bool { + v.updatePVCLastSyncTime(pvc.Namespace, pvc.Name, nil) + v.updatePVCLastSyncDuration(pvc.Namespace, pvc.Name, nil) + v.updatePVCLastSyncBytes(pvc.Namespace, pvc.Name, nil) conditionMet, _ := isVRConditionMet(volRep, volrep.ConditionResyncing, metav1.ConditionTrue) if !conditionMet { - return v.checkResyncCompletionAsSecondary(volRep) + return v.checkResyncCompletionAsSecondary(pvc, volRep) } conditionMet, msg := isVRConditionMet(volRep, volrep.ConditionDegraded, metav1.ConditionTrue) if !conditionMet { - v.updatePVCDataProtectedConditionHelper(volRep.Namespace, volRep.Name, VRGConditionReasonError, msg, + v.updatePVCDataProtectedConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, "VolumeReplication resource for pvc is not in Degraded condition while resyncing") - v.updatePVCDataReadyConditionHelper(volRep.Namespace, volRep.Name, VRGConditionReasonError, msg, + v.updatePVCDataReadyConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, "VolumeReplication resource for pvc is not in Degraded condition while resyncing") v.log.Info(fmt.Sprintf("VolumeReplication resource is not in degraded condition while"+ @@ -1496,8 +1557,8 @@ func (v *VRGInstance) validateAdditionalVRStatusForSecondary(volRep *volrep.Volu } msg = "VolumeReplication resource for the pvc is syncing as Secondary" - v.updatePVCDataReadyCondition(volRep.Namespace, volRep.Name, VRGConditionReasonReplicating, msg) - v.updatePVCDataProtectedCondition(volRep.Namespace, volRep.Name, VRGConditionReasonReplicating, msg) + v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonReplicating, msg) + v.updatePVCDataProtectedCondition(pvc.Namespace, pvc.Name, VRGConditionReasonReplicating, msg) v.log.Info(fmt.Sprintf("VolumeReplication resource for the pvc is syncing as Secondary (%s/%s)", volRep.GetName(), volRep.GetNamespace())) @@ -1506,14 +1567,16 @@ func (v *VRGInstance) validateAdditionalVRStatusForSecondary(volRep *volrep.Volu } // checkResyncCompletionAsSecondary returns true if resync status is complete as secondary, false otherwise -func (v *VRGInstance) checkResyncCompletionAsSecondary(volRep *volrep.VolumeReplication) bool { +func (v *VRGInstance) checkResyncCompletionAsSecondary(pvc *corev1.PersistentVolumeClaim, + volRep *volrep.VolumeReplication, +) bool { conditionMet, msg := isVRConditionMet(volRep, volrep.ConditionResyncing, metav1.ConditionFalse) if !conditionMet { defaultMsg := "VolumeReplication resource for pvc not syncing as Secondary" - v.updatePVCDataReadyConditionHelper(volRep.Namespace, volRep.Name, VRGConditionReasonError, msg, + v.updatePVCDataReadyConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg, defaultMsg) - v.updatePVCDataProtectedConditionHelper(volRep.Namespace, volRep.Name, VRGConditionReasonError, msg, + v.updatePVCDataProtectedConditionHelper(pvc.Namespace, 
pvc.Name, VRGConditionReasonError, msg,
 			defaultMsg)
 		v.log.Info(fmt.Sprintf("%s (VolRep: %s/%s)", defaultMsg, volRep.GetName(), volRep.GetNamespace()))
@@ -1524,10 +1587,10 @@ func (v *VRGInstance) checkResyncCompletionAsSecondary(volRep *volrep.VolumeRepl
 	conditionMet, msg = isVRConditionMet(volRep, volrep.ConditionDegraded, metav1.ConditionFalse)
 	if !conditionMet {
 		defaultMsg := "VolumeReplication resource for pvc is not syncing and is degraded as Secondary"
-		v.updatePVCDataReadyConditionHelper(volRep.Namespace, volRep.Name, VRGConditionReasonError, msg,
+		v.updatePVCDataReadyConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg,
 			defaultMsg)
-		v.updatePVCDataProtectedConditionHelper(volRep.Namespace, volRep.Name, VRGConditionReasonError, msg,
+		v.updatePVCDataProtectedConditionHelper(pvc.Namespace, pvc.Name, VRGConditionReasonError, msg,
 			defaultMsg)
 
 		v.log.Info(fmt.Sprintf("%s (VolRep: %s/%s)", defaultMsg, volRep.GetName(), volRep.GetNamespace()))
@@ -1536,8 +1599,8 @@ func (v *VRGInstance) checkResyncCompletionAsSecondary(volRep *volrep.VolumeRepl
 	}
 
 	msg = "VolumeReplication resource for the pvc as Secondary is in sync with Primary"
-	v.updatePVCDataReadyCondition(volRep.Namespace, volRep.Name, VRGConditionReasonReplicated, msg)
-	v.updatePVCDataProtectedCondition(volRep.Namespace, volRep.Name, VRGConditionReasonDataProtected, msg)
+	v.updatePVCDataReadyCondition(pvc.Namespace, pvc.Name, VRGConditionReasonReplicated, msg)
+	v.updatePVCDataProtectedCondition(pvc.Namespace, pvc.Name, VRGConditionReasonDataProtected, msg)
 
 	v.log.Info(fmt.Sprintf("data sync completed as both degraded and resyncing are false for"+
 		" secondary VolRep (%s/%s)", volRep.GetName(), volRep.GetNamespace()))
@@ -1545,34 +1608,35 @@
 	return true
 }
 
+// isVRConditionMet returns true if the condition is met, and an error message if we could not get the
+// condition value.
func isVRConditionMet(volRep *volrep.VolumeReplication, conditionType string, desiredStatus metav1.ConditionStatus, ) (bool, string) { volRepCondition := findCondition(volRep.Status.Conditions, conditionType) if volRepCondition == nil { - msg := fmt.Sprintf("Failed to get the %s condition from status of VolumeReplication resource.", conditionType) + errorMsg := fmt.Sprintf("Failed to get the %s condition from status of VolumeReplication resource.", + conditionType) - return false, msg + return false, errorMsg } if volRep.GetGeneration() != volRepCondition.ObservedGeneration { - msg := fmt.Sprintf("Stale generation for condition %s from status of VolumeReplication resource.", conditionType) + errorMsg := fmt.Sprintf("Stale generation for condition %s from status of VolumeReplication resource.", + conditionType) - return false, msg + return false, errorMsg } if volRepCondition.Status == metav1.ConditionUnknown { - msg := fmt.Sprintf("Unknown status for condition %s from status of VolumeReplication resource.", conditionType) + errorMsg := fmt.Sprintf("Unknown status for condition %s from status of VolumeReplication resource.", + conditionType) - return false, msg + return false, errorMsg } - if volRepCondition.Status != desiredStatus { - return false, "" - } - - return true, "" + return volRepCondition.Status == desiredStatus, "" } // Disabling unparam linter as currently every invokation of this @@ -1740,7 +1804,7 @@ func setPVCClusterDataProtectedCondition(protectedPVC *ramendrv1alpha1.Protected setVRGClusterDataProtectedCondition(&protectedPVC.Conditions, observedGeneration, message) case VRGConditionReasonUploading: setVRGClusterDataProtectingCondition(&protectedPVC.Conditions, observedGeneration, message) - case VRGConditionReasonUploadError, VRGConditionReasonClusterDataAnnotationFailed: + case VRGConditionReasonUploadError, VRGConditionReasonClusterDataAnnotationFailed, VRGConditionReasonPVOrPVCOwnedByDifferentOwner: setVRGClusterDataUnprotectedCondition(&protectedPVC.Conditions, observedGeneration, reason, message) default: // if appropriate reason is not provided, then treat it as an unknown condition. 
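The refactored `isVRConditionMet` above reports two things: whether the condition has the desired status, and, through a non-empty message, whether the check itself was inconclusive because the condition is missing, stale, or `Unknown`. Below is a minimal standalone sketch of that calling convention using plain `metav1.Condition` values; the `conditionMet` helper and the `main` function are illustrative only and not part of the repository.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// conditionMet mirrors the shape of isVRConditionMet: the bool reports whether the
// condition has the desired status, and a non-empty message means the check was
// inconclusive (condition missing, stale, or Unknown).
func conditionMet(conditions []metav1.Condition, observedGeneration int64,
	conditionType string, desired metav1.ConditionStatus,
) (bool, string) {
	var cond *metav1.Condition

	for i := range conditions {
		if conditions[i].Type == conditionType {
			cond = &conditions[i]

			break
		}
	}

	switch {
	case cond == nil:
		return false, fmt.Sprintf("condition %s not found", conditionType)
	case cond.ObservedGeneration != observedGeneration:
		return false, fmt.Sprintf("stale generation for condition %s", conditionType)
	case cond.Status == metav1.ConditionUnknown:
		return false, fmt.Sprintf("unknown status for condition %s", conditionType)
	default:
		// An empty message means the answer is conclusive either way.
		return cond.Status == desired, ""
	}
}

func main() {
	conds := []metav1.Condition{
		{Type: "Validated", Status: metav1.ConditionFalse, ObservedGeneration: 3},
	}

	met, msg := conditionMet(conds, 3, "Validated", metav1.ConditionTrue)
	fmt.Println(met, msg) // prints: false, with an empty message, i.e. conclusively not validated
}
```

This split is what lets `validateVRStatus` above treat a conclusive `Validated=False` on a deleted primary VRG as safe to clean up, while an inconclusive result falls through to the `Completed` condition check.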
@@ -1793,6 +1857,8 @@ func (v *VRGInstance) deleteVR(vrNamespacedName types.NamespacedName, log logr.L return nil } + v.log.Info(fmt.Sprintf("Deleted VolumeReplication resource %s/%s", vrNamespacedName.Namespace, vrNamespacedName.Name)) + return v.ensureVRDeletedFromAPIServer(vrNamespacedName, log) } diff --git a/internal/controller/vrg_volsync_test.go b/internal/controller/vrg_volsync_test.go index ed24da430..783ad8a11 100644 --- a/internal/controller/vrg_volsync_test.go +++ b/internal/controller/vrg_volsync_test.go @@ -17,7 +17,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" volsyncv1alpha1 "github.com/backube/volsync/api/v1alpha1" - snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/internal/controller/volsync" storagev1 "k8s.io/api/storage/v1" diff --git a/ramenctl/ramenctl/command.py b/ramenctl/ramenctl/command.py index 4c56dadfb..613ac089d 100644 --- a/ramenctl/ramenctl/command.py +++ b/ramenctl/ramenctl/command.py @@ -76,8 +76,8 @@ def run(*args): return commands.run(*args) -def watch(*args, log=log.debug): - for line in commands.watch(*args): +def watch(*args, stderr=None, cwd=None, log=log.debug): + for line in commands.watch(*args, stderr=stderr, cwd=cwd): log("%s", line) diff --git a/ramenctl/ramenctl/deploy.py b/ramenctl/ramenctl/deploy.py index 69cd662ab..5c502d249 100644 --- a/ramenctl/ramenctl/deploy.py +++ b/ramenctl/ramenctl/deploy.py @@ -3,9 +3,11 @@ import concurrent.futures import os +import subprocess import tempfile from drenv import kubectl + from . import command IMAGE = "quay.io/ramendr/ramen-operator:latest" @@ -33,32 +35,37 @@ def run(args): command.info("Preparing resources") command.watch("make", "-C", args.source_dir, "resources") - with tempfile.TemporaryDirectory(prefix="ramenctl-deploy-") as tmpdir: - tar = os.path.join(tmpdir, "image.tar") - command.info("Saving image '%s'", args.image) - command.watch("podman", "save", args.image, "-o", tar) + load_image(args) - with concurrent.futures.ThreadPoolExecutor() as executor: - futures = [] + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [] - if env["hub"]: - f = executor.submit( - deploy, args, env["hub"], tar, "hub", platform="k8s" - ) - futures.append(f) + if env["hub"]: + f = executor.submit(deploy, args, env["hub"], "hub", platform="k8s") + futures.append(f) - for cluster in env["clusters"]: - f = executor.submit(deploy, args, cluster, tar, "dr-cluster") - futures.append(f) + for cluster in env["clusters"]: + f = executor.submit(deploy, args, cluster, "dr-cluster") + futures.append(f) - for f in concurrent.futures.as_completed(futures): - f.result() + for f in concurrent.futures.as_completed(futures): + f.result() -def deploy(args, cluster, tar, deploy_type, platform="", timeout=120): - command.info("Loading image in cluster '%s'", cluster) - command.watch("minikube", "--profile", cluster, "image", "load", tar) +def load_image(args): + command.info("Loading image '%s'", args.image) + with tempfile.TemporaryDirectory(prefix="ramenctl-deploy-") as tmpdir: + tar = os.path.join(tmpdir, "image.tar") + command.watch("podman", "save", args.image, "-o", tar) + cmd = ["drenv", "load", f"--image={tar}"] + if args.name_prefix: + cmd.append(f"--name-prefix={args.name_prefix}") + cmd.append(os.path.abspath(args.filename)) + work_dir = os.path.join(args.source_dir, "test") if args.source_dir
else None + command.watch(*cmd, stderr=subprocess.STDOUT, cwd=work_dir) + +def deploy(args, cluster, deploy_type, platform="", timeout=120): command.info("Deploying ramen operator in cluster '%s'", cluster) overlay = os.path.join(args.source_dir, f"config/{deploy_type}/default", platform) yaml = kubectl.kustomize(overlay, load_restrictor="LoadRestrictionsNone") diff --git a/test/Makefile b/test/Makefile index 75ea8d9a4..7b58f13e5 100644 --- a/test/Makefile +++ b/test/Makefile @@ -5,6 +5,9 @@ # hardware acceleration for VMs. DRIVER ?= vm +# drenv start timeout in seconds +TIMEOUT ?= 600 + env := envs/$(DRIVER).yaml prefix := drenv-test- @@ -50,7 +53,7 @@ coverage-html: xdg-open htmlcov/index.html cluster: - drenv start --name-prefix $(prefix) $(env) + drenv start --name-prefix $(prefix) $(env) --verbose --timeout $(TIMEOUT) clean: drenv delete --name-prefix $(prefix) $(env) diff --git a/test/README.md b/test/README.md index 6114581b0..d423e363f 100644 --- a/test/README.md +++ b/test/README.md @@ -8,7 +8,7 @@ SPDX-License-Identifier: Apache-2.0 This directory provides tools and configuration for creating Ramen test environment. -## Setup +## Setup on Linux 1. Setup a development environment as describe in [developer quick start guide](../docs/devel-quick-start.md) @@ -41,7 +41,7 @@ environment. 1. Install `subctl` tool, See [Submariner subctl installation](https://submariner.io/operations/deployment/subctl/) for the details. - Version v0.17.0 or later is required. + Version v0.18.0 or later is required. 1. Install the `velero` tool @@ -110,14 +110,82 @@ environment. 1. Install the `kubectl-gather` plugin ``` - curl -L -o kubectl-gather https://github.com/nirs/kubectl-gather/releases/download/v0.4.1/kubectl-gather-v0.4.1-linux-amd64 + curl -L -o kubectl-gather https://github.com/nirs/kubectl-gather/releases/download/v0.5.1/kubectl-gather-v0.5.1-linux-amd64 sudo install kubectl-gather /usr/local/bin rm kubectl-gather ``` For more info see [kubectl-gather](https://github.com/nirs/kubectl-gather) -### Testing that drenv is healthy +## Setup on macOS + +1. Install the [Homebrew package manager](https://brew.sh/) + +1. Install required packages + + ``` + brew install go kubectl kustomize helm velero virtctl minio-mc argocd + ``` + +1. Install the `clusteradm` tool. See + [Install clusteradm CLI tool](https://open-cluster-management.io/getting-started/installation/start-the-control-plane/#install-clusteradm-cli-tool) + for the details. Version v0.8.1 or later is required. + +1. Install the `subctl` tool, See + [Submariner subctl installation](https://submariner.io/operations/deployment/subctl/) + for the details. Version v0.18.0 or later is required. + +1. Install the `kubectl-gather` plugin + + ``` + curl -L -o kubectl-gather https://github.com/nirs/kubectl-gather/releases/download/v0.5.1/kubectl-gather-v0.5.1-darwin-arm64 + sudo install kubectl-gather /usr/local/bin + rm kubectl-gather + ``` + + For more info see [kubectl-gather](https://github.com/nirs/kubectl-gather) + +1. Install `lima` from source + + > [!NOTE] + > Do not install lima from brew, it is too old. + + Clone and build lima: + + ``` + git clone https://github.com/lima-vm/lima.git + cd lima + make + ``` + + Edit `~/.zshrc` and add `$HOME/lima/_output/bin` directory to the PATH: + + ``` + PATH="$HOME/lima/_output/bin:$PATH" + export PATH + ``` + + Open a new shell or run this in the current shell: + + ``` + export PATH="$HOME/lima/_output/bin:$PATH" + ``` + +1. 
Install `socket_vmnet` from source + + > [!IMPORTANT] + > Do not install socket_vmnet from brew, it is insecure. + + ``` + git clone https://github.com/lima-vm/socket_vmnet.git + cd socket_vmnet + sudo make PREFIX=/opt/socket_vmnet install.bin + sudo make PREFIX=/opt/socket_vmnet install.launchd + ``` + + For more info see [Installing socket_vmnet from source](https://github.com/lima-vm/socket_vmnet?tab=readme-ov-file#from-source) + +## Testing that drenv is healthy Run this script to make sure `drenv` works: @@ -539,9 +607,11 @@ $ drenv delete envs/example.yaml - `templates`: templates for creating new profiles. - `name`: profile name. - - `external`: true if this is existing external cluster. In this - case the tool will not start a minikube cluster and all other - options are ignored. + - `provider`: cluster provider. The default provider is "minikube", + creating cluster using VM or containers. Use "external" to use + exsiting clusters not managed by `drenv`. Use the special value + "$provider" to select the best provider for the host. (default + "$provider") - `driver`: The minikube driver. On Linux, the default drivers are kvm2 and docker for VMs and containers. On MacOS, the defaults are hyperkit and podman. Use "$vm" and "$container" values to use the recommended VM and diff --git a/test/addons/argocd/test b/test/addons/argocd/test index df1ed6d5e..7653e9629 100755 --- a/test/addons/argocd/test +++ b/test/addons/argocd/test @@ -83,7 +83,13 @@ def undeploy_guestbook(hub, cluster): print(line) print(f"Deleting namespace argocd-test in cluster {cluster}") - kubectl.delete("namespace", "argocd-test", "--wait=false", context=cluster) + kubectl.delete( + "namespace", + "argocd-test", + "--wait=false", + "--ignore-not-found", + context=cluster, + ) def wait_until_guestbook_is_deleted(hub, cluster): diff --git a/test/addons/csi-addons/cache b/test/addons/csi-addons/cache index 7dff0bd5b..2b1a856b1 100755 --- a/test/addons/csi-addons/cache +++ b/test/addons/csi-addons/cache @@ -7,4 +7,4 @@ import os from drenv import cache os.chdir(os.path.dirname(__file__)) -cache.refresh(".", "addons/csi-addons-0.9.0.yaml") +cache.refresh(".", "addons/csi-addons-0.10.0.yaml") diff --git a/test/addons/csi-addons/kustomization.yaml b/test/addons/csi-addons/kustomization.yaml index db4c6081e..5aeb4a615 100644 --- a/test/addons/csi-addons/kustomization.yaml +++ b/test/addons/csi-addons/kustomization.yaml @@ -3,10 +3,10 @@ --- resources: - - https://raw.githubusercontent.com/csi-addons/kubernetes-csi-addons/v0.9.0/deploy/controller/crds.yaml - - https://raw.githubusercontent.com/csi-addons/kubernetes-csi-addons/v0.9.0/deploy/controller/rbac.yaml - - https://raw.githubusercontent.com/csi-addons/kubernetes-csi-addons/v0.9.0/deploy/controller/setup-controller.yaml + - https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.10.0/crds.yaml + - https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.10.0/rbac.yaml + - https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.10.0/setup-controller.yaml images: - name: quay.io/csiaddons/k8s-controller - newTag: v0.9.0 + newTag: v0.10.0 diff --git a/test/addons/csi-addons/start b/test/addons/csi-addons/start index 1f3a369b7..d0f3351e2 100755 --- a/test/addons/csi-addons/start +++ b/test/addons/csi-addons/start @@ -12,7 +12,7 @@ from drenv import cache def deploy(cluster): print("Deploying csi addon for volume replication") - path = cache.get(".", "addons/csi-addons-0.9.0.yaml") + path = cache.get(".", 
"addons/csi-addons-0.10.0.yaml") kubectl.apply("--filename", path, context=cluster) diff --git a/test/addons/external-snapshotter/cache b/test/addons/external-snapshotter/cache new file mode 100755 index 000000000..2e6eba057 --- /dev/null +++ b/test/addons/external-snapshotter/cache @@ -0,0 +1,11 @@ +#!/usr/bin/env python3 + +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +import os +from drenv import cache + +os.chdir(os.path.dirname(__file__)) +cache.refresh("crds", "addons/external-snapshotter-crds-8.1.0.yaml") +cache.refresh("controller", "addons/external-snapshotter-controller-8.1.0.yaml") diff --git a/test/addons/external-snapshotter/controller/kustomization.yaml b/test/addons/external-snapshotter/controller/kustomization.yaml new file mode 100644 index 000000000..51d5227aa --- /dev/null +++ b/test/addons/external-snapshotter/controller/kustomization.yaml @@ -0,0 +1,5 @@ +--- +resources: + - https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml + - https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml +namespace: kube-system diff --git a/test/addons/external-snapshotter/crds/kustomization.yaml b/test/addons/external-snapshotter/crds/kustomization.yaml new file mode 100644 index 000000000..242778bdf --- /dev/null +++ b/test/addons/external-snapshotter/crds/kustomization.yaml @@ -0,0 +1,8 @@ +--- +resources: + - https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml + - https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml + - https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml + - https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml + - https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml + - https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.1.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml diff --git a/test/addons/external-snapshotter/start b/test/addons/external-snapshotter/start new file mode 100755 index 000000000..135ef6b87 --- /dev/null +++ b/test/addons/external-snapshotter/start @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 + +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +import os +import sys + +from drenv import kubectl +from drenv import cache + + +def deploy(cluster): + print("Deploying crds") + path = cache.get("crds", "addons/external-snapshotter-crds-8.1.0.yaml") + kubectl.apply("--filename", path, context=cluster) + + print("Waiting until crds are established") + kubectl.wait("--for=condition=established", "--filename", path, context=cluster) + + print("Deploying snapshot-controller") + path = cache.get("controller", "addons/external-snapshotter-controller-8.1.0.yaml") + kubectl.apply("--filename", path, context=cluster) + + +def wait(cluster): + print("Waiting until snapshot-controller is rolled out") + 
kubectl.rollout( + "status", + "deploy/snapshot-controller", + "--namespace=kube-system", + "--timeout=300s", + context=cluster, + ) + + +if len(sys.argv) != 2: + print(f"Usage: {sys.argv[0]} cluster") + sys.exit(1) + +os.chdir(os.path.dirname(__file__)) +cluster = sys.argv[1] + +deploy(cluster) +wait(cluster) diff --git a/test/addons/minio/minio.yaml b/test/addons/minio/minio.yaml index 843f15ea2..3f4efb4be 100644 --- a/test/addons/minio/minio.yaml +++ b/test/addons/minio/minio.yaml @@ -9,6 +9,21 @@ metadata: name: minio --- apiVersion: v1 +kind: PersistentVolume +metadata: + name: minio-storage-pv + labels: + component: minio +spec: + storageClassName: manual + capacity: + storage: 10Gi + accessModes: + - ReadWriteOnce + hostPath: + path: /mnt/minio +--- +apiVersion: v1 kind: PersistentVolumeClaim metadata: namespace: minio @@ -16,8 +31,9 @@ metadata: labels: component: minio spec: - accessModes: ["ReadWriteOnce"] - storageClassName: "standard" + accessModes: + - ReadWriteOnce + storageClassName: manual resources: requests: storage: 10Gi @@ -47,7 +63,7 @@ spec: readOnly: false containers: - name: minio - image: quay.io/minio/minio:RELEASE.2024-03-15T01-07-19Z + image: quay.io/minio/minio:RELEASE.2024-08-29T01-40-52Z imagePullPolicy: IfNotPresent resources: limits: diff --git a/test/addons/rbd-mirror/start b/test/addons/rbd-mirror/start index c21d4cafc..732641207 100755 --- a/test/addons/rbd-mirror/start +++ b/test/addons/rbd-mirror/start @@ -86,7 +86,7 @@ def disable_rbd_mirror_debug_logs(cluster): def configure_rbd_mirroring(cluster, peer_info): print(f"Applying rbd mirror secret in cluster '{cluster}'") - template = drenv.template("rbd-mirror-secret.yaml") + template = drenv.template("start-data/rbd-mirror-secret.yaml") yaml = template.substitute(peer_info) kubectl.apply( "--filename=-", @@ -107,11 +107,7 @@ def configure_rbd_mirroring(cluster, peer_info): ) print(f"Apply rbd mirror to cluster '{cluster}'") - kubectl.apply( - "--filename=rbd-mirror.yaml", - "--namespace=rook-ceph", - context=cluster, - ) + kubectl.apply("--kustomize=start-data", context=cluster) def wait_until_rbd_mirror_is_ready(cluster): @@ -222,15 +218,6 @@ def restart_rbd_mirror_daemon(cluster): ) -def deploy_vrc_sample(cluster): - print(f"Applying vrc sample in cluster '{cluster}'") - kubectl.apply( - "--filename=vrc-sample.yaml", - "--namespace=rook-ceph", - context=cluster, - ) - - if len(sys.argv) != 3: print(f"Usage: {sys.argv[0]} cluster1 cluster2") sys.exit(1) @@ -265,7 +252,4 @@ log_blocklist(cluster2) wait_until_pool_mirroring_is_healthy(cluster1) wait_until_pool_mirroring_is_healthy(cluster2) -deploy_vrc_sample(cluster1) -deploy_vrc_sample(cluster2) - print("Mirroring was setup successfully") diff --git a/test/addons/rbd-mirror/start-data/kustomization.yaml b/test/addons/rbd-mirror/start-data/kustomization.yaml new file mode 100644 index 000000000..a3d38214e --- /dev/null +++ b/test/addons/rbd-mirror/start-data/kustomization.yaml @@ -0,0 +1,10 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +--- +resources: +- vrc-sample.yaml +- vgrc-sample.yaml +- rbd-mirror.yaml + +namespace: rook-ceph diff --git a/test/addons/rbd-mirror/rbd-mirror-secret.yaml b/test/addons/rbd-mirror/start-data/rbd-mirror-secret.yaml similarity index 100% rename from test/addons/rbd-mirror/rbd-mirror-secret.yaml rename to test/addons/rbd-mirror/start-data/rbd-mirror-secret.yaml diff --git a/test/addons/rbd-mirror/rbd-mirror.yaml b/test/addons/rbd-mirror/start-data/rbd-mirror.yaml similarity 
index 100% rename from test/addons/rbd-mirror/rbd-mirror.yaml rename to test/addons/rbd-mirror/start-data/rbd-mirror.yaml diff --git a/test/addons/rbd-mirror/start-data/vgrc-sample.yaml b/test/addons/rbd-mirror/start-data/vgrc-sample.yaml new file mode 100644 index 000000000..e4eae360d --- /dev/null +++ b/test/addons/rbd-mirror/start-data/vgrc-sample.yaml @@ -0,0 +1,14 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +--- +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeGroupReplicationClass +metadata: + name: vgrc-sample +spec: + provisioner: rook-ceph.rbd.csi.ceph.com + parameters: + replication.storage.openshift.io/replication-secret-name: rook-csi-rbd-provisioner + replication.storage.openshift.io/replication-secret-namespace: rook-ceph + schedulingInterval: 1m diff --git a/test/addons/rbd-mirror/vrc-sample.yaml b/test/addons/rbd-mirror/start-data/vrc-sample.yaml similarity index 100% rename from test/addons/rbd-mirror/vrc-sample.yaml rename to test/addons/rbd-mirror/start-data/vrc-sample.yaml diff --git a/test/addons/rbd-mirror/test b/test/addons/rbd-mirror/test index 1a690f9a7..97d5732fd 100755 --- a/test/addons/rbd-mirror/test +++ b/test/addons/rbd-mirror/test @@ -55,12 +55,8 @@ def rbd_mirror_image_status(cluster, image): def test_volume_replication(primary, secondary): - print(f"Deploying pvc {PVC_NAME} in cluster '{primary}'") - kubectl.apply( - f"--filename={PVC_NAME}.yaml", - "--namespace=rook-ceph", - context=primary, - ) + print(f"Deploying pvc {PVC_NAME} and vr vr-sample in cluster '{primary}'") + kubectl.apply("--kustomize=test-data", context=primary) print(f"Waiting until pvc {PVC_NAME} is bound in cluster '{primary}'") kubectl.wait( @@ -71,13 +67,6 @@ def test_volume_replication(primary, secondary): context=primary, ) - print(f"Deploying vr vr-sample in cluster '{primary}'") - kubectl.apply( - "--filename=vr-sample.yaml", - "--namespace=rook-ceph", - context=primary, - ) - print(f"Waiting until vr vr-sample is completed in cluster '{primary}'") kubectl.wait( "volumereplication/vr-sample", @@ -139,19 +128,8 @@ def test_volume_replication(primary, secondary): image_status = rbd_mirror_image_status(primary, rbd_image) print(json.dumps(image_status, indent=2)) - print(f"Deleting vr vr-sample in primary cluster '{primary}'") - kubectl.delete( - "volumereplication/vr-sample", - "--namespace=rook-ceph", - context=primary, - ) - - print(f"Deleting pvc {PVC_NAME} in primary cluster '{primary}'") - kubectl.delete( - f"pvc/{PVC_NAME}", - "--namespace=rook-ceph", - context=primary, - ) + print(f"Deleting pvc {PVC_NAME} and vr vr-sample in primary cluster '{primary}'") + kubectl.delete("--kustomize=test-data", context=primary) print(f"Replication from cluster '{primary}' to cluster '{secondary}' succeeded") diff --git a/test/addons/rbd-mirror/test-data/kustomization.yaml b/test/addons/rbd-mirror/test-data/kustomization.yaml new file mode 100644 index 000000000..7c20776d5 --- /dev/null +++ b/test/addons/rbd-mirror/test-data/kustomization.yaml @@ -0,0 +1,9 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +--- +resources: +- vr-sample.yaml +- rbd-pvc.yaml + +namespace: rook-ceph diff --git a/test/addons/rbd-mirror/rbd-pvc.yaml b/test/addons/rbd-mirror/test-data/rbd-pvc.yaml similarity index 100% rename from test/addons/rbd-mirror/rbd-pvc.yaml rename to test/addons/rbd-mirror/test-data/rbd-pvc.yaml diff --git a/test/addons/rbd-mirror/test-data/vgr-sample.yaml 
b/test/addons/rbd-mirror/test-data/vgr-sample.yaml new file mode 100644 index 000000000..0a927ccfc --- /dev/null +++ b/test/addons/rbd-mirror/test-data/vgr-sample.yaml @@ -0,0 +1,17 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +--- +apiVersion: replication.storage.openshift.io/v1alpha1 +kind: VolumeGroupReplication +metadata: + name: vgr-sample +spec: + volumeGroupReplicationClassName: vgrc-sample + replicationState: primary + source: + selector: + matchLabels: + appname: busybox + ramendr.openshift.io/consistency-group: rook-ceph-storage-id + autoResync: true diff --git a/test/addons/rbd-mirror/vr-sample.yaml b/test/addons/rbd-mirror/test-data/vr-sample.yaml similarity index 100% rename from test/addons/rbd-mirror/vr-sample.yaml rename to test/addons/rbd-mirror/test-data/vr-sample.yaml diff --git a/test/addons/rook-pool/snapshot-class.yaml b/test/addons/rook-pool/snapshot-class.yaml new file mode 100644 index 000000000..ed2f5dbc4 --- /dev/null +++ b/test/addons/rook-pool/snapshot-class.yaml @@ -0,0 +1,16 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +# yamllint disable rule:line-length +# Drived from https://raw.githubusercontent.com/rook/rook/release-1.15/deploy/examples/csi/rbd/snapshotclass.yaml +--- +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: csi-rbdplugin-snapclass +driver: rook-ceph.rbd.csi.ceph.com +parameters: + clusterID: rook-ceph + csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph +deletionPolicy: Delete diff --git a/test/addons/rook-pool/start b/test/addons/rook-pool/start index edb39f531..b06b5ca2e 100755 --- a/test/addons/rook-pool/start +++ b/test/addons/rook-pool/start @@ -14,10 +14,15 @@ from drenv import kubectl def deploy(cluster): - print("Creating rbd pool and storage class") + print("Creating RBD pool and storage/snapshot classes") + + template = drenv.template("storage-class.yaml") + yaml = template.substitute(cluster=cluster) + + kubectl.apply("--filename=-", input=yaml, context=cluster) kubectl.apply( "--filename=replica-pool.yaml", - "--filename=storage-class.yaml", + "--filename=snapshot-class.yaml", context=cluster, ) diff --git a/test/addons/rook-pool/storage-class.yaml b/test/addons/rook-pool/storage-class.yaml index 36a077b7b..bb4786d80 100644 --- a/test/addons/rook-pool/storage-class.yaml +++ b/test/addons/rook-pool/storage-class.yaml @@ -6,6 +6,8 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: rook-ceph-block + labels: + ramendr.openshift.io/storageid: rook-ceph-$cluster-1 provisioner: rook-ceph.rbd.csi.ceph.com parameters: clusterID: rook-ceph @@ -20,3 +22,4 @@ parameters: csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph csi.storage.k8s.io/fstype: ext4 reclaimPolicy: Delete +allowVolumeExpansion: true diff --git a/test/addons/submariner/base/src/pod.yaml b/test/addons/submariner/base/src/pod.yaml index a849c52a1..c2bbbce09 100644 --- a/test/addons/submariner/base/src/pod.yaml +++ b/test/addons/submariner/base/src/pod.yaml @@ -17,5 +17,7 @@ spec: - -c - | trap exit TERM - sleep 300 & - wait + while true; do + sleep 10 & + wait + done diff --git a/test/addons/submariner/start b/test/addons/submariner/start index 324f6e6fe..4f0c69ba5 100755 --- a/test/addons/submariner/start +++ b/test/addons/submariner/start @@ -3,6 +3,7 @@ # SPDX-FileCopyrightText: The RamenDR authors # SPDX-License-Identifier: Apache-2.0 +import json 
import os import sys @@ -11,7 +12,7 @@ from drenv import cluster as drenv_cluster from drenv import kubectl from drenv import subctl -VERSION = "0.17.0" +VERSION = "0.18.0" NAMESPACE = "submariner-operator" @@ -53,6 +54,9 @@ def join_cluster(cluster, broker_info): print(f"Waiting until cluster '{cluster}' is ready") drenv_cluster.wait_until_ready(cluster) + print(f"Annotating nodes in '{cluster}'") + annotate_nodes(cluster) + print(f"Joining cluster '{cluster}' to broker") subctl.join( broker_info, @@ -63,6 +67,29 @@ def join_cluster(cluster, broker_info): ) +def annotate_nodes(cluster): + """ + Annotate all nodes with the gateway public IP address. Required when is + having multiple interfaces and some networks are not shared (e.g. lima user + network). + """ + out = kubectl.get("node", "--output=json", context=cluster) + nodes = json.loads(out) + for node in nodes["items"]: + for item in node["status"]["addresses"]: + if item["type"] == "InternalIP": + break + else: + raise RuntimeError(f"Cannot find node '{node['metadata']['name']}' address") + print(f"Annotating '{node['metadata']['name']}' address '{item['address']}'") + kubectl.annotate( + f"node/{node['metadata']['name']}", + {"gateway.submariner.io/public-ip": f"ipv4:{item['address']}"}, + overwrite=True, + context=cluster, + ) + + def wait_for_cluster(cluster): print(f"Waiting for submariner deployuments in cluster '{cluster}'") wait_for_deployments(cluster, CLUSTER_DEPLOYMENTS, NAMESPACE) @@ -92,6 +119,9 @@ os.chdir(os.path.dirname(__file__)) broker = sys.argv[1] clusters = sys.argv[2:] +for cluster in [broker, *clusters]: + drenv_cluster.wait_until_ready(cluster) + broker_info = deploy_broker(broker) for cluster in clusters: diff --git a/test/addons/submariner/test b/test/addons/submariner/test index 252410c81..1ca8d9279 100755 --- a/test/addons/submariner/test +++ b/test/addons/submariner/test @@ -162,45 +162,7 @@ def service_address(namespace): return f"{SERVICE}.{namespace}.svc.clusterset.local" -def wait_for_dns(cluster, namespace, timeout=240): - """ - Unfortunatley even when we wait for eveything, DNS lookup can fail for more - than 60 seconds after deploying submariner on a new cluster. After the - initial DNS always succceeds on the first try. - - We see random failures with 120 seconds timeout, trying 240. - """ - start = time.monotonic() - deadline = start + timeout - dns_name = service_address(namespace) - delay = 1 - - while True: - print(f"Looking up '{dns_name}' in cluster '{cluster}'") - try: - out = kubectl.exec( - "test", - f"--namespace={namespace}", - "--", - "nslookup", - dns_name, - context=cluster, - ) - except commands.Error as e: - if time.monotonic() > deadline: - raise - - print(f"Lookup failed: {e}") - print(f"Retrying in {delay} seconds") - time.sleep(delay) - delay = min(delay * 2, 16) - else: - print(f"Lookup completed in {time.monotonic() - start:.3f} seconds") - print(out) - break - - -def test_connectivity(cluster, namespace, timeout=60): +def test_connectivity(cluster, namespace, timeout=300): """ Test that cluster can access service exported on the other cluster. 
@@ -271,10 +233,7 @@ wait_for_service_import(clusters[1], NS1) wait_for_service_export(clusters[1], NS2) wait_for_service_import(clusters[0], NS2) -wait_for_dns(clusters[1], NS1) test_connectivity(clusters[1], NS1) - -wait_for_dns(clusters[0], NS2) test_connectivity(clusters[0], NS2) unexport_service(clusters[0], NS1) diff --git a/test/addons/volsync/test b/test/addons/volsync/test index d1f79b085..61c236b69 100755 --- a/test/addons/volsync/test +++ b/test/addons/volsync/test @@ -7,6 +7,8 @@ import json import os import sys +import yaml + import drenv from drenv import kubectl from drenv import subctl @@ -130,7 +132,7 @@ def run_replication(cluster): ) status = json.loads(out) print("Replication status:") - print(json.dumps(status, indent=2)) + print(yaml.dump(status)) def teardown(cluster1, cluster2): @@ -138,8 +140,13 @@ def teardown(cluster1, cluster2): Remove deployments from both clusters. This also deletes additonal resources created in the same namespace. """ - print("Cleaning up clusters") + print(f"Delete replication source in cluster '{cluster1}'") + kubectl.delete("--filename", "source/replication-src.yaml", context=cluster1) + + print(f"Unexporting volsync service in cluster '{cluster2}'") subctl.unexport("service", VOLSYNC_SERVICE, cluster2, namespace=NAMESPACE) + + print(f"Delete source in cluster '{cluster1}'") kubectl.delete( "--kustomize", "source", @@ -147,6 +154,8 @@ def teardown(cluster1, cluster2): "--wait=false", context=cluster1, ) + + print(f"Delete destination in cluster '{cluster2}'") kubectl.delete( "--kustomize", "destination", @@ -154,8 +163,12 @@ def teardown(cluster1, cluster2): "--wait=false", context=cluster2, ) - kubectl.delete("--kustomize", "source", "--ignore-not-found", context=cluster1) - kubectl.delete("--kustomize", "destination", "--ignore-not-found", context=cluster2) + + print(f"Waiting until namespace '{NAMESPACE}' is deleted in cluster '{cluster1}'") + kubectl.wait("ns", NAMESPACE, "--for=delete", "--timeout=120s", context=cluster1) + + print(f"Waiting until namespace '{NAMESPACE}' is deleted in cluster '{cluster2}'") + kubectl.wait("ns", NAMESPACE, "--for=delete", "--timeout=120s", context=cluster2) if len(sys.argv) != 3: diff --git a/test/drenv/__init__.py b/test/drenv/__init__.py index 38ee830c5..6218b9c85 100644 --- a/test/drenv/__init__.py +++ b/test/drenv/__init__.py @@ -105,7 +105,7 @@ def temporary_kubeconfig(prefix="drenv."): """ with tempfile.TemporaryDirectory(prefix=prefix) as tmpdir: kubeconfig = os.path.join(tmpdir, "kubeconfig") - out = kubectl.config("view", "--output=yaml") + out = kubectl.config("view", "--flatten", "--output=yaml") with open(kubeconfig, "w") as f: f.write(out) env = dict(os.environ) diff --git a/test/drenv/__main__.py b/test/drenv/__main__.py index d67b2dd2d..7fd709483 100644 --- a/test/drenv/__main__.py +++ b/test/drenv/__main__.py @@ -13,18 +13,16 @@ from functools import partial -import yaml - import drenv from . import cache from . import cluster from . import commands -from . import containerd from . import envfile from . import kubectl -from . import minikube +from . import providers from . import ramen from . import shutdown +from . 
import yaml ADDONS_DIR = "addons" @@ -78,6 +76,11 @@ def parse_args(): metavar="N", help="maximum number of workers per profile", ) + p.add_argument( + "--timeout", + type=int, + help="time in seconds to wait until clsuter is started", + ) p = add_command(sp, "stop", do_stop, help="stop an environment") p.add_argument( @@ -108,14 +111,21 @@ def parse_args(): help="if specified, comma separated list of namespaces to gather data from", ) + p = add_command(sp, "load", do_load, help="load an image into the cluster") + p.add_argument( + "--image", + required=True, + help="image to load into the cluster in tar format", + ) + add_command(sp, "delete", do_delete, help="delete an environment") add_command(sp, "suspend", do_suspend, help="suspend virtual machines") add_command(sp, "resume", do_resume, help="resume virtual machines") add_command(sp, "dump", do_dump, help="dump an environment yaml") add_command(sp, "clear", do_clear, help="cleared cached resources", envfile=False) - add_command(sp, "setup", do_setup, help="setup minikube for drenv", envfile=False) - add_command(sp, "cleanup", do_cleanup, help="cleanup minikube", envfile=False) + add_command(sp, "setup", do_setup, help="setup host for drenv") + add_command(sp, "cleanup", do_cleanup, help="cleanup host") return parser.parse_args() @@ -183,13 +193,19 @@ def handle_termination_signal(signo, frame): def do_setup(args): - logging.info("[main] Setting up minikube for drenv") - minikube.setup_files() + env = load_env(args) + for name in set(p["provider"] for p in env["profiles"]): + logging.info("[main] Setting up '%s' for drenv", name) + provider = providers.get(name) + provider.setup() def do_cleanup(args): - logging.info("[main] Cleaning up minikube") - minikube.cleanup_files() + env = load_env(args) + for name in set(p["provider"] for p in env["profiles"]): + logging.info("[main] Cleaning up '%s' for drenv", name) + provider = providers.get(name) + provider.cleanup() def do_clear(args): @@ -295,18 +311,32 @@ def do_delete(args): ) +def do_load(args): + env = load_env(args) + start = time.monotonic() + logging.info("[%s] Loading image '%s'", env["name"], args.image) + execute(load_image, env["profiles"], "profiles", image=args.image) + logging.info( + "[%s] Image loaded in %.2f seconds", + env["name"], + time.monotonic() - start, + ) + + def do_suspend(args): env = load_env(args) logging.info("[%s] Suspending environment", env["name"]) for profile in env["profiles"]: - run("virsh", "-c", "qemu:///system", "suspend", profile["name"]) + provider = providers.get(profile["provider"]) + provider.suspend(profile) def do_resume(args): env = load_env(args) logging.info("[%s] Resuming environment", env["name"]) for profile in env["profiles"]: - run("virsh", "-c", "qemu:///system", "resume", profile["name"]) + provider = providers.get(profile["provider"]) + provider.resume(profile) def do_dump(args): @@ -351,18 +381,14 @@ def collect_addons(env): def start_cluster(profile, hooks=(), args=None, **options): - if profile["external"]: - logging.debug("[%s] Skipping external cluster", profile["name"]) - else: - is_restart = minikube_profile_exists(profile["name"]) - start_minikube_cluster(profile, verbose=args.verbose) - if profile["containerd"]: - logging.info("[%s] Configuring containerd", profile["name"]) - containerd.configure(profile) - if is_restart: - restart_failed_deployments(profile) - else: - minikube.load_files(profile["name"]) + provider = providers.get(profile["provider"]) + existing = provider.exists(profile) + + provider.start(profile, 
verbose=args.verbose, timeout=args.timeout) + provider.configure(profile, existing=existing) + + if existing: + restart_failed_deployments(profile) if hooks: execute( @@ -387,17 +413,14 @@ def stop_cluster(profile, hooks=(), **options): allow_failure=True, ) - if profile["external"]: - logging.debug("[%s] Skipping external cluster", profile["name"]) - elif cluster_status != cluster.UNKNOWN: - stop_minikube_cluster(profile) + if cluster_status != cluster.UNKNOWN: + provider = providers.get(profile["provider"]) + provider.stop(profile) def delete_cluster(profile, **options): - if profile["external"]: - logging.debug("[%s] Skipping external cluster", profile["name"]) - else: - delete_minikube_cluster(profile) + provider = providers.get(profile["provider"]) + provider.delete(profile) profile_config = drenv.config_dir(profile["name"]) if os.path.exists(profile_config): @@ -405,78 +428,17 @@ def delete_cluster(profile, **options): shutil.rmtree(profile_config) -def minikube_profile_exists(name): - out = minikube.profile("list", output="json") - profiles = json.loads(out) - for profile in profiles["valid"]: - if profile["Name"] == name: - return True - return False +def load_image(profile, image=None, **options): + provider = providers.get(profile["provider"]) + provider.load(profile, image) -def start_minikube_cluster(profile, verbose=False): - start = time.monotonic() - logging.info("[%s] Starting minikube cluster", profile["name"]) - - minikube.start( - profile["name"], - driver=profile["driver"], - container_runtime=profile["container_runtime"], - extra_disks=profile["extra_disks"], - disk_size=profile["disk_size"], - network=profile["network"], - nodes=profile["nodes"], - cni=profile["cni"], - cpus=profile["cpus"], - memory=profile["memory"], - addons=profile["addons"], - service_cluster_ip_range=profile["service_cluster_ip_range"], - extra_config=profile["extra_config"], - feature_gates=profile["feature_gates"], - alsologtostderr=verbose, - ) - - logging.info( - "[%s] Cluster started in %.2f seconds", - profile["name"], - time.monotonic() - start, - ) - - -def stop_minikube_cluster(profile): - start = time.monotonic() - logging.info("[%s] Stopping cluster", profile["name"]) - minikube.stop(profile["name"]) - logging.info( - "[%s] Cluster stopped in %.2f seconds", - profile["name"], - time.monotonic() - start, - ) - - -def delete_minikube_cluster(profile): - start = time.monotonic() - logging.info("[%s] Deleting cluster", profile["name"]) - minikube.delete(profile["name"]) - logging.info( - "[%s] Cluster deleted in %.2f seconds", - profile["name"], - time.monotonic() - start, - ) - - -def restart_failed_deployments(profile, initial_wait=30): +def restart_failed_deployments(profile): """ - When restarting, kubectl can report stale status for a while, before it - starts to report real status. Then it takes a while until all deployments - become available. - - We first wait for initial_wait seconds to give Kubernetes chance to fail - liveness and readiness checks. Then we restart for failed deployments. + When restarting after failure, some deployment may enter failing state. + This is not handled by the addons. Restarting the deployment solves this + issue. This may also be solved at the addon level. 
""" - logging.info("[%s] Waiting for fresh status", profile["name"]) - time.sleep(initial_wait) - logging.info("[%s] Looking up failed deployments", profile["name"]) debug = partial(logging.debug, f"[{profile['name']}] %s") diff --git a/test/drenv/cluster.py b/test/drenv/cluster.py index 4afe546bc..aca12b3c3 100644 --- a/test/drenv/cluster.py +++ b/test/drenv/cluster.py @@ -5,6 +5,7 @@ import time from . import kubectl +from . import commands # Cluster does not have kubeconfig. UNKNOWN = "unknwon" @@ -12,7 +13,7 @@ # Cluster has kubeconfig. CONFIGURED = "configured" -# APIServer is responding. +# APIServer is ready. READY = "ready" @@ -20,21 +21,22 @@ def status(name): if not kubeconfig(name): return UNKNOWN - out = kubectl.version(context=name, output="json") - version_info = json.loads(out) - if "serverVersion" not in version_info: + try: + readyz(name) + except commands.Error: return CONFIGURED return READY -def wait_until_ready(name, timeout=600): +def wait_until_ready(name, timeout=600, log=print): """ Wait until a cluster is ready. This is useful when starting profiles concurrently, when one profile needs - to wait for another profile. + to wait for another profile, or when restarting a stopped cluster. """ + log(f"Waiting until cluster '{name}' is ready") deadline = time.monotonic() + timeout delay = min(1.0, timeout / 60) last_status = None @@ -43,7 +45,7 @@ def wait_until_ready(name, timeout=600): current_status = status(name) if current_status != last_status: - print(f"Cluster '{name}' is {current_status}") + log(f"Cluster '{name}' is {current_status}") last_status = current_status if current_status == READY: @@ -77,3 +79,14 @@ def kubeconfig(context_name): return cluster return {} + + +def readyz(name, verbose=False): + """ + Check if API server is ready. + https://kubernetes.io/docs/reference/using-api/health-checks/ + """ + path = "/readyz" + if verbose: + path += "?verbose" + return kubectl.get("--raw", path, context=name) diff --git a/test/drenv/commands.py b/test/drenv/commands.py index af35538ba..b550bd3b9 100644 --- a/test/drenv/commands.py +++ b/test/drenv/commands.py @@ -70,7 +70,7 @@ class StreamTimeout(Exception): """ -def run(*args, input=None, decode=True, env=None): +def run(*args, input=None, stdin=None, decode=True, env=None, cwd=None): """ Run command args and return the output of the command. @@ -90,11 +90,11 @@ def run(*args, input=None, decode=True, env=None): try: p = subprocess.Popen( args, - # Avoid blocking foerver if there is no input. - stdin=subprocess.PIPE if input else subprocess.DEVNULL, + stdin=_select_stdin(input, stdin), stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, + cwd=cwd, ) except OSError as e: raise Error(args, f"Could not execute: {e}").with_exception(e) @@ -108,10 +108,25 @@ def run(*args, input=None, decode=True, env=None): return output.decode() if decode else output -def watch(*args, input=None, keepends=False, decode=True, timeout=None, env=None): +def watch( + *args, + input=None, + keepends=False, + decode=True, + timeout=None, + env=None, + stdin=None, + stderr=subprocess.PIPE, + cwd=None, +): """ Run command args, iterating over lines read from the child process stdout. + Some commands have no output and log everyting to stderr (like drenv). To + watch the output call with stderr=subprocess.STDOUT. When such command + fails, we have always have empty error, since the content was already + yielded to the caller. + Assumes that the child process output UTF-8. Will raise if the command outputs binary data. 
This is not a problem in this projects since all our commands are text based. @@ -141,11 +156,11 @@ def watch(*args, input=None, keepends=False, decode=True, timeout=None, env=None try: p = subprocess.Popen( args, - # Avoid blocking foerver if there is no input. - stdin=subprocess.PIPE if input else subprocess.DEVNULL, + stdin=_select_stdin(input, stdin), stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + stderr=stderr, env=env, + cwd=cwd, ) except OSError as e: raise Error(args, f"Could not execute: {e}").with_exception(e) @@ -260,6 +275,17 @@ def stream(proc, input=None, bufsize=32 << 10, timeout=None): yield key.data, data +def _select_stdin(input=None, stdin=None): + if input and stdin: + raise RuntimeError("intput and stdin are mutually exclusive") + if input: + return subprocess.PIPE + if stdin: + return stdin + # Avoid blocking foerver if there is no input. + return subprocess.DEVNULL + + def _remaining_time(deadline): if deadline is None: return None diff --git a/test/drenv/commands_test.py b/test/drenv/commands_test.py index 9b9bcada6..d9e479e17 100644 --- a/test/drenv/commands_test.py +++ b/test/drenv/commands_test.py @@ -161,6 +161,14 @@ def test_watch_with_input_large(): assert output == [text] +def test_watch_stdin(tmpdir): + path = tmpdir.join("file") + path.write("line1\nline2\n") + with open(path) as f: + output = list(commands.watch("cat", stdin=f)) + assert output == ["line1", "line2"] + + def test_watch_lines(): script = """ for i in range(10): @@ -170,6 +178,35 @@ def test_watch_lines(): assert output == ["line %d" % i for i in range(10)] +def test_watch_stderr_success(): + # Watching command like drenv, logging only to stderr without any output. + script = r""" +import sys +for i in range(10): + sys.stderr.write(f"line {i}\n") +""" + cmd = ["python3", "-c", script] + output = list(commands.watch(*cmd, stderr=subprocess.STDOUT)) + assert output == [f"line {i}" for i in range(10)] + + +def test_watch_stderr_error(): + # When stderr is redirected to stdout the error is empty. + script = r""" +import sys +sys.stderr.write("before error\n") +sys.exit("error") +""" + cmd = ["python3", "-c", script] + output = [] + with pytest.raises(commands.Error) as e: + for line in commands.watch(*cmd, stderr=subprocess.STDOUT): + output.append(line) + + assert output == ["before error", "error"] + assert e.value.error == "" + + def test_watch_partial_lines(): script = """ import time @@ -318,6 +355,12 @@ def test_watch_env(): assert output == [env["DRENV_COMMAND_TEST"]] +def test_watch_cwd(tmpdir): + tmpdir.join("file").write("content") + output = list(commands.watch("cat", "file", cwd=str(tmpdir))) + assert output == ["content"] + + # Running commands. @@ -331,6 +374,14 @@ def test_run_input(): assert output == "input" +def test_run_stdin(tmpdir): + path = tmpdir.join("file") + path.write("line1\nline2\n") + with open(path) as f: + output = commands.run("cat", stdin=f) + assert output == "line1\nline2\n" + + def test_run_input_non_ascii(): output = commands.run("cat", input="\u05d0") assert output == "\u05d0" @@ -405,6 +456,12 @@ def test_run_env(): assert out == env["DRENV_COMMAND_TEST"] +def test_run_cwd(tmpdir): + tmpdir.join("file").write("content") + output = commands.run("cat", "file", cwd=str(tmpdir)) + assert output == "content" + + # Formatting errors. diff --git a/test/drenv/containerd.py b/test/drenv/containerd.py index 2756cb77f..1815be6f0 100644 --- a/test/drenv/containerd.py +++ b/test/drenv/containerd.py @@ -6,17 +6,16 @@ import toml -from . import minikube from . 
import patch -def configure(profile): +def configure(provider, profile): config = f"{profile['name']}:/etc/containerd/config.toml" with tempfile.TemporaryDirectory() as tmpdir: tmp = os.path.join(tmpdir, "config.toml") - minikube.cp(profile["name"], config, tmp) + provider.cp(profile["name"], config, tmp) with open(tmp) as f: old_config = toml.load(f) @@ -24,6 +23,6 @@ def configure(profile): with open(tmp, "w") as f: toml.dump(new_config, f) - minikube.cp(profile["name"], tmp, config) + provider.cp(profile["name"], tmp, config) - minikube.ssh(profile["name"], "sudo systemctl restart containerd") + provider.ssh(profile["name"], "sudo systemctl restart containerd") diff --git a/test/drenv/drenv_test.py b/test/drenv/drenv_test.py index 9454c323f..4f972ad63 100644 --- a/test/drenv/drenv_test.py +++ b/test/drenv/drenv_test.py @@ -2,15 +2,17 @@ # SPDX-License-Identifier: Apache-2.0 import json +import logging import os +import subprocess -import yaml import pytest import drenv from drenv import cluster from drenv import commands from drenv import kubectl +from drenv import yaml EXAMPLE_ENV = os.path.join("envs", "example.yaml") EXTERNAL_ENV = os.path.join("envs", "external.yaml") @@ -19,23 +21,23 @@ def test_start_unknown(): # Cluster does not exists, so it should fail. with pytest.raises(commands.Error): - commands.run("drenv", "start", "--name-prefix", "unknown-", EXTERNAL_ENV) + watch("drenv", "start", "--name-prefix", "unknown-", EXTERNAL_ENV, "--verbose") def test_start(tmpenv): - commands.run("drenv", "start", "--name-prefix", tmpenv.prefix, EXTERNAL_ENV) + watch("drenv", "start", "--name-prefix", tmpenv.prefix, EXTERNAL_ENV, "--verbose") assert cluster.status(tmpenv.prefix + "cluster") == cluster.READY def test_dump_without_prefix(): - out = commands.run("drenv", "dump", EXAMPLE_ENV) + out = run("drenv", "dump", EXAMPLE_ENV) dump = yaml.safe_load(out) assert dump["profiles"][0]["name"] == "ex1" assert dump["profiles"][1]["name"] == "ex2" def test_dump_with_prefix(): - out = commands.run("drenv", "dump", "--name-prefix", "test-", EXAMPLE_ENV) + out = run("drenv", "dump", "--name-prefix", "test-", EXAMPLE_ENV) dump = yaml.safe_load(out) assert dump["profiles"][0]["name"] == "test-ex1" assert dump["profiles"][1]["name"] == "test-ex2" @@ -43,23 +45,23 @@ def test_dump_with_prefix(): def test_stop_unknown(): # Does nothing, so should succeed. - commands.run("drenv", "stop", "--name-prefix", "unknown-", EXTERNAL_ENV) + run("drenv", "stop", "--name-prefix", "unknown-", EXTERNAL_ENV) def test_stop(tmpenv): # Stop does nothing, so cluster must be ready. - commands.run("drenv", "stop", "--name-prefix", tmpenv.prefix, EXTERNAL_ENV) + run("drenv", "stop", "--name-prefix", tmpenv.prefix, EXTERNAL_ENV) assert cluster.status(tmpenv.prefix + "cluster") == cluster.READY def test_delete_unknown(): # Does nothing, so should succeed. - commands.run("drenv", "delete", "--name-prefix", "unknown-", EXTERNAL_ENV) + run("drenv", "delete", "--name-prefix", "unknown-", EXTERNAL_ENV) def test_delete(tmpenv): # Delete does nothing, so cluster must be ready. 
- commands.run("drenv", "delete", "--name-prefix", tmpenv.prefix, EXTERNAL_ENV) + run("drenv", "delete", "--name-prefix", tmpenv.prefix, EXTERNAL_ENV) assert cluster.status(tmpenv.prefix + "cluster") == cluster.READY @@ -76,7 +78,7 @@ def test_missing_addon(tmpdir): path = tmpdir.join("missing-addon.yaml") path.write(content) with pytest.raises(commands.Error): - commands.run("drenv", "start", str(path)) + run("drenv", "start", str(path)) def test_kustomization(tmpdir): @@ -147,9 +149,19 @@ def get_config(context=None, kubeconfig=None): args = [ "view", "--minify", + "--flatten", "--output=json", ] if kubeconfig: args.append(f"--kubeconfig={kubeconfig}") out = kubectl.config(*args, context=context) return json.loads(out) + + +def run(*args): + return commands.run(*args) + + +def watch(*args): + for line in commands.watch(*args, stderr=subprocess.STDOUT): + logging.debug("%s", line) diff --git a/test/drenv/envfile.py b/test/drenv/envfile.py index 403b95b15..d166a7325 100644 --- a/test/drenv/envfile.py +++ b/test/drenv/envfile.py @@ -1,18 +1,24 @@ # SPDX-FileCopyrightText: The RamenDR authors # SPDX-License-Identifier: Apache-2.0 -import os import copy +import logging +import os import platform -import yaml +from . import yaml +PROVIDER = "$provider" VM = "$vm" CONTAINER = "$container" SHARED_NETWORK = "$network" _PLATFORM_DEFAULTS = { "__default__": { + PROVIDER: { + "x86_64": "", + "arm64": "", + }, VM: { "x86_64": "", "arm64": "", @@ -24,6 +30,10 @@ }, }, "linux": { + PROVIDER: { + "x86_64": "minikube", + "arm64": "", + }, VM: { "x86_64": "kvm2", "arm64": "", @@ -35,9 +45,13 @@ }, }, "darwin": { + PROVIDER: { + "x86_64": "lima", + "arm64": "lima", + }, VM: { - "x86_64": "hyperkit", - "arm64": "qemu", + "x86_64": "", + "arm64": "", }, CONTAINER: "podman", SHARED_NETWORK: { @@ -49,9 +63,9 @@ def platform_defaults(): - # By default, use minikube defaults. - + # By default, use provider defaults. operating_system = platform.system().lower() + logging.debug("[envfile] Detected os: '%s'", operating_system) return _PLATFORM_DEFAULTS.get(operating_system, _PLATFORM_DEFAULTS["__default__"]) @@ -122,7 +136,8 @@ def _validate_profile(profile, addons_root): # If True, this is an external cluster and we don't have to start it. profile.setdefault("external", False) - # Properties for minikube created cluster. + # Common properties. + profile.setdefault("provider", PROVIDER) profile.setdefault("driver", VM) profile.setdefault("container_runtime", "") profile.setdefault("extra_disks", 0) @@ -140,6 +155,9 @@ def _validate_profile(profile, addons_root): profile.setdefault("containerd", None) profile.setdefault("workers", []) + # Lima provider properties. 
+ profile.setdefault("rosetta", True) + _validate_platform_defaults(profile) for i, worker in enumerate(profile["workers"]): @@ -149,6 +167,10 @@ def _validate_profile(profile, addons_root): def _validate_platform_defaults(profile): platform = platform_defaults() machine = os.uname().machine + logging.debug("[envfile] Detected machine: '%s'", machine) + + if profile["provider"] == PROVIDER: + profile["provider"] = platform[PROVIDER][machine] if profile["driver"] == VM: profile["driver"] = platform[VM][machine] @@ -158,6 +180,10 @@ def _validate_platform_defaults(profile): if profile["network"] == SHARED_NETWORK: profile["network"] = platform[SHARED_NETWORK][machine] + logging.debug("[envfile] Using provider: '%s'", profile["provider"]) + logging.debug("[envfile] Using driver: '%s'", profile["driver"]) + logging.debug("[envfile] Using network: '%s'", profile["network"]) + def _validate_worker(worker, env, addons_root, index): worker["name"] = f'{env["name"]}/{worker.get("name", index)}' diff --git a/test/drenv/kubeconfig.py b/test/drenv/kubeconfig.py new file mode 100644 index 000000000..06c3bc8f3 --- /dev/null +++ b/test/drenv/kubeconfig.py @@ -0,0 +1,142 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +import logging +import os +import tempfile +import time +from contextlib import contextmanager + +from . import kubectl +from . import yaml + +DEFAULT_CONFIG = os.path.expanduser("~/.kube/config") + + +def merge(profile, *sources, target=DEFAULT_CONFIG): + """ + Merge one or more source kubeconfigs into target kubeconfig, adding or + replacing items in the target. + """ + logging.debug( + "[%s] Merging kubeconfigs %s into %s", + profile["name"], + sources, + target, + ) + configs = [target] + configs.extend(sources) + env = dict(os.environ) + env["KUBECONFIG"] = ":".join(configs) + with _lockfile(profile, target): + data = kubectl.config("view", "--flatten", env=env) + _write(target, data) + + +def remove(profile, target=DEFAULT_CONFIG): + """ + Remove context, cluster, and user from target. Assumes that all share the + same name. + + kubectl config is not idempotent and fail when deleting non-existing items. + It also does not provide a way to list clusters and users, so we cannot + check if a user or cluster exists. + """ + logging.debug("[%s] Removing cluster config from '%s'", profile["name"], target) + modified = False + with _lockfile(profile, target): + try: + config = _load(target) + except FileNotFoundError: + return + + for k in ("contexts", "clusters", "users"): + old = config.get(k, []) + new = [v for v in old if v["name"] != profile["name"]] + if len(new) < len(old): + config[k] = new + modified = True + + if config["current-context"] == profile["name"]: + config["current-context"] = "" + modified = True + + if not modified: + return + + data = yaml.dump(config) + _write(target, data) + + +@contextmanager +def _lockfile(profile, target, attempts=10, delay=0.01, factor=2.0): + """ + Lock file compatible with `kubectl config` or other go programs using the + same library. + https://github.com/kubernetes/client-go/blob/master/tools/clientcmd/loader.go + + This is a pretty bad way to lock files, but we don't have a choices since + kubectl is using this method. 
+ + In client-go there is no retry - if the lockfile exists, the command will + fail: + + % kubectl config rename-context minikube foo + error: open /Users/nsoffer/.kube/config.lock: file exists + + This is not usedul behavior for drenv since it leads to retrying of very + slow operations, so we retry the operation with exponential backoff before + failing. + + However if the lockfile was left over after killing kubectl or drenv, the + only way to recover is to delete the lock file. + """ + lockfile = target + ".lock" + + logging.debug("[%s] Creating lockfile '%s'", profile["name"], lockfile) + os.makedirs(os.path.dirname(lockfile), exist_ok=True) + for i in range(1, attempts + 1): + try: + fd = os.open(lockfile, os.O_CREAT | os.O_EXCL, 0) + except FileExistsError: + if i == attempts: + raise + time.sleep(delay) + delay *= factor + else: + os.close(fd) + break + try: + yield + finally: + logging.debug("[%s] Removing lockfile '%s'", profile["name"], lockfile) + try: + os.remove(lockfile) + except FileNotFoundError: + logging.warning( + "[%s] Lockfile '%s' was removed while locked", + profile["name"], + lockfile, + ) + + +def _load(target): + with open(target) as f: + return yaml.safe_load(f) + + +def _write(target, data): + fd, tmp = tempfile.mkstemp( + dir=os.path.dirname(target), + prefix=os.path.basename(target), + suffix=".tmp", + ) + try: + os.write(fd, data.encode()) + os.fsync(fd) + os.rename(tmp, target) + except BaseException: + os.remove(tmp) + raise + finally: + os.close(fd) diff --git a/test/drenv/kubectl.py b/test/drenv/kubectl.py index 35acbfe77..693129ade 100644 --- a/test/drenv/kubectl.py +++ b/test/drenv/kubectl.py @@ -22,11 +22,11 @@ def version(context=None, output=None): raise -def config(*args, context=None): +def config(*args, env=None, context=None): """ Run kubectl config ... and return the output. """ - return _run("config", *args, context=context) + return _run("config", *args, env=env, context=context) def create(*args, context=None): @@ -200,12 +200,12 @@ def gather(contexts, namespaces=None, directory=None): commands.run(*cmd) -def _run(cmd, *args, context=None): +def _run(cmd, *args, env=None, context=None): cmd = ["kubectl", cmd] if context: cmd.extend(("--context", context)) cmd.extend(args) - return commands.run(*cmd) + return commands.run(*cmd, env=env) def _watch(cmd, *args, input=None, context=None, log=print): diff --git a/test/drenv/providers/__init__.py b/test/drenv/providers/__init__.py new file mode 100644 index 000000000..7c63a2f59 --- /dev/null +++ b/test/drenv/providers/__init__.py @@ -0,0 +1,8 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +import importlib + + +def get(name): + return importlib.import_module("drenv.providers." 
+ name) diff --git a/test/drenv/providers/external.py b/test/drenv/providers/external.py new file mode 100644 index 000000000..d1f0ae835 --- /dev/null +++ b/test/drenv/providers/external.py @@ -0,0 +1,77 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +import logging +import time +from functools import partial + +from drenv import cluster + +# Provider scope + + +def setup(): + logging.info("[external] Skipping setup for external provider") + + +def cleanup(): + logging.info("[external] Skipping cleanup for external provider") + + +# Cluster scope + + +def exists(profile): + return True + + +def start(profile, verbose=False, timeout=None): + start = time.monotonic() + logging.info("[%s] Checking external cluster status", profile["name"]) + + # Fail fast if cluster is not configured; we cannot recover from this. + status = cluster.status(profile["name"]) + if status == cluster.UNKNOWN: + raise RuntimeError(f"Cluster '{profile['name']}' does not exist") + + # Otherwise handle temporary outage gracefully. + debug = partial(logging.debug, f"[{profile['name']}] %s") + cluster.wait_until_ready(profile["name"], timeout=60, log=debug) + + logging.info( + "[%s] Cluster ready in %.2f seconds", + profile["name"], + time.monotonic() - start, + ) + + +def configure(profile, existing=False): + logging.info("[%s] Skipping configure for external cluster", profile["name"]) + + +def stop(profile): + logging.info("[%s] Skipping stop for external cluster", profile["name"]) + + +def delete(profile): + logging.info("[%s] Skipping delete for external cluster", profile["name"]) + + +def load(profile, image): + logging.info("[%s] Skipping load image for external cluster", profile["name"]) + + +def suspend(profile): + logging.info("[%s] Skipping suspend for external cluster", profile["name"]) + + +def resume(profile): + logging.info("[%s] Skipping resume for external cluster", profile["name"]) + + +def cp(name, src, dst): + logging.warning("[%s] cp not implemented yet for external cluster", name) + + +def ssh(name, command): + logging.warning("[%s] ssh not implemented yet for external cluster", name) diff --git a/test/drenv/providers/lima/__init__.py b/test/drenv/providers/lima/__init__.py new file mode 100644 index 000000000..d2db1c75c --- /dev/null +++ b/test/drenv/providers/lima/__init__.py @@ -0,0 +1,344 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +import importlib.resources as resources +import json +import logging +import os +import subprocess +import tempfile +import threading +import time +from functools import partial + +from drenv import cluster +from drenv import commands +from drenv import kubeconfig +from drenv import yaml + +LIMACTL = "limactl" + +# Important lima statuses +RUNNING = "Running" +STOPPED = "Stopped" + +# Options ignored by this provider. +# TODO: implement what we can. +UNSUPPORTED_OPTIONS = ( + "addons", + "containerd", + "driver", + "extra_config", + "feature_gates", + "network", + "service_cluster_ip_range", +) + +# limactl delete is racy, trying to access lima.yaml in other clusters and +# fails when the files are deleted by another limactl process. Until limactl is +# fixed, ensure only a single concurrent delete.
+
+# limactl delete is racy: it tries to access lima.yaml in other clusters and
+# fails when the files are deleted by another limactl process. Until limactl
+# is fixed, ensure only a single concurrent delete.
+_delete_vm_lock = threading.Lock()
+
+# Provider scope
+
+
+def setup():
+    pass
+
+
+def cleanup():
+    pass
+
+
+# Cluster scope
+
+
+def exists(profile):
+    names = _run("list", "--format", "{{.Name}}", context=profile["name"])
+    for line in names.splitlines():
+        if line == profile["name"]:
+            return True
+    return False
+
+
+def start(profile, verbose=False, timeout=None):
+    start = time.monotonic()
+    logging.info("[%s] Starting lima cluster", profile["name"])
+
+    if not exists(profile):
+        _log_unsupported_options(profile)
+        with tempfile.NamedTemporaryFile(
+            prefix=f"drenv.lima.{profile['name']}.tmp",
+        ) as tmp:
+            _write_config(profile, tmp.name)
+            _create_vm(profile, tmp.name)
+
+    # Get vm before starting to detect a stopped vm.
+    vm = _get_vm(profile)
+
+    _start_vm(profile, timeout=timeout)
+    _add_kubeconfig(profile, vm)
+
+    debug = partial(logging.debug, f"[{profile['name']}] %s")
+    cluster.wait_until_ready(profile["name"], timeout=30, log=debug)
+
+    if vm["status"] == STOPPED:
+        # We have random failures (e.g. ocm webhooks) when starting a stopped
+        # cluster. Until we find a better way, wait to give the system more
+        # time to become stable.
+        # TODO: find a better way.
+        time.sleep(15)
+
+    logging.info(
+        "[%s] Cluster started in %.2f seconds",
+        profile["name"],
+        time.monotonic() - start,
+    )
+
+
+def configure(profile, existing=False):
+    # Cluster is configured when created.
+    pass
+
+
+def stop(profile):
+    start = time.monotonic()
+    logging.info("[%s] Stopping lima cluster", profile["name"])
+
+    # Stop is not idempotent, and using stop -f does not shut down the guest
+    # cleanly, resulting in failures on the next start.
+    vm = _get_vm(profile)
+    if vm["status"] == RUNNING:
+        _stop_vm(profile)
+
+    _remove_kubeconfig(profile)
+
+    logging.info(
+        "[%s] Cluster stopped in %.2f seconds",
+        profile["name"],
+        time.monotonic() - start,
+    )
+
+
+def delete(profile):
+    start = time.monotonic()
+    logging.info("[%s] Deleting lima cluster", profile["name"])
+
+    with _delete_vm_lock:
+        _delete_vm(profile)
+
+    _delete_additional_disks(profile)
+    _remove_kubeconfig(profile)
+
+    logging.info(
+        "[%s] Cluster deleted in %.2f seconds",
+        profile["name"],
+        time.monotonic() - start,
+    )
+
+
+def load(profile, image):
+    start = time.monotonic()
+    logging.info("[%s] Loading image", profile["name"])
+    with open(image) as f:
+        _watch(
+            "shell",
+            profile["name"],
+            "sudo",
+            "nerdctl",
+            "--namespace=k8s.io",
+            "load",
+            stdin=f,
+            context=profile["name"],
+        )
+    logging.info(
+        "[%s] Image loaded in %.2f seconds",
+        profile["name"],
+        time.monotonic() - start,
+    )
+
+
+# Private helpers
+
+
+def _log_unsupported_options(profile):
+    for option in UNSUPPORTED_OPTIONS:
+        if profile[option]:
+            logging.debug(
+                "[%s] Ignoring '%s' for lima cluster",
+                profile["name"],
+                option,
+            )
+
+
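Because delete() serializes only the limactl call itself, several clusters can still be torn down in parallel; a sketch under that assumption (delete_all() and the worker count are not part of the patch):

# Illustrative only: deleting lima clusters from worker threads is safe
# because the racy `limactl delete` runs under _delete_vm_lock, while disk
# and kubeconfig cleanup still proceed concurrently.
from concurrent.futures import ThreadPoolExecutor

from drenv import providers


def delete_all(profiles):
    lima = providers.get("lima")
    with ThreadPoolExecutor(max_workers=4) as pool:
        for _ in pool.map(lima.delete, profiles):
            pass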
+def _write_config(profile, path):
+    """
+    Create vm config for profile at path.
+    """
+    with resources.files("drenv.providers.lima").joinpath("k8s.yaml").open() as f:
+        config = yaml.safe_load(f)
+
+    # The "vz" type is required to support amd64 images on arm64, needed for
+    # OCM, and also provides the best performance.
+    config["vmType"] = "vz"
+
+    if profile["rosetta"]:
+        config["rosetta"] = {"enabled": True, "binfmt": True}
+
+    # We always use socket_vmnet to get a shared network.
+    config["networks"] = [{"socket": "/var/run/socket_vmnet"}]
+
+    # Add profile options to template
+
+    config["cpus"] = profile["cpus"]
+    config["memory"] = profile["memory"]
+    config["disk"] = profile["disk_size"]
+
+    config["additionalDisks"] = _create_additional_disks(profile)
+
+    with open(path, "w") as f:
+        yaml.dump(config, f)
+
+
+def _create_additional_disks(profile):
+    disks = _list_disks(profile)
+    for disk in disks:
+        logging.info("[%s] Creating disk '%s'", profile["name"], disk["name"])
+        _create_disk(profile, disk)
+    return disks
+
+
+def _delete_additional_disks(profile):
+    for disk in _list_disks(profile):
+        logging.info("[%s] Deleting disk %s", profile["name"], disk["name"])
+        try:
+            _delete_disk(profile, disk)
+        except commands.Error as e:
+            logging.warning(
+                "[%s] Cannot delete disk '%s': %s",
+                profile["name"],
+                disk["name"],
+                e,
+            )
+
+
+def _get_vm(profile):
+    out = _run("list", "--format", "json", context=profile["name"])
+    for line in out.splitlines():
+        vm = json.loads(line)
+        if vm["name"] == profile["name"]:
+            return vm
+    return None
+
+
+def _list_disks(profile):
+    disks = []
+    for i in range(profile["extra_disks"]):
+        disks.append({"name": f"{profile['name']}-disk{i}", "format": False})
+    return disks
+
+
+def _add_kubeconfig(profile, vm):
+    logging.debug("[%s] Adding lima cluster kubeconfig", profile["name"])
+    src = os.path.join(vm["dir"], "copied-from-guest", "kubeconfig.yaml")
+    _fixup_kubeconfig(profile, src)
+    kubeconfig.merge(profile, src)
+
+
+def _fixup_kubeconfig(profile, path):
+    with open(path) as f:
+        config = yaml.safe_load(f)
+
+    config["clusters"][0]["name"] = profile["name"]
+    config["users"][0]["name"] = profile["name"]
+
+    item = config["contexts"][0]
+    item["name"] = profile["name"]
+    item["context"]["cluster"] = profile["name"]
+    item["context"]["user"] = profile["name"]
+
+    config["current-context"] = profile["name"]
+
+    with open(path, "w") as f:
+        yaml.dump(config, f)
+
+
+def _remove_kubeconfig(profile):
+    logging.debug("[%s] Removing lima cluster kubeconfig", profile["name"])
+    kubeconfig.remove(profile)
+
+
+def _create_vm(profile, config):
+    _watch("create", "--name", profile["name"], config, context=profile["name"])
+
+
+def _start_vm(profile, timeout=None):
+    args = ["start"]
+    if timeout:
+        args.append(f"--timeout={timeout}s")
+    args.append(profile["name"])
+    _watch(*args, context=profile["name"])
+
+
+def _stop_vm(profile):
+    _watch("stop", profile["name"], context=profile["name"])
+
+
+def _delete_vm(profile):
+    # --force allows deletion of a running vm.
+    _watch("delete", "--force", profile["name"], context=profile["name"])
+
+
+def _create_disk(profile, disk):
+    _watch(
+        "disk",
+        "create",
+        disk["name"],
+        "--format",
+        "raw",
+        "--size",
+        profile["disk_size"],
+        context=profile["name"],
+    )
+
+
+def _delete_disk(profile, disk):
+    _watch("disk", "delete", disk["name"], context=profile["name"])
+
+
+def _run(*args, context="lima"):
+    cmd = [LIMACTL, *args]
+    logging.debug("[%s] Running %s", context, cmd)
+    return commands.run(*cmd)
+
+
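The k8s.yaml template read by _write_config() ships as package data (see the drenv.providers.lima entry and include_package_data in test/setup.py below), so it resolves from any installed copy; a minimal sketch, with load_template() being a hypothetical helper:

# Illustrative only - load_template() is hypothetical; the resources lookup
# mirrors the call in _write_config() above.
import importlib.resources as resources

from drenv import yaml


def load_template():
    ref = resources.files("drenv.providers.lima").joinpath("k8s.yaml")
    with ref.open() as f:
        return yaml.safe_load(f)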
+def _watch(*args, stdin=None, context="lima"):
+    cmd = [LIMACTL, "--log-format=json", *args]
+    logging.debug("[%s] Running %s", context, cmd)
+    for line in commands.watch(*cmd, stdin=stdin, stderr=subprocess.STDOUT):
+        try:
+            info = json.loads(line)
+        except ValueError:
+            # We don't want to crash if limactl has a logging bug, and the
+            # line may contain useful info.
+            logging.debug("[%s] %s", context, line)
+            continue
+        info.pop("time", None)
+        msg = info.pop("msg", None)
+        level = info.pop("level", None)
+        log = _loggers.get(level, logging.debug)
+        if info:
+            log("[%s] %s %s", context, msg, info)
+        else:
+            log("[%s] %s", context, msg)
+
+
+# Map lima log levels to python logging functions. Limactl logs are very noisy
+# so turn down the logging level.
+_loggers = {
+    "debug": logging.debug,
+    "info": logging.debug,
+    "warning": logging.debug,
+    "error": logging.warning,
+    "fatal": logging.error,
+}
diff --git a/test/drenv/providers/lima/k8s.yaml b/test/drenv/providers/lima/k8s.yaml
new file mode 100644
index 000000000..3d016bc5c
--- /dev/null
+++ b/test/drenv/providers/lima/k8s.yaml
@@ -0,0 +1,202 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Derived from lima k8s.yaml example:
+# https://github.com/lima-vm/lima/blob/master/examples/k8s.yaml
+# The lima-vm project is under Apache-2.0 license:
+# https://github.com/lima-vm/lima/blob/master/LICENSE
+
+# yamllint disable rule:line-length
+---
+
+# We use only stable Ubuntu images for better stability.
+images:
+  - location: "https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-arm64.img"
+    arch: "aarch64"
+  - location: "https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-amd64.img"
+    arch: "x86_64"
+
+mounts: []
+
+containerd:
+  system: true
+  user: false
+
+# We access the cluster via the IP address on the shared network. Port
+# forwarding cannot work for multiple clusters since the same port from
+# multiple clusters is mapped to the same host port.
+portForwards:
+  - ignore: true
+    proto: any
+
+provision:
+
+  # See
+  - mode: system
+    script: |
+      #!/bin/bash
+      set -eux -o pipefail
+      command -v kubeadm >/dev/null 2>&1 && exit 0
+      # Install and configure prerequisites
+      cat <
+  - mode: system
+    script: |
+      #!/bin/bash
+      set -eux -o pipefail
+      grep SystemdCgroup /etc/containerd/config.toml && exit 0
+      grep "version = 2" /etc/containerd/config.toml || exit 1
+      # Configuring the systemd cgroup driver
+      # Overriding the sandbox (pause) image
+      cat <<EOF >>/etc/containerd/config.toml
+        [plugins]
+          [plugins."io.containerd.grpc.v1.cri"]
+            sandbox_image = "$(kubeadm config images list | grep pause | sort -r | head -n1)"
+            [plugins."io.containerd.grpc.v1.cri".containerd]
+              [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
+                [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+                  runtime_type = "io.containerd.runc.v2"
+                  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+                    SystemdCgroup = true
+      EOF
+      systemctl restart containerd
+
+  # See
+  - mode: system
+    script: |
+      #!/bin/bash
+      set -eux -o pipefail
+      test -e /etc/kubernetes/admin.conf && exit 0
+      export KUBECONFIG=/etc/kubernetes/admin.conf
+      # Ramen: serve the additional shared network instead of the user network.
+      export ADVERTISE_ADDRESS=$(ip -j -4 addr show dev lima0 | jq -r '.[0].addr_info[0].local')
+      kubeadm config images list
+      kubeadm config images pull --cri-socket=unix:///run/containerd/containerd.sock
+      # Initializing your control-plane node
+      cat <<EOF >kubeadm-config.yaml
+      kind: InitConfiguration
+      apiVersion: kubeadm.k8s.io/v1beta3
+      nodeRegistration:
+        criSocket: unix:///run/containerd/containerd.sock
+        kubeletExtraArgs:
+          # Ramen: use specific network
+          node-ip: "$ADVERTISE_ADDRESS"
+          # Ramen: speed up image pulls
+          serialize-image-pulls: "false"
+      # Ramen: serve specific network.
+      localAPIEndpoint:
+        advertiseAddress: "$ADVERTISE_ADDRESS"
+      ---
+      kind: ClusterConfiguration
+      apiVersion: kubeadm.k8s.io/v1beta3
+      apiServer:
+        certSANs:  # --apiserver-cert-extra-sans
+          - "127.0.0.1"
+      networking:
+        podSubnet: "10.244.0.0/16"  # --pod-network-cidr
+      ---
+      kind: KubeletConfiguration
+      apiVersion: kubelet.config.k8s.io/v1beta1
+      cgroupDriver: systemd
+      featureGates:
+        StatefulSetAutoDeletePVC: true
+      EOF
+
+      # We ignore the NumCPU preflight error for running a minimal cluster in
+      # github actions and for testing drenv.
+      # [ERROR NumCPU]: the number of available CPUs 1 is less than the required 2
+      kubeadm init --config kubeadm-config.yaml --ignore-preflight-errors NumCPU
+
+      # Scale down coredns like minikube
+      kubectl scale deploy coredns -n kube-system --replicas=1
+
+      # Installing a Pod network add-on
+      kubectl apply -f https://github.com/flannel-io/flannel/releases/download/v0.24.0/kube-flannel.yml
+
+      # Control plane node isolation
+      kubectl taint nodes --all node-role.kubernetes.io/control-plane-
+
+  - mode: system
+    script: |
+      #!/bin/bash
+      set -eux -o pipefail
+      KUBECONFIG=/etc/kubernetes/admin.conf
+      mkdir -p ${HOME:-/root}/.kube
+      cp -f $KUBECONFIG ${HOME:-/root}/.kube/config
+      mkdir -p {{.Home}}/.kube
+      cp -f $KUBECONFIG {{.Home}}/.kube/config
+      chown -R {{.User}} {{.Home}}/.kube
+
+probes:
+
+  - description: "kubeadm installed"
+    script: |
+      #!/bin/bash
+      set -eux -o pipefail
+      if ! timeout 30s bash -c "until command -v kubeadm >/dev/null 2>&1; do sleep 3; done"; then
+        echo >&2 "kubeadm is not installed yet"
+        exit 1
+      fi
+    hint: |
+      See "/var/log/cloud-init-output.log" in the guest.
+
+  - description: "kubeadm completed"
+    script: |
+      #!/bin/bash
+      set -eux -o pipefail
+      if ! timeout 300s bash -c "until test -f /etc/kubernetes/admin.conf; do sleep 3; done"; then
+        echo >&2 "k8s is not running yet"
+        exit 1
+      fi
+    hint: |
+      The k8s kubeconfig file has not yet been created.
+
+  - description: "kubernetes cluster is ready"
+    script: |
+      #!/bin/bash
+      set -eux -o pipefail
+      if ! timeout 300s bash -c "until kubectl get --raw /readyz >/dev/null 2>&1; do sleep 3; done"; then
+        echo >&2 "kubernetes cluster is not ready yet"
+        exit 1
+      fi
+
+copyToHost:
+  - guest: "/etc/kubernetes/admin.conf"
+    host: "{{.Dir}}/copied-from-guest/kubeconfig.yaml"
+    deleteOnStop: true
diff --git a/test/drenv/minikube.py b/test/drenv/providers/minikube.py
similarity index 51%
rename from test/drenv/minikube.py
rename to test/drenv/providers/minikube.py
index e8ed0b2f8..f03bd39f5 100644
--- a/test/drenv/minikube.py
+++ b/test/drenv/providers/minikube.py
@@ -5,10 +5,13 @@
 import json
 import logging
 import os
+import sys
+import time
 
 from packaging.version import Version
 
-from . import commands
+from drenv import commands
+from drenv import containerd
 
 EXTRA_CONFIG = [
     # When enabled, tells the Kubelet to pull images one at a time. This slows
@@ -21,72 +24,94 @@
 ]
 
 
-def profile(command, output=None):
-    # Workaround for https://github.com/kubernetes/minikube/pull/16900
-    # TODO: remove when issue is fixed.
-    _create_profiles_dir()
+# Provider scope
 
-    return _run("profile", command, output=output)
+
+def setup():
+    """
+    Set up minikube to work with drenv. Must be called before starting the
+    first cluster.
+
+    To load the configuration you must call configure() after a cluster is
+    started.
+ """ + version = _version() + logging.debug("[minikube] Using minikube version %s", version) + _setup_sysctl(version) + _setup_systemd_resolved(version) -def status(profile, output=None): - return _run("status", profile=profile, output=output) - - -def start( - profile, - driver=None, - container_runtime=None, - extra_disks=None, - disk_size=None, - network=None, - nodes=None, - cni=None, - cpus=None, - memory=None, - addons=(), - service_cluster_ip_range=None, - extra_config=None, - feature_gates=None, - alsologtostderr=False, -): +def cleanup(): + """ + Cleanup files added by setup(). + """ + _cleanup_file(_systemd_resolved_drenv_conf()) + _cleanup_file(_sysctl_drenv_conf()) + + +# Cluster scope + + +def exists(profile): + out = _profile("list", output="json") + profiles = json.loads(out) + for p in profiles["valid"]: + if p["Name"] == profile["name"]: + return True + return False + + +def start(profile, verbose=False, timeout=None): + start = time.monotonic() + logging.info("[%s] Starting minikube cluster", profile["name"]) + args = [] - if driver: - args.extend(("--driver", driver)) - if container_runtime: - args.extend(("--container-runtime", container_runtime)) - if extra_disks: - args.extend(("--extra-disks", str(extra_disks))) - if disk_size: - args.extend(("--disk-size", disk_size)) # "4g" - if network: - args.extend(("--network", network)) - if nodes: - args.extend(("--nodes", str(nodes))) - if cni: - args.extend(("--cni", cni)) - if cpus: - args.extend(("--cpus", str(cpus))) - if memory: - args.extend(("--memory", memory)) - if addons: - args.extend(("--addons", ",".join(addons))) - if service_cluster_ip_range: - args.extend(("--service-cluster-ip-range", service_cluster_ip_range)) + if profile["driver"]: + args.extend(("--driver", profile["driver"])) + + if profile["container_runtime"]: + args.extend(("--container-runtime", profile["container_runtime"])) + + if profile["extra_disks"]: + args.extend(("--extra-disks", str(profile["extra_disks"]))) + + if profile["disk_size"]: + args.extend(("--disk-size", profile["disk_size"])) # "4g" + + if profile["network"]: + args.extend(("--network", profile["network"])) + + if profile["nodes"]: + args.extend(("--nodes", str(profile["nodes"]))) + + if profile["cni"]: + args.extend(("--cni", profile["cni"])) + + if profile["cpus"]: + args.extend(("--cpus", str(profile["cpus"]))) + + if profile["memory"]: + args.extend(("--memory", profile["memory"])) + + if profile["addons"]: + args.extend(("--addons", ",".join(profile["addons"]))) + + if profile["service_cluster_ip_range"]: + args.extend(("--service-cluster-ip-range", profile["service_cluster_ip_range"])) for pair in EXTRA_CONFIG: args.extend(("--extra-config", pair)) - if extra_config: - for pair in extra_config: + if profile["extra_config"]: + for pair in profile["extra_config"]: args.extend(("--extra-config", pair)) - if feature_gates: + if profile["feature_gates"]: # Unlike --extra-config this requires one comma separated value. - args.extend(("--feature-gates", ",".join(feature_gates))) + args.extend(("--feature-gates", ",".join(profile["feature_gates"]))) - if alsologtostderr: + if verbose: args.append("--alsologtostderr") args.append("--insecure-registry=host.minikube.internal:5000") @@ -94,57 +119,120 @@ def start( # TODO: Use --interactive=false when the bug is fixed. 
# https://github.com/kubernetes/minikube/issues/19518 - _watch("start", *args, profile=profile) + _watch("start", *args, profile=profile["name"], timeout=timeout) + + logging.info( + "[%s] Cluster started in %.2f seconds", + profile["name"], + time.monotonic() - start, + ) + + +def configure(profile, existing=False): + """ + Load configuration done in setup() before the minikube cluster was + started. + + Must be called after the cluster is started, before running any addon. + """ + if not existing: + if profile["containerd"]: + logging.info("[%s] Configuring containerd", profile["name"]) + containerd.configure(sys.modules[__name__], profile) + _configure_sysctl(profile["name"]) + _configure_systemd_resolved(profile["name"]) + + if existing: + _wait_for_fresh_status(profile) def stop(profile): - _watch("stop", profile=profile) + start = time.monotonic() + logging.info("[%s] Stopping cluster", profile["name"]) + _watch("stop", profile=profile["name"]) + logging.info( + "[%s] Cluster stopped in %.2f seconds", + profile["name"], + time.monotonic() - start, + ) def delete(profile): - _watch("delete", profile=profile) + start = time.monotonic() + logging.info("[%s] Deleting cluster", profile["name"]) + _watch("delete", profile=profile["name"]) + logging.info( + "[%s] Cluster deleted in %.2f seconds", + profile["name"], + time.monotonic() - start, + ) + + +def load(profile, image): + start = time.monotonic() + logging.info("[%s] Loading image", profile["name"]) + _watch("image", "load", image, profile=profile["name"]) + logging.info( + "[%s] Image loaded in %.2f seconds", + profile["name"], + time.monotonic() - start, + ) + + +def suspend(profile): + if profile["driver"] != "kvm2": + logging.warning("[%s] suspend supported only for kvm2 driver", profile["name"]) + return + logging.info("[%s] Suspending cluster", profile["name"]) + cmd = ["virsh", "-c", "qemu:///system", "suspend", profile["name"]] + for line in commands.watch(*cmd): + logging.debug("[%s] %s", profile["name"], line) -def cp(profile, src, dst): - _watch("cp", src, dst, profile=profile) +def resume(profile): + if profile["driver"] != "kvm2": + logging.warning("[%s] resume supported only for kvm2 driver", profile["name"]) + return + logging.info("[%s] Resuming cluster", profile["name"]) + cmd = ["virsh", "-c", "qemu:///system", "resume", profile["name"]] + for line in commands.watch(*cmd): + logging.debug("[%s] %s", profile["name"], line) -def ssh(profile, command): - _watch("ssh", command, profile=profile) +def cp(name, src, dst): + _watch("cp", src, dst, profile=name) -def setup_files(): - """ - Set up minikube to work with drenv. Must be called before starting the - first cluster. +def ssh(name, command): + _watch("ssh", command, profile=name) - To load the configuration you must call load_files() after a cluster is - created. - """ - version = _version() - logging.debug("[minikube] Using minikube version %s", version) - _setup_sysctl(version) - _setup_systemd_resolved(version) +# Private helpers -def load_files(profile): + +def _wait_for_fresh_status(profile): """ - Load configuration done in setup_files() before the minikube cluster was - started. + When starting an existing cluster, kubectl can report stale status for a + while, before it starts to report real status. Then it takes a while until + all deployments become available. - Must be called after the cluster is started, before running any addon. Not - need when starting a stopped cluster. 
+    We wait 30 seconds to give Kubernetes a chance to fail liveness and
+    readiness checks and start reporting real cluster status.
+    """
-    _load_sysctl(profile)
-    _load_systemd_resolved(profile)
+    logging.info("[%s] Waiting for fresh status", profile["name"])
+    time.sleep(30)
 
 
-def cleanup_files():
-    """
-    Cleanup files added by setup_files().
-    """
-    _cleanup_file(_systemd_resolved_drenv_conf())
-    _cleanup_file(_sysctl_drenv_conf())
+def _profile(command, output=None):
+    # Workaround for https://github.com/kubernetes/minikube/pull/16900
+    # TODO: remove when issue is fixed.
+    _create_profiles_dir()
+
+    return _run("profile", command, output=output)
+
+
+def _status(name, output=None):
+    return _run("status", profile=name, output=output)
 
 
 def _version():
@@ -178,11 +266,11 @@ def _setup_sysctl(version):
         _write_file(path, data)
 
 
-def _load_sysctl(profile):
+def _configure_sysctl(name):
     if not os.path.exists(_sysctl_drenv_conf()):
         return
-    logging.debug("[%s] Loading drenv sysctl configuration", profile)
-    ssh(profile, "sudo sysctl -p /etc/sysctl.d/99-drenv.conf")
+    logging.debug("[%s] Loading drenv sysctl configuration", name)
+    ssh(name, "sudo sysctl -p /etc/sysctl.d/99-drenv.conf")
 
 
 def _sysctl_drenv_conf():
@@ -211,11 +299,11 @@ def _setup_systemd_resolved(version):
         _write_file(path, data)
 
 
-def _load_systemd_resolved(profile):
+def _configure_systemd_resolved(name):
     if not os.path.exists(_systemd_resolved_drenv_conf()):
         return
-    logging.debug("[%s] Loading drenv systemd-resolved configuration", profile)
-    ssh(profile, "sudo systemctl restart systemd-resolved.service")
+    logging.debug("[%s] Loading drenv systemd-resolved configuration", name)
+    ssh(name, "sudo systemctl restart systemd-resolved.service")
 
 
 def _systemd_resolved_drenv_conf():
@@ -276,11 +364,11 @@ def _run(command, *args, profile=None, output=None):
     return commands.run(*cmd)
 
 
-def _watch(command, *args, profile=None):
+def _watch(command, *args, profile=None, timeout=None):
     cmd = ["minikube", command, "--profile", profile]
     cmd.extend(args)
     logging.debug("[%s] Running %s", profile, cmd)
-    for line in commands.watch(*cmd):
+    for line in commands.watch(*cmd, timeout=timeout):
         logging.debug("[%s] %s", profile, line)
diff --git a/test/drenv/ramen.py b/test/drenv/ramen.py
index e31c4a80e..fba7a5d1f 100644
--- a/test/drenv/ramen.py
+++ b/test/drenv/ramen.py
@@ -4,10 +4,9 @@
 import os
 import logging
 
-import yaml
-
 import drenv
 from . import kubectl
+from . import yaml
 
 
 def env_info(filename, name_prefix=None):
diff --git a/test/drenv/subctl.py b/test/drenv/subctl.py
index 0b925d38b..9185be60b 100644
--- a/test/drenv/subctl.py
+++ b/test/drenv/subctl.py
@@ -1,6 +1,7 @@
 # SPDX-FileCopyrightText: The RamenDR authors
 # SPDX-License-Identifier: Apache-2.0
 
+import platform
 import shutil
 
 from . import commands
@@ -31,11 +32,20 @@ def join(broker_info, context, clusterid, cable_driver=None, version=None, log=p
     """
     Run subctl join ... logging progress messages.
     """
-    args = ["join", broker_info, "--context", context, "--clusterid", clusterid]
+    args = [
+        "join",
+        broker_info,
+        "--context",
+        context,
+        "--clusterid",
+        clusterid,
+    ]
     if cable_driver:
         args.extend(("--cable-driver", cable_driver))
     if version:
         args.append(f"--version={version}")
+    if platform.system().lower() == "darwin":
+        args.append("--check-broker-certificate=false")
 
     _watch(*args, log=log)
diff --git a/test/drenv/test.py b/test/drenv/test.py
index ae98c604d..a3f9a7af5 100644
--- a/test/drenv/test.py
+++ b/test/drenv/test.py
@@ -7,12 +7,11 @@
 import os
 import sys
 
-import yaml
-
 import drenv
 from drenv import kubectl
 
 from . import ramen
+from . import yaml
 
 workdir = None
@@ -145,6 +144,8 @@ def enable_dr():
     cluster = lookup_cluster()
     placement_name = placement.split("/")[1]
+    consistency_groups = env["features"].get("consistency_groups", False)
+    cg_enabled = "true" if consistency_groups else "false"
 
     drpc = f"""
 apiVersion: ramendr.openshift.io/v1alpha1
@@ -152,6 +153,8 @@
 metadata:
   name: {config['name']}-drpc
   namespace: {config['namespace']}
+  annotations:
+    drplacementcontrol.ramendr.openshift.io/is-cg-enabled: '{cg_enabled}'
   labels:
     app: {config['name']}
 spec:
diff --git a/test/drenv/yaml.py b/test/drenv/yaml.py
new file mode 100644
index 000000000..f34a62b60
--- /dev/null
+++ b/test/drenv/yaml.py
@@ -0,0 +1,40 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+# SPDX-License-Identifier: Apache-2.0
+"""
+This module wraps pyyaml for preserving multiline strings when dumping, and
+enforcing our defaults.
+
+Do not use the yaml module directly in the drenv package.
+"""
+
+import yaml
+
+
+def safe_load(stream):
+    return yaml.safe_load(stream)
+
+
+def safe_load_all(stream):
+    return yaml.safe_load_all(stream)
+
+
+def dump(data, stream=None):
+    return yaml.dump(data, stream=stream, sort_keys=False)
+
+
+def _str_presenter(dumper, data):
+    """
+    Preserve multiline strings when dumping yaml.
+    https://github.com/yaml/pyyaml/issues/240
+    """
+    if "\n" in data:
+        # Remove trailing spaces messing up the output.
+        block = "\n".join([line.rstrip() for line in data.splitlines()])
+        if data.endswith("\n"):
+            block += "\n"
+        return dumper.represent_scalar("tag:yaml.org,2002:str", block, style="|")
+    return dumper.represent_scalar("tag:yaml.org,2002:str", data)
+
+
+yaml.add_representer(str, _str_presenter)
+yaml.representer.SafeRepresenter.add_representer(str, _str_presenter)
diff --git a/test/envs/external.yaml b/test/envs/external.yaml
index 51da25131..98238dd99 100644
--- a/test/envs/external.yaml
+++ b/test/envs/external.yaml
@@ -1,12 +1,12 @@
 # SPDX-FileCopyrightText: The RamenDR authors
 # SPDX-License-Identifier: Apache-2.0
 
-# Example environment using external clusters. The cluster `test` must exist
-# when this environment is started.
+# Example environment using external clusters. The cluster must exist when this
+# environment is started.
# # To try this example, create the cluster with: # -# drenv start envs/test.yaml +# drenv start envs/vm.yaml # # Now you can start this environment with: # @@ -20,7 +20,7 @@ name: external profiles: - name: cluster - external: true + provider: external workers: - addons: - name: example diff --git a/test/envs/kubevirt.yaml b/test/envs/kubevirt.yaml index 556e3acf6..6b5dc07cf 100644 --- a/test/envs/kubevirt.yaml +++ b/test/envs/kubevirt.yaml @@ -19,6 +19,7 @@ profiles: disk_size: "100g" workers: - addons: + - name: external-snapshotter - name: rook-operator - name: rook-cluster - name: rook-toolbox diff --git a/test/envs/regional-dr-hubless.yaml b/test/envs/regional-dr-hubless.yaml index 4b1d61340..37b72a3b8 100644 --- a/test/envs/regional-dr-hubless.yaml +++ b/test/envs/regional-dr-hubless.yaml @@ -20,8 +20,6 @@ templates: memory: "6g" extra_disks: 1 disk_size: "50g" - addons: - - volumesnapshots workers: - addons: - name: rook-operator @@ -30,6 +28,7 @@ templates: - name: rook-pool - name: rook-cephfs - addons: + - name: external-snapshotter - name: csi-addons - name: olm - name: minio diff --git a/test/envs/regional-dr-kubevirt.yaml b/test/envs/regional-dr-kubevirt.yaml index a5a18a4d6..8103d69ef 100644 --- a/test/envs/regional-dr-kubevirt.yaml +++ b/test/envs/regional-dr-kubevirt.yaml @@ -27,6 +27,7 @@ templates: disk_size: "50g" workers: - addons: + - name: external-snapshotter - name: rook-operator - name: rook-cluster - name: rook-toolbox diff --git a/test/envs/regional-dr.yaml b/test/envs/regional-dr.yaml index 89378e04d..eb7dc6919 100644 --- a/test/envs/regional-dr.yaml +++ b/test/envs/regional-dr.yaml @@ -16,6 +16,10 @@ templates: - name: "dr-cluster" driver: "$vm" container_runtime: containerd + containerd: + plugins: + io.containerd.grpc.v1.cri: + device_ownership_from_security_context: true network: "$network" cpus: 4 memory: "6g" @@ -23,8 +27,6 @@ templates: disk_size: "50g" feature_gates: - StatefulSetAutoDeletePVC=true - addons: - - volumesnapshots workers: - addons: - name: rook-operator @@ -37,6 +39,7 @@ templates: args: ["$name", "hub"] - name: recipe - addons: + - name: external-snapshotter - name: csi-addons - name: olm - name: minio diff --git a/test/envs/rook.yaml b/test/envs/rook.yaml index 59fa4be37..b50b6d901 100644 --- a/test/envs/rook.yaml +++ b/test/envs/rook.yaml @@ -14,8 +14,6 @@ templates: memory: "6g" extra_disks: 1 disk_size: "50g" - addons: - - volumesnapshots workers: - addons: - name: rook-operator @@ -24,6 +22,7 @@ templates: - name: rook-pool - name: rook-cephfs - addons: + - name: external-snapshotter - name: csi-addons profiles: diff --git a/test/envs/vm.yaml b/test/envs/vm.yaml index 85da2db03..d947ba806 100644 --- a/test/envs/vm.yaml +++ b/test/envs/vm.yaml @@ -8,7 +8,9 @@ profiles: - name: cluster driver: $vm container_runtime: containerd - memory: "3g" + cpus: 1 + memory: "2g" + rosetta: false workers: - addons: - name: example diff --git a/test/envs/volsync.yaml b/test/envs/volsync.yaml index 623acd7f8..dc01edacf 100644 --- a/test/envs/volsync.yaml +++ b/test/envs/volsync.yaml @@ -22,14 +22,14 @@ templates: memory: 6g extra_disks: 1 disk_size: 50g - addons: - - volumesnapshots workers: - addons: - name: rook-operator - name: rook-cluster - name: rook-toolbox - name: rook-cephfs + - addons: + - name: external-snapshotter profiles: - name: hub diff --git a/test/setup.py b/test/setup.py index ae122d6d7..5b5dfeca5 100644 --- a/test/setup.py +++ b/test/setup.py @@ -17,7 +17,12 @@ long_description=long_description, 
long_description_content_type="text/markdown", url="https://github.com/RamenDR/ramen", - packages=["drenv"], + packages=[ + "drenv", + "drenv.providers", + "drenv.providers.lima", + ], + include_package_data=True, install_requires=[ "PyYAML", "toml",