diff --git a/.github/workflows/multus.yaml b/.github/workflows/multus.yaml index f71b1482e260..a24a6289b78b 100644 --- a/.github/workflows/multus.yaml +++ b/.github/workflows/multus.yaml @@ -55,6 +55,9 @@ jobs: - name: Setup multus run: ./tests/scripts/multus/setup-multus.sh + - name: Set up multus prerequisite host routing + run: kubectl create -f tests/scripts/multus/host-cfg-ds.yaml + - name: Install public and cluster NADs in default namespace run: kubectl create -f tests/scripts/multus/default-public-cluster-nads.yaml diff --git a/Documentation/CRDs/Cluster/network-providers.md b/Documentation/CRDs/Cluster/network-providers.md index 053e443a9807..142d8cda4576 100644 --- a/Documentation/CRDs/Cluster/network-providers.md +++ b/Documentation/CRDs/Cluster/network-providers.md @@ -207,26 +207,43 @@ ranges could be manually specified for the networks if needed. ### Validating Multus configuration -We **highly** recommend validating your Multus configuration before you install Rook. A tool exists -to facilitate validating the Multus configuration. After installing the Rook operator and before -installing any Custom Resources, run the tool from the operator pod. +We **highly** recommend validating your Multus configuration before you install a CephCluster. +A tool exists to facilitate validating the Multus configuration. After installing the Rook operator +and before installing any Custom Resources, run the tool from the operator pod. The tool's CLI is designed to be as helpful as possible. Get help text for the multus validation tool like so: -```console -kubectl --namespace rook-ceph exec -it deploy/rook-ceph-operator -- rook multus validation run --help -``` +1. Exec into the Rook operator pod + + ```console + kubectl --namespace rook-ceph exec -it deploy/rook-ceph-operator -- bash + ``` + +2. Output and read the tool's help text -Then, update the args in the -[multus-validation](https://github.com/rook/rook/blob/master/deploy/examples/multus-validation.yaml) -job template. Minimally, add the NAD names(s) for public and/or cluster as needed and then, -create the job to validate the Multus configuration. + ```console + rook multus validation run --help + ``` -If the tool fails, it will suggest what things may be preventing Multus networks from working -properly, and it will request the logs and outputs that will help debug issues. +3. Use the validation tool config file for advanced configuration. -Check the logs of the pod created by the job to know the status of the validation test. + ```console + rook multus validation config --help + ``` + + Generate a sample config that includes commented help text using one of the available templates. + +4. Run the tool after configuring. If the tool fails, it will suggest what things may be preventing + Multus networks from working properly, and it will request the logs and outputs that will help + debug issues. + +!!! note + The tool requires host network access. Many Kubernetes distros restrict this with security rules. Use + the tool's `serviceAccountName` config option or `--service-account-name` CLI flag to instruct + the tool to run using a particular ServiceAccount that grants the necessary permissions.
+ An example compatible with OpenShift is provided in the Rook repository at + [deploy/examples/multus-validation-test-openshift.yaml](https://github.com/rook/rook/blob/master/deploy/examples/multus-validation-test-openshift.yaml) ### Known limitations with Multus @@ -445,6 +462,10 @@ may be something like PXE, ignition config, cloud-init, Ansible, or any other su reboot is likely necessary to apply configuration updates, but wait until the next step to reboot nodes. +If desired, check that the NetworkAttachmentDefinition modification and host configurations are +compatible using the [Multus validation tool](#validating-multus-configuration). For the upgrade +case, use the `hostCheckOnly: true` config option or `--host-check-only` CLI flag. + **Step 4** After the NetworkAttachmentDefinition is modified, OSD pods must be restarted. It is easiest to diff --git a/Makefile b/Makefile index 2b724940d67a..de860b82fd99 100644 --- a/Makefile +++ b/Makefile @@ -192,7 +192,6 @@ csv-clean: ## Remove existing OLM files. @$(MAKE) -C images/ceph csv-clean docs: helm-docs - @build/deploy/generate-deploy-examples.sh crds: $(CONTROLLER_GEN) $(YQ) @echo Updating CRD manifests diff --git a/PendingReleaseNotes.md b/PendingReleaseNotes.md index b5b24eda3d54..0cf183144e02 100644 --- a/PendingReleaseNotes.md +++ b/PendingReleaseNotes.md @@ -4,5 +4,6 @@ - Updating Ceph COSI driver images. This impacts existing COSI `Buckets` and `BucketAccesses`; to resolve, update the `BucketClass` and `BucketAccessClass` as described [here](https://github.com/rook/rook/discussions/14297) +- During CephBlockPool updates, return an error if an invalid device class is specified. Pools with invalid device classes may start failing to reconcile until the correct device class is specified. See #14057. ## Features diff --git a/build/deploy/generate-deploy-examples.sh b/build/deploy/generate-deploy-examples.sh deleted file mode 100755 index ca276a49a5fb..000000000000 --- a/build/deploy/generate-deploy-examples.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!
/usr/bin/env bash -set -e - -SCRIPT_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P) -MANIFEST_FILE=../../deploy/examples/multus-validation.yaml -SED=${SCRIPT_ROOT}/../../build/sed-in-place -if [[ ${SKIP_GEN} = true ]]; then - echo "SKIP_GEN is set, skipping manifest generation" - exit -fi -OUT_FILE="${SCRIPT_ROOT}/${MANIFEST_FILE}" -TMP_FILE="$(mktemp)" - -START_MARKER='# THE BELOW HELP TEXT IS AUTO-GENERATED BY `make docs`$' -END_MARKER='# THE ABOVE HELP TEXT IS AUTO-GENERATED BY `make docs`$' - -go run ${SCRIPT_ROOT}/../../cmd/rook/*.go multus validation run --help | awk '{print "# "$0}' | sed -e 's/[ \t]*$//' > ${TMP_FILE} - -${SED} -e "/$START_MARKER/,/$END_MARKER/{ /$START_MARKER/{ p ; r $TMP_FILE - - }; /$END_MARKER/p; d; }" ${OUT_FILE} - -rm -f ${TMP_FILE} - diff --git a/cmd/rook/userfacing/multus/validation/validation.go b/cmd/rook/userfacing/multus/validation/validation.go index 57ef73159ee2..e5f1c432114a 100644 --- a/cmd/rook/userfacing/multus/validation/validation.go +++ b/cmd/rook/userfacing/multus/validation/validation.go @@ -38,6 +38,9 @@ var ( // keep special var for `--daemons-per-node` that needs put into node config for validation run flagDaemonsPerNode = -1 + + // keep special var for --host-check-only flag that can override the value from the config file + flagHostCheckOnly = false ) // commands @@ -131,6 +134,9 @@ func init() { "The default value is set to the worst-case value for a Rook Ceph cluster with 3 portable OSDs, 3 portable monitors, "+ "and where all optional child resources have been created with 1 daemon such that they all might run on a single node in a failure scenario. "+ "If you aren't sure what to choose for this value, add 1 for each additional OSD beyond 3.") + runCmd.Flags().BoolVar(&flagHostCheckOnly, "host-check-only", defaultConfig.HostCheckOnly, + "Only check that hosts can connect to the server via the public network. Do not start clients. "+ + "This mode is recommended when a Rook cluster is already running and consuming the public network specified.") runCmd.Flags().StringVar(&validationConfig.NginxImage, "nginx-image", defaultConfig.NginxImage, "The Nginx image used for the validation server and clients.") @@ -147,7 +153,8 @@ func init() { "clients to start, and it therefore may take longer for all clients to become 'Ready'; in that case, this value can be set slightly higher.") runCmd.Flags().StringVarP(&validationConfigFile, "config", "c", "", - "The validation test config file to use. This cannot be used with other flags.") + "The validation test config file to use.
This cannot be used with other flags except --host-check-only.") + // allow using --host-check-only in combination with --config so the same config can be used with that flag if desired runCmd.MarkFlagsMutuallyExclusive("config", "timeout-minutes") runCmd.MarkFlagsMutuallyExclusive("config", "namespace") runCmd.MarkFlagsMutuallyExclusive("config", "public-network") @@ -184,6 +191,11 @@ func runValidation(ctx context.Context) { } } + // allow --host-check-only(=true) flag to override default/config file settings + if flagHostCheckOnly { + validationConfig.HostCheckOnly = true + } + if err := validationConfig.ValidationTestConfig.Validate(); err != nil { fmt.Print(err.Error() + "\n") os.Exit(22 /* EINVAL */) diff --git a/deploy/examples/multus-validation-test-openshift.yaml b/deploy/examples/multus-validation-test-openshift.yaml new file mode 100644 index 000000000000..5781ee162aab --- /dev/null +++ b/deploy/examples/multus-validation-test-openshift.yaml @@ -0,0 +1,38 @@ +# ServiceAccount and RBAC to support running multus validation test on OpenShift +# Deploy these resources, then use `serviceAccountName: multus-validation-test` in the validation +# test config file. +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus-validation-test + namespace: openshift-storage +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: multus-validation-test + namespace: openshift-storage +rules: + - apiGroups: + - security.openshift.io + resourceNames: + - hostnetwork-v2 + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: multus-validation-test + namespace: openshift-storage +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: multus-validation-test +subjects: + - kind: ServiceAccount + name: multus-validation-test + namespace: openshift-storage diff --git a/deploy/examples/multus-validation.yaml b/deploy/examples/multus-validation.yaml deleted file mode 100644 index b33689ab207f..000000000000 --- a/deploy/examples/multus-validation.yaml +++ /dev/null @@ -1,173 +0,0 @@ -#################################################################################################### -# This manifest contains a Kubernetes Job and supporting definitions for running Rook's Multus -# validation tool. The Multus validation tool's help text is copied below. Modify the Job -# definition in this manifest based on the needs of your cluster. -#################################################################################################### -# THE BELOW HELP TEXT IS AUTO-GENERATED BY `make docs` -# -# Run a validation test that determines whether the current Multus and system -# configurations will support Rook with Multus. -# -# This should be run BEFORE Rook is installed. -# -# This is a fairly long-running test. It starts up a web server and many -# clients to verify that Multus network communication works properly. -# -# It does *not* perform any load testing. Networks that cannot support high -# volumes of Ceph traffic may still encounter runtime issues. This may be -# particularly noticeable with high I/O load or during OSD rebalancing -# (see: https://docs.ceph.com/en/latest/architecture/#rebalancing). -# For example, during Rook or Ceph cluster upgrade. -# -# Override the kube config file location by setting the KUBECONFIG environment variable.
-# -# Usage: -# rook multus validation run [--public-network=] [--cluster-network=] [flags] -# -# Flags: -# --cluster-network string The name of the Network Attachment Definition (NAD) that will be used for Ceph's cluster network. This should be a namespaced name in the form <namespace>/<name> if the NAD is defined in a different namespace from the cluster namespace. -# -c, --config string The validation test config file to use. This cannot be used with other flags. -# --daemons-per-node int The number of validation test daemons to run per node. It is recommended to set this to the maximum number of Ceph daemons that can run on any node in the worst case of node failure(s). The default value is set to the worst-case value for a Rook Ceph cluster with 3 portable OSDs, 3 portable monitors, and where all optional child resources have been created with 1 daemon such that they all might run on a single node in a failure scenario. If you aren't sure what to choose for this value, add 1 for each additional OSD beyond 3. (default 19) -# --flaky-threshold-seconds timeoutSeconds This is the time window in which validation clients are all expected to become 'Ready' together. Validation clients are all started at approximately the same time, and they should all stabilize at approximately the same time. Once the first validation client becomes 'Ready', the tool checks that all of the remaining clients become 'Ready' before this threshold duration elapses. In networks that have connectivity issues, limited bandwidth, or high latency, clients will contend for network traffic with each other, causing some clients to randomly fail and become 'Ready' later than others. These randomly-failing clients are considered 'flaky.' Adjust this value to reflect expectations for the underlying network. For fast and reliable networks, this can be set to a smaller value. For networks that are intended to be slow, this can be set to a larger value. Additionally, for very large Kubernetes clusters, it may take longer for all clients to start, and it therefore may take longer for all clients to become 'Ready'; in that case, this value can be set slightly higher. (default 30s) -# -h, --help help for run -# -n, --namespace string The namespace for validation test resources. It is recommended to set this to the namespace in which Rook's Ceph cluster will be installed. (default "rook-ceph") -# --nginx-image string The Nginx image used for the validation server and clients. (default "quay.io/nginx/nginx-unprivileged:stable-alpine") -# --public-network string The name of the Network Attachment Definition (NAD) that will be used for Ceph's public network. This should be a namespaced name in the form <namespace>/<name> if the NAD is defined in a different namespace from the cluster namespace. -# --timeout-minutes timeoutMinutes The time to wait for resources to change to the expected state. For example, for the test web server to start, for test clients to become ready, or for test resources to be deleted. At longest, this may need to reflect the time it takes for client pods to pull images, get address assignments, and then for each client to determine that its network connection is stable. Minimum: 1 minute. Recommended: 2 minutes or more.
(default 3m0s) -# -# Global Flags: -# --log-level string logging level for logging/tracing output (valid values: ERROR,WARNING,INFO,DEBUG) (default "INFO") -# THE ABOVE HELP TEXT IS AUTO-GENERATED BY `make docs` -#################################################################################################### ---- -# Service account for job that validates multus configuration -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-multus-validation - namespace: rook-ceph # namespace:cluster -# imagePullSecrets: -# - name: my-registry-secret ---- -# Aspects of multus validation job that require access to the operator/cluster namespace -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-multus-validation - namespace: rook-ceph # namespace:cluster -rules: - - apiGroups: [""] - resources: ["configmaps", "configmaps/finalizers", "pods"] - verbs: ["get", "list", "create", "update", "delete"] - - apiGroups: ["apps"] - resources: ["daemonsets"] - verbs: ["list", "create", "delete", "deletecollection"] - - apiGroups: ["k8s.cni.cncf.io"] - resources: ["network-attachment-definitions"] - verbs: ["get"] - - apiGroups: ["batch"] - resources: ["jobs"] - verbs: ["get", "list", "delete"] ---- -# Allow the multus validation job to run in this namespace -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-multus-validation - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-multus-validation -subjects: - - kind: ServiceAccount - name: rook-ceph-multus-validation - namespace: rook-ceph # namespace:cluster ---- -# A job that runs the multus validation tool -apiVersion: batch/v1 -kind: Job -metadata: - name: rook-ceph-multus-validation - namespace: rook-ceph # namespace:cluster - labels: - app: rook-ceph-multus-validation -spec: - template: - metadata: - labels: - app: rook-ceph-multus-validation - spec: - serviceAccountName: rook-ceph-multus-validation - containers: - - name: multus-validation - image: rook/ceph:master - command: ["rook"] - args: - - "multus" - - "validation" - - "run" - # - "--public-network=" # uncomment and replace NAD name if using public network - # - "--cluster-network=" # uncomment and replace NAD name if using cluster network - # - "--nginx-image=" # uncomment and replace IMAGE with the nginx image you want use for the validation server and clients - # - "--daemons-per-node=" # uncomment and replace COUNT with the maximum number of daemons that should be running on each node during validation - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: ROOK_LOG_LEVEL - value: DEBUG - restartPolicy: Never ---- -# This Pod Security Policy (PSP) allows the job to run in Kubernetes environments using PSPs -# apiVersion: rbac.authorization.k8s.io/v1 -# kind: RoleBinding -# metadata: -# name: rook-ceph-multus-validation-psp -# namespace: rook-ceph # namespace:cluster -# roleRef: -# apiGroup: rbac.authorization.k8s.io -# kind: ClusterRole -# name: psp:rook -# subjects: -# - kind: ServiceAccount -# name: rook-ceph-multus-validation -# namespace: rook-ceph # namespace:cluster -# --- -# SecurityContextConstraints(SCC) for the Rook and Ceph daemons -# kind: SecurityContextConstraints -# apiVersion: security.openshift.io/v1 -# metadata: -# name: rook-ceph-multus-validation -# allowPrivilegedContainer: true -# allowHostDirVolumePlugin: true -# allowHostPID: false -# # set to true if running rook with host 
networking enabled -# allowHostNetwork: true -# # set to true if running rook with the provider as host -# allowHostPorts: true -# priority: -# allowedCapabilities: ["MKNOD"] -# allowHostIPC: true -# readOnlyRootFilesystem: false -# # drop all default privileges -# requiredDropCapabilities: ["All"] -# defaultAddCapabilities: [] -# runAsUser: -# type: RunAsAny -# seLinuxContext: -# type: RunAsAny -# fsGroup: -# type: RunAsAny -# supplementalGroups: -# type: RunAsAny -# seccompProfiles: -# - "*" -# volumes: -# - configMap -# - emptyDir -# - projected -# users: -# - system:serviceaccount:rook-ceph:rook-ceph-multus-validation # serviceaccount:namespace:cluster ---- diff --git a/go.mod b/go.mod index 473c3a845529..0f69a7a7fa1c 100644 --- a/go.mod +++ b/go.mod @@ -15,8 +15,8 @@ replace ( ) require ( - github.com/IBM/keyprotect-go-client v0.14.1 - github.com/aws/aws-sdk-go v1.54.6 + github.com/IBM/keyprotect-go-client v0.14.3 + github.com/aws/aws-sdk-go v1.54.15 github.com/banzaicloud/k8s-objectmatcher v1.8.0 github.com/ceph/go-ceph v0.28.0 github.com/coreos/pkg v0.0.0-20230601102743-20bbbf26f4d8 @@ -30,8 +30,8 @@ require ( github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221122204822-d1a8c34382f1 github.com/libopenstorage/secrets v0.0.0-20240416031220-a17cf7f72c6c github.com/pkg/errors v0.9.1 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 - github.com/prometheus-operator/prometheus-operator/pkg/client v0.74.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.1 + github.com/prometheus-operator/prometheus-operator/pkg/client v0.75.1 github.com/rook/rook/pkg/apis v0.0.0-20231204200402-5287527732f7 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 @@ -49,7 +49,7 @@ require ( k8s.io/cli-runtime v0.30.2 k8s.io/client-go v0.30.2 k8s.io/cloud-provider v0.30.2 - k8s.io/utils v0.0.0-20240310230437-4693a0247e57 + k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/mcs-api v0.1.0 sigs.k8s.io/yaml v1.4.0 @@ -82,13 +82,13 @@ require ( github.com/containernetworking/cni v1.2.0-rc1 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.12.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gemalto/flume v0.13.1 // indirect github.com/go-errors/errors v1.5.1 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect @@ -148,19 +148,18 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.24.0 // indirect golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/evanphx/json-patch.v5 v5.7.0 // indirect gopkg.in/inf.v0 v0.9.1 // 
indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.15.0 // indirect sigs.k8s.io/kustomize/kyaml v0.15.0 // indirect diff --git a/go.sum b/go.sum index 2154d8f2425d..45d6ecc62a84 100644 --- a/go.sum +++ b/go.sum @@ -107,8 +107,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/IBM/keyprotect-go-client v0.5.1/go.mod h1:5TwDM/4FRJq1ZOlwQL1xFahLWQ3TveR88VmL1u3njyI= -github.com/IBM/keyprotect-go-client v0.14.1 h1:FSBJ3l6GKCuB3CoQPvVy94lOzYTKpjov8WdSDt5Ercs= -github.com/IBM/keyprotect-go-client v0.14.1/go.mod h1:cAt714Vnwnd03mmkBHHSJlDNRVthdRmJB6RePd4/B8Q= +github.com/IBM/keyprotect-go-client v0.14.3 h1:OUKzRclUqY4zIOclx22b1rrbtrY13y5FyADRoq64AQw= +github.com/IBM/keyprotect-go-client v0.14.3/go.mod h1:cAt714Vnwnd03mmkBHHSJlDNRVthdRmJB6RePd4/B8Q= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -144,8 +144,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.44.164/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.54.6 h1:HEYUib3yTt8E6vxjMWM3yAq5b+qjj/6aKA62mkgux9g= -github.com/aws/aws-sdk-go v1.54.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.54.15 h1:ErgCEVbzuSfuZl9nR+g8FFnzjgeJ/AqAGOEWn6tgAHo= +github.com/aws/aws-sdk-go v1.54.15/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/banzaicloud/k8s-objectmatcher v1.8.0 h1:Nugn25elKtPMTA2br+JgHNeSQ04sc05MDPmpJnd1N2A= github.com/banzaicloud/k8s-objectmatcher v1.8.0/go.mod h1:p2LSNAjlECf07fbhDyebTkPUIYnU05G+WfGgkTmgeMg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -233,8 +233,8 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= -github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -293,8 +293,8 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= @@ -352,7 +352,8 @@ github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85n github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= @@ -455,8 +456,9 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -713,8 +715,8 @@ github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8Ay github.com/onsi/ginkgo/v2 v2.4.0/go.mod 
h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= -github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= -github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -729,8 +731,8 @@ github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= -github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= -github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg= github.com/openshift/api v0.0.0-20240301093301-ce10821dc999 h1:+S998xHiJApsJZjRAO8wyedU9GfqFd8mtwWly6LqHDo= github.com/openshift/api v0.0.0-20240301093301-ce10821dc999/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= @@ -768,11 +770,11 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4 github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.46.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 h1:AHzMWDxNiAVscJL6+4wkvFRTpMnJqiaZFEKA/osaBXE= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0/go.mod h1:wAR5JopumPtAZnu0Cjv2PSqV4p4QB09LMhc6fZZTXuA= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.1 h1:+iiljhJV6niK7MuifJs/n3NeLxikd85nrQfn53sLJkU= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.1/go.mod h1:XYrdZw5dW12Cjkt4ndbeNZZTBp4UCHtW0ccR9+sTtPU= github.com/prometheus-operator/prometheus-operator/pkg/client v0.46.0/go.mod h1:k4BrWlVQQsvBiTcDnKEMgyh/euRxyxgrHdur/ZX/sdA= -github.com/prometheus-operator/prometheus-operator/pkg/client v0.74.0 h1:SyBTzvFuVshDNjDVALs6+NgOy3qh8/xlAsyqB1SzHbI= -github.com/prometheus-operator/prometheus-operator/pkg/client v0.74.0/go.mod h1:FlcnLo14zQxL6P1yPrV22kYBqyAT0ZRRytv98+B7lBQ= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.75.1 h1:s7GlsRYGLWP+L1eQKy6RmLatX+k3v9NQwutUix4l5uM= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.75.1/go.mod h1:qca3qWGdknRpHvPyThepe5a6QYAh38IQ2ml93E6V3NY= github.com/prometheus/client_golang v0.9.0/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -1099,8 +1101,8 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1243,7 +1245,6 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -1394,8 +1395,6 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1522,8 +1521,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf 
v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1653,8 +1652,8 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= @@ -1662,8 +1661,8 @@ k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 h1:qVoMaQV5t62UUvHe16Q3eb2c5HPzLHYzsi0Tu/xLndo= -k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b h1:Q9xmGWBvOGd8UJyccgpYlLosk/JlfP3xQLNkQlHJeXw= +k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1673,8 +1672,8 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/apis/go.mod b/pkg/apis/go.mod index 50ad6961361f..61b0e1d53728 100644 --- a/pkg/apis/go.mod +++ b/pkg/apis/go.mod @@ -29,12 +29,10 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/go-jose/go-jose/v4 v4.0.1 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/onsi/ginkgo/v2 v2.17.1 // indirect - github.com/onsi/gomega v1.32.0 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/client-go v0.30.2 // indirect - k8s.io/utils v0.0.0-20240310230437-4693a0247e57 // indirect + k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) @@ -42,10 +40,10 @@ require ( github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/containernetworking/cni v1.2.0-rc1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.12.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect @@ -78,17 +76,16 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect golang.org/x/crypto v0.24.0 // indirect golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/pkg/apis/go.sum b/pkg/apis/go.sum index 8c8ab68e6ca1..edb52e70bf04 100644 --- a/pkg/apis/go.sum +++ b/pkg/apis/go.sum @@ -184,8 +184,8 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= -github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -231,8 +231,8 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -288,7 +288,8 @@ github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85n github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= @@ -385,8 +386,9 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -607,8 +609,8 @@ github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8Ay github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= 
-github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= -github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -622,8 +624,8 @@ github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= -github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= -github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg= github.com/openshift/api v0.0.0-20240301093301-ce10821dc999 h1:+S998xHiJApsJZjRAO8wyedU9GfqFd8mtwWly6LqHDo= github.com/openshift/api v0.0.0-20240301093301-ce10821dc999/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= @@ -936,8 +938,8 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1077,7 +1079,6 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -1222,8 +1223,6 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.5/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1348,8 +1347,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1451,16 +1450,16 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 h1:qVoMaQV5t62UUvHe16Q3eb2c5HPzLHYzsi0Tu/xLndo= -k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b h1:Q9xmGWBvOGd8UJyccgpYlLosk/JlfP3xQLNkQlHJeXw= +k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= k8s.io/utils 
v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1469,8 +1468,8 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/daemon/ceph/client/pool.go b/pkg/daemon/ceph/client/pool.go index 95d778ec8077..82eae5e77eb9 100644 --- a/pkg/daemon/ceph/client/pool.go +++ b/pkg/daemon/ceph/client/pool.go @@ -253,16 +253,16 @@ func DeletePool(context *clusterd.Context, clusterInfo *ClusterInfo, name string logger.Infof("purging pool %q (id=%d)", name, pool.Number) args := []string{"osd", "pool", "delete", name, name, reallyConfirmFlag} - _, err = NewCephCommand(context, clusterInfo, args).Run() + output, err := NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return errors.Wrapf(err, "failed to delete pool %q", name) + return errors.Wrapf(err, "failed to delete pool %q. %s", name, string(output)) } // remove the crush rule for this pool and ignore the error in case the rule is still in use or not found args = []string{"osd", "crush", "rule", "rm", name} - _, err = NewCephCommand(context, clusterInfo, args).Run() + output, err = NewCephCommand(context, clusterInfo, args).Run() if err != nil { - logger.Errorf("failed to delete crush rule %q. %v", name, err) + logger.Errorf("failed to delete crush rule %q. %v. %s", name, err, string(output)) } logger.Infof("purge completed for pool %q", name) @@ -280,9 +280,9 @@ func givePoolAppTag(context *clusterd.Context, clusterInfo *ClusterInfo, poolNam } args := []string{"osd", "pool", "application", "enable", poolName, appName, confirmFlag} - _, err = NewCephCommand(context, clusterInfo, args).Run() + output, err := NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return errors.Wrapf(err, "failed to enable application %q on pool %q", appName, poolName) + return errors.Wrapf(err, "failed to enable application %q on pool %q. 
%s", appName, poolName, string(output)) } return nil @@ -456,7 +456,7 @@ func createReplicatedPoolForApp(context *clusterd.Context, clusterInfo *ClusterI if checkFailureDomain || pool.PoolSpec.DeviceClass != "" { if err = updatePoolCrushRule(context, clusterInfo, clusterSpec, pool); err != nil { - return nil + return errors.Wrapf(err, "failed to update crush rule for pool %q", pool.Name) } } return nil @@ -561,9 +561,9 @@ func extractPoolDetails(rule ruleSpec) (string, string) { func setCrushRule(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, crushRule string) error { args := []string{"osd", "pool", "set", poolName, "crush_rule", crushRule} - _, err := NewCephCommand(context, clusterInfo, args).Run() + output, err := NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return errors.Wrapf(err, "failed to set crush rule %q", crushRule) + return errors.Wrapf(err, "failed to set crush rule %q. %s", crushRule, string(output)) } return nil } @@ -714,9 +714,9 @@ func createReplicationCrushRule(context *clusterd.Context, clusterInfo *ClusterI args = append(args, deviceClass) } - _, err := NewCephCommand(context, clusterInfo, args).Run() + output, err := NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return errors.Wrapf(err, "failed to create crush rule %s", ruleName) + return errors.Wrapf(err, "failed to create crush rule %s. %s", ruleName, string(output)) } return nil @@ -726,9 +726,9 @@ func createReplicationCrushRule(context *clusterd.Context, clusterInfo *ClusterI func SetPoolProperty(context *clusterd.Context, clusterInfo *ClusterInfo, name, propName, propVal string) error { args := []string{"osd", "pool", "set", name, propName, propVal} logger.Infof("setting pool property %q to %q on pool %q", propName, propVal, name) - _, err := NewCephCommand(context, clusterInfo, args).Run() + output, err := NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return errors.Wrapf(err, "failed to set pool property %q on pool %q", propName, name) + return errors.Wrapf(err, "failed to set pool property %q on pool %q. %s", propName, name, string(output)) } return nil } @@ -737,9 +737,9 @@ func SetPoolProperty(context *clusterd.Context, clusterInfo *ClusterInfo, name, func setPoolQuota(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, quotaType, quotaVal string) error { args := []string{"osd", "pool", "set-quota", poolName, quotaType, quotaVal} logger.Infof("setting quota %q=%q on pool %q", quotaType, quotaVal, poolName) - _, err := NewCephCommand(context, clusterInfo, args).Run() + output, err := NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return errors.Wrapf(err, "failed to set %q quota on pool %q", quotaType, poolName) + return errors.Wrapf(err, "failed to set %q quota on pool %q. %s", quotaType, poolName, string(output)) } return nil } @@ -752,9 +752,9 @@ func SetPoolReplicatedSizeProperty(context *clusterd.Context, clusterInfo *Clust args = append(args, "--yes-i-really-mean-it") } - _, err := NewCephCommand(context, clusterInfo, args).Run() + output, err := NewCephCommand(context, clusterInfo, args).Run() if err != nil { - return errors.Wrapf(err, "failed to set pool property %q on pool %q", propName, poolName) + return errors.Wrapf(err, "failed to set pool property %q on pool %q. 
%s", propName, poolName, string(output)) } return nil diff --git a/pkg/daemon/ceph/client/pool_test.go b/pkg/daemon/ceph/client/pool_test.go index 28bacae2109a..3afe66b820ca 100644 --- a/pkg/daemon/ceph/client/pool_test.go +++ b/pkg/daemon/ceph/client/pool_test.go @@ -179,6 +179,11 @@ func testCreateReplicaPool(t *testing.T, failureDomain, crushRoot, deviceClass, assert.Equal(t, "12345", args[8]) return "", nil } + if args[2] == "get" { + assert.Equal(t, "mypool", args[3]) + assert.Equal(t, "all", args[4]) + return `{"pool":"replicapool","pool_id":2,"size":1,"min_size":1,"crush_rule":"replicapool_osd"}`, nil + } if args[2] == "set" { assert.Equal(t, "mypool", args[3]) if args[4] == "size" { @@ -203,8 +208,12 @@ func testCreateReplicaPool(t *testing.T, failureDomain, crushRoot, deviceClass, if args[1] == "crush" { crushRuleCreated = true assert.Equal(t, "rule", args[2]) + if args[3] == "dump" { + assert.Equal(t, "replicapool_osd", args[4]) + return `{"rule_id": 3,"rule_name": "replicapool_osd","type": 1}`, nil + } assert.Equal(t, "create-replicated", args[3]) - assert.Equal(t, "mypool", args[4]) + assert.Contains(t, args[4], "mypool") if crushRoot == "" { assert.Equal(t, "cluster-crush-root", args[5]) } else { diff --git a/pkg/daemon/multus/config.go b/pkg/daemon/multus/config.go index 401aa44352c9..a651a183b91f 100644 --- a/pkg/daemon/multus/config.go +++ b/pkg/daemon/multus/config.go @@ -38,6 +38,8 @@ var ( var ( DefaultValidationNamespace = "rook-ceph" + DefaultServiceAccountName = "rook-ceph-system" + DefaultValidationOSDsPerNode = 3 DefaultValidationOtherDaemonsPerNode = 16 @@ -70,13 +72,15 @@ func init() { // for this struct from getting out of date, see the output of ValidationTestConfig.ToYAML() for // usage text for each field. type ValidationTestConfig struct { - Namespace string `yaml:"namespace"` - PublicNetwork string `yaml:"publicNetwork"` - ClusterNetwork string `yaml:"clusterNetwork"` - ResourceTimeout time.Duration `yaml:"resourceTimeout"` - FlakyThreshold time.Duration `yaml:"flakyThreshold"` - NginxImage string `yaml:"nginxImage"` - NodeTypes map[string]NodeConfig `yaml:"nodeTypes"` + Namespace string `yaml:"namespace"` + ServiceAccountName string `yaml:"serviceAccountName"` + PublicNetwork string `yaml:"publicNetwork"` + ClusterNetwork string `yaml:"clusterNetwork"` + ResourceTimeout time.Duration `yaml:"resourceTimeout"` + FlakyThreshold time.Duration `yaml:"flakyThreshold"` + HostCheckOnly bool `yaml:"hostCheckOnly"` + NginxImage string `yaml:"nginxImage"` + NodeTypes map[string]NodeConfig `yaml:"nodeTypes"` } type NodeConfig struct { @@ -114,10 +118,11 @@ func (t *TolerationType) ToJSON() (string, error) { // The default test is a converged-node test with no placement. 
func NewDefaultValidationTestConfig() *ValidationTestConfig { return &ValidationTestConfig{ - Namespace: DefaultValidationNamespace, - ResourceTimeout: DefaultValidationResourceTimeout, - FlakyThreshold: DefaultValidationFlakyThreshold, - NginxImage: DefaultValidationNginxImage, + Namespace: DefaultValidationNamespace, + ServiceAccountName: DefaultServiceAccountName, + ResourceTimeout: DefaultValidationResourceTimeout, + FlakyThreshold: DefaultValidationFlakyThreshold, + NginxImage: DefaultValidationNginxImage, NodeTypes: map[string]NodeConfig{ DefaultValidationNodeType: { OSDsPerNode: DefaultValidationOSDsPerNode, @@ -266,10 +271,11 @@ var dedicatedWorkerNodeConfig = NodeConfig{ func NewDedicatedStorageNodesValidationTestConfig() *ValidationTestConfig { return &ValidationTestConfig{ - Namespace: DefaultValidationNamespace, - ResourceTimeout: DefaultValidationResourceTimeout, - FlakyThreshold: DefaultValidationFlakyThreshold, - NginxImage: DefaultValidationNginxImage, + Namespace: DefaultValidationNamespace, + ServiceAccountName: DefaultServiceAccountName, + ResourceTimeout: DefaultValidationResourceTimeout, + FlakyThreshold: DefaultValidationFlakyThreshold, + NginxImage: DefaultValidationNginxImage, NodeTypes: map[string]NodeConfig{ DedicatedStorageNodeType: dedicatedStorageNodeConfig, DedicatedWorkerNodeType: dedicatedWorkerNodeConfig, @@ -283,10 +289,11 @@ const ( func NewArbiterValidationTestConfig() *ValidationTestConfig { return &ValidationTestConfig{ - Namespace: DefaultValidationNamespace, - ResourceTimeout: DefaultValidationResourceTimeout, - FlakyThreshold: DefaultValidationFlakyThreshold, - NginxImage: DefaultValidationNginxImage, + Namespace: DefaultValidationNamespace, + ServiceAccountName: DefaultServiceAccountName, + ResourceTimeout: DefaultValidationResourceTimeout, + FlakyThreshold: DefaultValidationFlakyThreshold, + NginxImage: DefaultValidationNginxImage, NodeTypes: map[string]NodeConfig{ DedicatedStorageNodeType: dedicatedStorageNodeConfig, DedicatedWorkerNodeType: dedicatedWorkerNodeConfig, diff --git a/pkg/daemon/multus/config.yaml b/pkg/daemon/multus/config.yaml index e38fea5c8980..fb4cd5032868 100644 --- a/pkg/daemon/multus/config.yaml +++ b/pkg/daemon/multus/config.yaml @@ -2,6 +2,12 @@ # Rook-Ceph cluster will be installed. namespace: "{{ .Namespace }}" +# The service account to run validation test pods as. Empty means pods won't use a service account. +# This is useful in Kubernetes environments where security rules are in place. +# Notably, host checker pods require host network access. +# The default is to use the same service account as the Rook operator (rook-ceph-system). +serviceAccountName: "{{ .ServiceAccountName }}" + # These fields should be set to the name of the Network Attachment Definition (NAD) which will be # used for the Ceph cluster's public or cluster network, respectively. This should be a namespaced # name in the form <namespace>/<name> if the NAD is defined in a different namespace from the @@ -29,6 +35,13 @@ resourceTimeout: "{{ .ResourceTimeout }}" # longer for all clients to become "Ready"; in that case, this value can be set slightly higher. flakyThreshold: "{{ .FlakyThreshold }}" +# Enable host-check-only mode. This will instruct the validation test routine to only check host +# connectivity to the server via the public network. It won't start clients and cannot check for +# network flakiness. This mode is recommended when a Rook cluster is already running and consuming +# the public network specified.
This mode avoids disrupting the running Rook cluster, which could +# be impacted by the large number of test clients that run when this mode is disabled. +hostCheckOnly: {{ .HostCheckOnly }} + # The Nginx image which will be used for the web server and clients. nginxImage: "{{ .NginxImage }}" diff --git a/pkg/daemon/multus/config_test.go b/pkg/daemon/multus/config_test.go index 0e4387017eae..821533f5c9e5 100644 --- a/pkg/daemon/multus/config_test.go +++ b/pkg/daemon/multus/config_test.go @@ -36,12 +36,14 @@ func TestValidationTestConfig_YAML(t *testing.T) { {"empty config", emptyValidationTestConfig}, {"default config", NewDefaultValidationTestConfig()}, {"full config", &ValidationTestConfig{ - Namespace: "my-rook", - PublicNetwork: "my-pub", - ClusterNetwork: "my-priv", - ResourceTimeout: 2 * time.Minute, - FlakyThreshold: 30 * time.Second, - NginxImage: "myorg/nginx:latest", + Namespace: "my-rook", + ServiceAccountName: "my-svc-acct", + PublicNetwork: "my-pub", + ClusterNetwork: "my-priv", + ResourceTimeout: 2 * time.Minute, + FlakyThreshold: 30 * time.Second, + HostCheckOnly: true, + NginxImage: "myorg/nginx:latest", NodeTypes: map[string]NodeConfig{ "osdOnlyNodes": { OSDsPerNode: 9, diff --git a/pkg/daemon/multus/host-daemonset.yaml b/pkg/daemon/multus/host-daemonset.yaml new file mode 100644 index 000000000000..475595d496d5 --- /dev/null +++ b/pkg/daemon/multus/host-daemonset.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: multus-validation-test-host-checker-{{ .NodeType }} + labels: + app: multus-validation-test-host-checker + nodeType: "{{ .NodeType }}" + app.kubernetes.io/name: "host-checker" + app.kubernetes.io/instance: "host-checker-{{ .NodeType }}" + app.kubernetes.io/component: "host-checker" + app.kubernetes.io/part-of: "multus-validation-test" + app.kubernetes.io/managed-by: "rook-cli" +spec: + selector: + matchLabels: + app: multus-validation-test-host-checker + nodeType: "{{ .NodeType }}" + template: + metadata: + labels: + app: multus-validation-test-host-checker + nodeType: "{{ .NodeType }}" + spec: + nodeSelector: + {{- range $k, $v := .Placement.NodeSelector }} + {{ $k }}: {{ $v }} + {{- end }} + tolerations: + {{- range $idx, $toleration := .Placement.Tolerations }} + - {{ $toleration.ToJSON }} + {{- end }} + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + hostNetwork: true + containers: + - name: readiness-check-web-server-public-addr + # use nginx image because it's already used for the web server pod and has a non-root user + image: "{{ .NginxImage }}" + command: + - sleep + - infinity + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + # A readiness probe makes validation testing easier than investigating container logs. + # Additionally, readiness probe failures don't result in CrashLoopBackoff -- ideal here, + # where ever-longer back-offs would cause tests to run for much longer than necessary. + readinessProbe: + # Low failure threshold and high success threshold. Intended to be very sensitive to + # failures. If probe fails with any regularity, Ceph OSDs likely won't be stable. + failureThreshold: 1 + successThreshold: 12 + periodSeconds: 5 + # Assumption: a network with a latency of more than 4 seconds for this validation test's + # simple client-server response likely won't support acceptable performance for any + # production Ceph cluster. + timeoutSeconds: 4 + # TODO: exec:curl works but httpGet fails. Why? need custom header?
+ exec: + command: + - "curl" + - "--insecure" + - "{{ .PublicNetworkAddress }}:8080" diff --git a/pkg/daemon/multus/resources.go b/pkg/daemon/multus/resources.go index a058e4862f18..81f457287b29 100644 --- a/pkg/daemon/multus/resources.go +++ b/pkg/daemon/multus/resources.go @@ -157,13 +157,13 @@ func (vt *ValidationTest) startImagePullers(ctx context.Context, owners []meta.O return nil } -func (vt *ValidationTest) deleteImagePullers(ctx context.Context) error { +func (vt *ValidationTest) deleteDaemonsetsWithLabel(ctx context.Context, label string) error { noGracePeriod := int64(0) delOpts := meta.DeleteOptions{ GracePeriodSeconds: &noGracePeriod, } listOpts := meta.ListOptions{ - LabelSelector: imagePullAppLabel(), + LabelSelector: label, } err := vt.Clientset.AppsV1().DaemonSets(vt.Namespace).DeleteCollection(ctx, delOpts, listOpts) if err != nil { @@ -176,6 +176,27 @@ func (vt *ValidationTest) deleteImagePullers(ctx context.Context) error { return nil } +func (vt *ValidationTest) startHostCheckers( + ctx context.Context, + owners []meta.OwnerReference, + serverPublicAddr string, +) error { + for typeName, nodeType := range vt.NodeTypes { + ds, err := vt.generateHostCheckerDaemonSet(serverPublicAddr, typeName, nodeType.Placement) + if err != nil { + return fmt.Errorf("failed to generate host checker daemonset: %w", err) + } + ds.SetOwnerReferences(owners) // set owner so cleanup is easier + + _, err = vt.Clientset.AppsV1().DaemonSets(vt.Namespace).Create(ctx, ds, meta.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create host checker daemonset: %w", err) + } + } + + return nil +} + func (vt *ValidationTest) startClients( ctx context.Context, owners []meta.OwnerReference, @@ -335,10 +356,10 @@ func (vt *ValidationTest) getNumRunningPods( return numRunning, nil } -func (vt *ValidationTest) numClientsReady(ctx context.Context, expectedNumPods int) (int, error) { - pods, err := vt.getClientPods(ctx, expectedNumPods) +func (vt *ValidationTest) numPodsReadyWithLabel(ctx context.Context, label string) (int, error) { + pods, err := vt.getPodsWithLabel(ctx, label) if err != nil { - return 0, fmt.Errorf("unexpected error getting client pods: %w", err) + return 0, fmt.Errorf("unexpected error getting pods with label %q: %w", label, err) } numReady := 0 for _, p := range pods.Items { @@ -349,16 +370,13 @@ func (vt *ValidationTest) numClientsReady(ctx context.Context, expectedNumPods i return numReady, nil } -func (vt *ValidationTest) getClientPods(ctx context.Context, expectedNumPods int) (*core.PodList, error) { +func (vt *ValidationTest) getPodsWithLabel(ctx context.Context, label string) (*core.PodList, error) { listOpts := meta.ListOptions{ - LabelSelector: clientAppLabel(), + LabelSelector: label, } pods, err := vt.Clientset.CoreV1().Pods(vt.Namespace).List(ctx, listOpts) if err != nil { - return nil, fmt.Errorf("failed to list client pods: %w", err) - } - if len(pods.Items) != expectedNumPods { - return nil, fmt.Errorf("the number of pods listed [%d] does not match the number expected [%d]", len(pods.Items), expectedNumPods) + return nil, fmt.Errorf("failed to list pods with label %q: %w", label, err) } return pods, err } diff --git a/pkg/daemon/multus/templates.go b/pkg/daemon/multus/templates.go index 275e0f2a879c..4f8afe23d5d9 100644 --- a/pkg/daemon/multus/templates.go +++ b/pkg/daemon/multus/templates.go @@ -39,6 +39,9 @@ var ( //go:embed image-pull-daemonset.yaml imagePullDaemonSet string + //go:embed host-daemonset.yaml + hostCheckerDaemonSet string + //go:embed 
client-daemonset.yaml clientDaemonSet string ) @@ -55,6 +58,13 @@ type imagePullTemplateConfig struct { Placement PlacementConfig } +type hostCheckerTemplateConfig struct { + NodeType string + NginxImage string + PublicNetworkAddress string + Placement PlacementConfig +} + type clientTemplateConfig struct { NodeType string ClientType string @@ -73,6 +83,10 @@ func imagePullAppLabel() string { return "app=multus-validation-test-image-pull" } +func hostCheckerAppLabel() string { + return "app=multus-validation-test-host-checker" +} + func getNodeType(m *metav1.ObjectMeta) string { return m.GetLabels()["nodeType"] } @@ -89,6 +103,7 @@ const ( type daemonsetAppType string const imagePullDaemonSetAppType = "image pull" +const hostCheckerDaemonsetAppType = "host checker" const clientDaemonSetAppType = "client" func (vt *ValidationTest) generateWebServerTemplateConfig(placement PlacementConfig) webServerTemplateConfig { @@ -99,6 +114,19 @@ func (vt *ValidationTest) generateWebServerTemplateConfig(placement PlacementCon } } +func (vt *ValidationTest) generateHostCheckerTemplateConfig( + serverPublicAddr string, + nodeType string, + placement PlacementConfig, +) hostCheckerTemplateConfig { + return hostCheckerTemplateConfig{ + NodeType: nodeType, + PublicNetworkAddress: serverPublicAddr, + NginxImage: vt.NginxImage, + Placement: placement, + } +} + func (vt *ValidationTest) generateClientTemplateConfig( attachPublic, attachCluster bool, serverPublicAddr, serverClusterAddr string, @@ -150,6 +178,8 @@ func (vt *ValidationTest) generateWebServerPod(placement PlacementConfig) (*core return nil, fmt.Errorf("failed to unmarshal web server pod template: %w", err) } + vt.applyServiceAccountToPodSpec(&p.Spec) + return &p, nil } @@ -182,6 +212,29 @@ func (vt *ValidationTest) generateImagePullDaemonSet(nodeType string, placement return nil, fmt.Errorf("failed to unmarshal image pull daemonset template: %w", err) } + vt.applyServiceAccountToPodSpec(&d.Spec.Template.Spec) + + return &d, nil +} + +func (vt *ValidationTest) generateHostCheckerDaemonSet( + serverPublicAddr string, + nodeType string, + placement PlacementConfig, +) (*apps.DaemonSet, error) { + t, err := loadTemplate("hostCheckerDaemonSet", hostCheckerDaemonSet, vt.generateHostCheckerTemplateConfig(serverPublicAddr, nodeType, placement)) + if err != nil { + return nil, fmt.Errorf("failed to load host checker daemonset template: %w", err) + } + + var d apps.DaemonSet + err = yaml.Unmarshal(t, &d) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal host checker daemonset template: %w", err) + } + + vt.applyServiceAccountToPodSpec(&d.Spec.Template.Spec) + return &d, nil } @@ -203,6 +256,8 @@ func (vt *ValidationTest) generateClientDaemonSet( return nil, fmt.Errorf("failed to unmarshal client daemonset template: %w", err) } + vt.applyServiceAccountToPodSpec(&d.Spec.Template.Spec) + return &d, nil } @@ -217,6 +272,12 @@ func (vt *ValidationTest) generateNetworksAnnotationValue(public, cluster bool) return strings.Join(nets, ",") } +func (vt *ValidationTest) applyServiceAccountToPodSpec(ps *core.PodSpec) { + if vt.ServiceAccountName != "" { + ps.ServiceAccountName = vt.ServiceAccountName + } +} + func loadTemplate(name, templateFileText string, config interface{}) ([]byte, error) { var writer bytes.Buffer t := template.New(name) diff --git a/pkg/daemon/multus/validation.go b/pkg/daemon/multus/validation.go index d2676e2cc92b..69ab3a2b18da 100644 --- a/pkg/daemon/multus/validation.go +++ b/pkg/daemon/multus/validation.go @@ -137,6 +137,8 @@ 
func (s *ensureNodeTypesDoNotOverlapState) Run(ctx context.Context, vsm *validat * Reusable state to verify that expected number of pods are "Running" but not necessarily "Ready" * > Verify all image pull pods are running * -- next state --> Delete image pull daemonset + * > Verify all host checker pods are running + * -- next state --> Verify all host checker pods are "Ready" * > Verify all client pods are running * -- next state --> Verify all client pods are "Ready" */ @@ -144,6 +146,7 @@ type verifyAllPodsRunningState struct { AppType daemonsetAppType ImagePullPodsPerNodeType perNodeTypeCount ExpectedNumPods int + WebServerInfo *podNetworkInfo } func (s *verifyAllPodsRunningState) Run(ctx context.Context, vsm *validationStateMachine) (suggestions []string, err error) { @@ -155,6 +158,13 @@ func (s *verifyAllPodsRunningState) Run(ctx context.Context, vsm *validationStat podSelectorLabel = imagePullAppLabel() // image pull pods don't have multus labels, so this can't be a multus issue suggestions = append(suggestions, "inability to run image pull pods is likely an issue with Nginx image or Kubernetes itself") + case hostCheckerDaemonsetAppType: + if s.WebServerInfo == nil { + return []string{}, fmt.Errorf("internal error; web server info is nil when checking for host checker readiness") + } + podSelectorLabel = hostCheckerAppLabel() + // host checker pods don't have multus labels, so this can't be a multus issue + suggestions = append(suggestions, "inability to run host checker pods likely means that cluster security permissions disallow pods with host network enabled") case clientDaemonSetAppType: podSelectorLabel = clientAppLabel() suggestions = append(suggestions, "clients not being able to run can mean multus is unable to provide them with addresses") @@ -178,6 +188,11 @@ func (s *verifyAllPodsRunningState) Run(ctx context.Context, vsm *validationStat vsm.SetNextState(&deleteImagePullersState{ ImagePullPodsPerNodeType: s.ImagePullPodsPerNodeType, }) + case hostCheckerDaemonsetAppType: + vsm.SetNextState(&verifyAllHostCheckersReadyState{ + WebServerInfo: *s.WebServerInfo, + ImagePullPodsPerNodeType: s.ImagePullPodsPerNodeType, + }) case clientDaemonSetAppType: vsm.vt.Logger.Infof("verifying all %d 'Running' client pods reach 'Ready' state", s.ExpectedNumPods) vsm.SetNextState(&verifyAllClientsReadyState{ @@ -198,7 +213,7 @@ type deleteImagePullersState struct { } func (s *deleteImagePullersState) Run(ctx context.Context, vsm *validationStateMachine) (suggestions []string, err error) { - err = vsm.vt.deleteImagePullers(ctx) + err = vsm.vt.deleteDaemonsetsWithLabel(ctx, imagePullAppLabel()) if err != nil { // erroring here is not strictly necessary but does indicate a k8s issue that probably affects future test steps return []string{"inability to delete resources is likely an issue with Kubernetes itself"}, err @@ -212,7 +227,7 @@ func (s *deleteImagePullersState) Run(ctx context.Context, vsm *validationStateM /* * > Get web server info - * -- next state --> Start clients + * -- next state --> Start host checkers */ type getWebServerInfoState struct { ImagePullPodsPerNodeType perNodeTypeCount @@ -240,8 +255,8 @@ func (s *getWebServerInfoState) Run(ctx context.Context, vsm *validationStateMac if err != nil { return suggestions, err } - vsm.vt.Logger.Infof("starting clients on each node") - vsm.SetNextState(&startClientsState{ + vsm.vt.Logger.Infof("starting host checkers on each node") + vsm.SetNextState(&startHostCheckersState{ WebServerInfo: info, ImagePullPodsPerNodeType: 
s.ImagePullPodsPerNodeType, }) @@ -249,8 +264,112 @@ func (s *getWebServerInfoState) Run(ctx context.Context, vsm *validationStateMac } /* - * Start clients - * -- next state --> Verify all client pods are running + * > Start host checkers + * -- next state --> Verify all host checker pods are running + */ +type startHostCheckersState struct { + WebServerInfo podNetworkInfo + ImagePullPodsPerNodeType perNodeTypeCount +} + +func (s *startHostCheckersState) Run(ctx context.Context, vsm *validationStateMachine) (suggestions []string, err error) { + if vsm.vt.PublicNetwork == "" { + vsm.vt.Logger.Infof("not starting host checkers because public network is not specified") + vsm.SetNextState(&startClientsState{ + WebServerInfo: s.WebServerInfo, + ImagePullPodsPerNodeType: s.ImagePullPodsPerNodeType, + }) + return []string{}, nil + } + + err = vsm.vt.startHostCheckers(ctx, vsm.resourceOwnerRefs, s.WebServerInfo.publicAddr) + if err != nil { + err = fmt.Errorf("failed to start host checkers: %w", err) + vsm.Exit() // this is a whole validation test failure if we can't start host checkers + return []string{}, err + } + + // we expect only one host checker pod per host, which is the same as the image pullers, so we + // can use that as our expectation + vsm.vt.Logger.Infof("verifying host checker pods begin 'Running': count per node type: %v", s.ImagePullPodsPerNodeType) + vsm.SetNextState(&verifyAllPodsRunningState{ + AppType: hostCheckerDaemonsetAppType, + ImagePullPodsPerNodeType: s.ImagePullPodsPerNodeType, + ExpectedNumPods: s.ImagePullPodsPerNodeType.Total(), + WebServerInfo: &s.WebServerInfo, + }) + return []string{}, nil +} + +/* + * > Verify all host checker pods are "Ready" + * -- next state --> Delete host checkers + */ +type verifyAllHostCheckersReadyState struct { + WebServerInfo podNetworkInfo + ImagePullPodsPerNodeType perNodeTypeCount +} + +func (s *verifyAllHostCheckersReadyState) Run(ctx context.Context, vsm *validationStateMachine) (suggestions []string, err error) { + totalExpected := s.ImagePullPodsPerNodeType.Total() + + numReady, err := vsm.vt.numPodsReadyWithLabel(ctx, hostCheckerAppLabel()) + hostSuggestions := []string{ + "the host may not have a route that directs traffic to the public network", + "the public Network Attachment Definition may not have a route that directs traffic to hosts", + "there may be a network firewall or security policy blocking inter-node traffic on multus networks", + } + if err != nil { + return hostSuggestions, err + } + + if numReady != totalExpected { + return hostSuggestions, fmt.Errorf("number of 'Ready' host checkers [%d] is not the number expected [%d]", numReady, totalExpected) + } + + vsm.vt.Logger.Infof("all %d host checkers are 'Ready'", totalExpected) + vsm.SetNextState(&deleteHostCheckersState{ + WebServerInfo: s.WebServerInfo, + ImagePullPodsPerNodeType: s.ImagePullPodsPerNodeType, + }) + return []string{}, nil +} + +/* + * > Delete host checkers + * < host check only == true > + * -- next state --> Exit / Done + * < host check only == false > + * -- next state --> Start clients + */ +type deleteHostCheckersState struct { + WebServerInfo podNetworkInfo + ImagePullPodsPerNodeType perNodeTypeCount +} + +func (s *deleteHostCheckersState) Run(ctx context.Context, vsm *validationStateMachine) (suggestions []string, err error) { + err = vsm.vt.deleteDaemonsetsWithLabel(ctx, hostCheckerAppLabel()) + if err != nil { + // erroring here is not strictly necessary but does indicate a k8s issue that probably affects future test steps + return 
[]string{"inability to delete resources is likely an issue with Kubernetes itself"}, err + } + + if vsm.vt.HostCheckOnly { + vsm.vt.Logger.Infof("done checking hosts in hostCheckOnly mode; exiting") + vsm.Exit() + return []string{}, nil + } + + vsm.SetNextState(&startClientsState{ + WebServerInfo: s.WebServerInfo, + ImagePullPodsPerNodeType: s.ImagePullPodsPerNodeType, + }) + return []string{}, nil +} + +/* + * > Start clients + * -- next state --> Verify all client pods are running */ type startClientsState struct { WebServerInfo podNetworkInfo @@ -258,6 +377,8 @@ type startClientsState struct { } func (s *startClientsState) Run(ctx context.Context, vsm *validationStateMachine) (suggestions []string, err error) { + vsm.vt.Logger.Infof("starting clients on each node") + podsPerNodeType := perNodeTypeCount{} for nodeType := range vsm.vt.NodeTypes { numClientDaemonsetsStarted, err := vsm.vt.startClients(ctx, vsm.resourceOwnerRefs, s.WebServerInfo.publicAddr, s.WebServerInfo.clusterAddr, nodeType) @@ -277,6 +398,7 @@ func (s *startClientsState) Run(ctx context.Context, vsm *validationStateMachine AppType: clientDaemonSetAppType, ImagePullPodsPerNodeType: s.ImagePullPodsPerNodeType, ExpectedNumPods: podsPerNodeType.Total(), + WebServerInfo: &s.WebServerInfo, }) return []string{}, nil } @@ -295,7 +417,7 @@ type verifyAllClientsReadyState struct { } func (s *verifyAllClientsReadyState) Run(ctx context.Context, vsm *validationStateMachine) (suggestions []string, err error) { - numReady, err := vsm.vt.numClientsReady(ctx, s.ExpectedNumClients) + numReady, err := vsm.vt.numPodsReadyWithLabel(ctx, clientAppLabel()) collocationSuggestion := "if clients on the same node as the web server become ready but not others, " + "there may be a network firewall or security policy blocking inter-node traffic on multus networks" defaultSuggestions := append([]string{collocationSuggestion, flakyNetworkSuggestion}, unableToProvideAddressSuggestions...) @@ -360,6 +482,11 @@ func (vt *ValidationTest) Run(ctx context.Context) (*ValidationTestResults, erro suggestedDebugging: []string{}, } + if vt.PublicNetwork == "" && vt.HostCheckOnly { + vt.Logger.Infof("hostCheckOnly is set, and there is no public network; no host prerequisites are needed, so host check is successful") + return testResults, nil + } + // configmap's purpose is to serve as the owner resource object for all other test resources. 
// this allows users to clean up a botched test easily just by deleting this configmap owningConfigMap, err := vt.createOwningConfigMap(ctx) diff --git a/pkg/operator/ceph/csi/controller.go b/pkg/operator/ceph/csi/controller.go index 22de3953126a..86473acff5cd 100644 --- a/pkg/operator/ceph/csi/controller.go +++ b/pkg/operator/ceph/csi/controller.go @@ -224,7 +224,7 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e // if at least one cephcluster is present update the csi logrotate sidecar // with the first listed ceph cluster specs with logrotate enabled - setCSILogrotateParams(cephClusters.Items) + r.setCSILogrotateParams(cephClusters.Items) err = peermap.CreateOrUpdateConfig(r.opManagerContext, r.context, &peermap.PeerIDMappings{}) if err != nil { @@ -309,7 +309,7 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e return reconcileResult, nil } -func setCSILogrotateParams(cephClustersItems []cephv1.CephCluster) { +func (r *ReconcileCSI) setCSILogrotateParams(cephClustersItems []cephv1.CephCluster) { logger.Debug("set logrotate values in csi param") spec := cephClustersItems[0].Spec for _, cluster := range cephClustersItems { @@ -318,9 +318,9 @@ func setCSILogrotateParams(cephClustersItems []cephv1.CephCluster) { break } } - CSIParam.CsiLogDirPath = spec.DataDirHostPath + csiRootPath = spec.DataDirHostPath if spec.DataDirHostPath == "" { - CSIParam.CsiLogDirPath = k8sutil.DataDir + csiRootPath = k8sutil.DataDir } CSIParam.CSILogRotation = spec.LogCollector.Enabled diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go index 3861755e3428..6c314f2c9939 100644 --- a/pkg/operator/ceph/csi/spec.go +++ b/pkg/operator/ceph/csi/spec.go @@ -20,6 +20,7 @@ import ( "context" _ "embed" "fmt" + "path" "strconv" "strings" "time" @@ -52,7 +53,7 @@ type Param struct { ResizerImage string DriverNamePrefix string KubeletDirPath string - CsiLogDirPath string + CsiLogRootPath string ForceCephFSKernelClient string CephFSKernelMountOptions string CephFSPluginUpdateStrategy string @@ -99,9 +100,9 @@ type Param struct { CSINFSPodLabels map[string]string CSIRBDPodLabels map[string]string CSILogRotation bool + CsiComponentName string CSILogRotationMaxSize string CSILogRotationPeriod string - CSILogFolder string Privileged bool } @@ -188,6 +189,8 @@ var ( LogrotateTemplatePath string holderEnabled bool + + csiRootPath string ) const ( @@ -288,6 +291,8 @@ const ( rbdDriverSuffix = "rbd.csi.ceph.com" cephFSDriverSuffix = "cephfs.csi.ceph.com" nfsDriverSuffix = "nfs.csi.ceph.com" + nodePlugin = "node-plugin" + controllerPlugin = "controller-plugin" ) func CSIEnabled() bool { @@ -359,7 +364,8 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI return errors.Wrap(err, "failed to load rbdplugin template") } if tp.CSILogRotation { - tp.CSILogFolder = "rbd-plugin" + tp.CsiComponentName = nodePlugin + tp.CsiLogRootPath = path.Join(csiRootPath, RBDDriverName) applyLogrotateSidecar(&rbdPlugin.Spec.Template, "csi-rbd-daemonset-log-collector", LogrotateTemplatePath, tp) } @@ -368,7 +374,8 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI return errors.Wrap(err, "failed to load rbd provisioner deployment template") } if tp.CSILogRotation { - tp.CSILogFolder = "rbd-provisioner" + tp.CsiComponentName = controllerPlugin + tp.CsiLogRootPath = path.Join(csiRootPath, RBDDriverName) applyLogrotateSidecar(&rbdProvisionerDeployment.Spec.Template, "csi-rbd-deployment-log-collector",
LogrotateTemplatePath, tp) } @@ -394,11 +401,22 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI if err != nil { return errors.Wrap(err, "failed to load CephFS plugin template") } + if tp.CSILogRotation { + tp.CsiComponentName = nodePlugin + tp.CsiLogRootPath = path.Join(csiRootPath, CephFSDriverName) + applyLogrotateSidecar(&cephfsPlugin.Spec.Template, "csi-cephfs-daemonset-log-collector", LogrotateTemplatePath, tp) + } cephfsProvisionerDeployment, err = templateToDeployment("cephfs-provisioner", CephFSProvisionerDepTemplatePath, tp) if err != nil { return errors.Wrap(err, "failed to load rbd provisioner deployment template") } + if tp.CSILogRotation { + tp.CsiComponentName = controllerPlugin + tp.CsiLogRootPath = path.Join(csiRootPath, CephFSDriverName) + applyLogrotateSidecar(&cephfsProvisionerDeployment.Spec.Template, "csi-cephfs-deployment-log-collector", LogrotateTemplatePath, tp) + } + // Create service if either liveness or GRPC metrics are enabled. if CSIParam.EnableLiveness { cephfsService, err = templateToService("cephfs-service", CephFSPluginServiceTemplatePath, tp) @@ -422,11 +440,22 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI if err != nil { return errors.Wrap(err, "failed to load nfs plugin template") } + if tp.CSILogRotation { + tp.CsiComponentName = nodePlugin + tp.CsiLogRootPath = path.Join(csiRootPath, NFSDriverName) + applyLogrotateSidecar(&nfsPlugin.Spec.Template, "csi-nfs-daemonset-log-collector", LogrotateTemplatePath, tp) + } nfsProvisionerDeployment, err = templateToDeployment("nfs-provisioner", NFSProvisionerDepTemplatePath, tp) if err != nil { return errors.Wrap(err, "failed to load nfs provisioner deployment template") } + if tp.CSILogRotation { + tp.CsiComponentName = controllerPlugin + tp.CsiLogRootPath = path.Join(csiRootPath, NFSDriverName) + applyLogrotateSidecar(&nfsProvisionerDeployment.Spec.Template, "csi-nfs-deployment-log-collector", LogrotateTemplatePath, tp) + } + enabledDrivers = append(enabledDrivers, driverDetails{ name: NFSDriverShortName, fullName: NFSDriverName, diff --git a/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml b/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml index 5c84038fae2d..1de802aed631 100644 --- a/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml +++ b/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml @@ -119,6 +119,11 @@ spec: - "--drivername={{ .DriverNamePrefix }}cephfs.csi.ceph.com" - "--pidlimit=-1" - "--forcecephkernelclient={{ .ForceCephFSKernelClient }}" + {{ if .CSILogRotation }} + - "--logtostderr=false" + - "--alsologtostderr=true" + - "--log_file={{ .CsiLogRootPath }}/log/{{ .CsiComponentName }}/csi-cephfsplugin.log" + {{ end }} {{ if .CSIEnableMetadata }} - "--setmetadata={{ .CSIEnableMetadata }}" {{ end }} @@ -166,6 +171,10 @@ spec: mountPath: /etc/ceph-csi-config/ - name: keys-tmp-dir mountPath: /tmp/csi/keys + {{ if .CSILogRotation }} + - mountPath: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} + name: csi-log + {{ end }} {{ if .MountCustomCephConf }} - name: ceph-config mountPath: /etc/ceph/ceph.conf @@ -191,6 +200,11 @@ spec: - "--leader-election-lease-duration={{ .LeaderElectionLeaseDuration }}" - "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}" - "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}" + {{ if .CSILogRotation }} + - "--logtostderr=false" + - "--alsologtostderr=true" + - 
"--log_file={{ .CsiLogRootPath }}/log/{{ .CsiComponentName }}/csi-addons.log" + {{ end }} ports: - containerPort: {{ .CSIAddonsPort }} env: @@ -220,6 +234,10 @@ spec: volumeMounts: - name: socket-dir mountPath: /csi + {{ if .CSILogRotation }} + - mountPath: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} + name: csi-log + {{ end }} {{ end }} {{ if .EnableLiveness }} - name: liveness-prometheus @@ -267,6 +285,15 @@ spec: emptyDir: { medium: "Memory" } + {{ if .CSILogRotation }} + - name: csi-log + hostPath: + path: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} + type: DirectoryOrCreate + - name: csi-logs-logrotate + emptyDir: + type: DirectoryOrCreate + {{ end }} {{ if .MountCustomCephConf }} - name: ceph-config configMap: diff --git a/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin.yaml b/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin.yaml index c3a41d7a4c01..5ce364cd7256 100644 --- a/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin.yaml +++ b/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin.yaml @@ -72,6 +72,11 @@ spec: - "--drivername={{ .DriverNamePrefix }}cephfs.csi.ceph.com" - "--pidlimit=-1" - "--forcecephkernelclient={{ .ForceCephFSKernelClient }}" + {{ if .CSILogRotation }} + - "--logtostderr=false" + - "--alsologtostderr=true" + - "--log_file={{ .CsiLogRootPath }}/log/{{ .CsiComponentName }}/csi-cephfsplugin.log" + {{ end }} {{ if .CephFSKernelMountOptions }} - "--kernelmountoptions={{ .CephFSKernelMountOptions }}" {{ end }} @@ -115,6 +120,10 @@ spec: mountPath: /tmp/csi/keys - name: host-run-mount mountPath: /run/mount + {{ if .CSILogRotation }} + - mountPath: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} + name: csi-log + {{ end }} {{ if .EnablePluginSelinuxHostMount }} - name: etc-selinux mountPath: /etc/selinux @@ -198,6 +207,15 @@ spec: - name: host-run-mount hostPath: path: /run/mount + {{ if .CSILogRotation }} + - name: csi-log + hostPath: + path: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} + type: DirectoryOrCreate + - name: csi-logs-logrotate + emptyDir: + type: DirectoryOrCreate + {{ end }} {{ if .EnablePluginSelinuxHostMount }} - name: etc-selinux hostPath: diff --git a/pkg/operator/ceph/csi/template/csi-logrotate-sidecar.yaml b/pkg/operator/ceph/csi/template/csi-logrotate-sidecar.yaml index 63a47286cedc..e739862fa755 100644 --- a/pkg/operator/ceph/csi/template/csi-logrotate-sidecar.yaml +++ b/pkg/operator/ceph/csi/template/csi-logrotate-sidecar.yaml @@ -1,18 +1,18 @@ args: - | echo "Starting the csi-logrotate-sidecar" - mkdir -p {{ .CsiLogDirPath }}/cephcsi/logrotate-config/{{ .CSILogFolder }}; - echo '{{ .CsiLogDirPath }}cephcsi/log/{{ .CSILogFolder }}/*.log { + mkdir -p {{ .CsiLogRootPath }}/logrotate-config/{{ .CsiComponentName }} + echo '{{ .CsiLogRootPath }}/log/{{ .CsiComponentName }}/*.log { {{ .CSILogRotationPeriod }} missingok rotate 7 compress copytruncate notifempty - }' > {{ .CsiLogDirPath }}/cephcsi/logrotate-config/{{ .CSILogFolder }}/csi; - echo "File creation container completed"; + }' > {{ .CsiLogRootPath }}/logrotate-config/{{ .CsiComponentName }}/csi + echo "File creation container completed" - LOG_ROTATE_CEPH_CSI_FILE={{ .CsiLogDirPath }}/cephcsi/logrotate-config/{{ .CSILogFolder }}/csi + LOG_ROTATE_CEPH_CSI_FILE={{ .CsiLogRootPath }}/logrotate-config/{{ .CsiComponentName }}/csi LOG_MAX_SIZE={{ .CSILogRotationMaxSize }} if [ "$LOG_MAX_SIZE" != "0" ]; then sed --in-place "4i \ \ \ \ maxsize $LOG_MAX_SIZE" "$LOG_ROTATE_CEPH_CSI_FILE" @@ -29,7 +29,7 @@ image: {{ .CSIPluginImage }} imagePullPolicy: IfNotPresent 
name: log-collector volumeMounts: - - mountPath: {{ .CsiLogDirPath }}/cephcsi/logrotate-config/{{ .CSILogFolder }} + - mountPath: {{ .CsiLogRootPath }}/logrotate-config/{{ .CsiComponentName }} name: csi-logs-logrotate - - mountPath: {{ .CsiLogDirPath }}/cephcsi/log/{{ .CSILogFolder }} + - mountPath: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} name: csi-log diff --git a/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml b/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml index b134b7b4f802..8674c49f486f 100644 --- a/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml +++ b/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml @@ -113,6 +113,11 @@ spec: - "--controllerserver=true" - "--drivername={{ .DriverNamePrefix }}nfs.csi.ceph.com" - "--pidlimit=-1" + {{ if .CSILogRotation }} + - "--logtostderr=false" + - "--alsologtostderr=true" + - "--log_file={{ .CsiLogRootPath }}/log/{{ .CsiComponentName }}/csi-nfsplugin.log" + {{ end }} env: - name: POD_IP valueFrom: @@ -147,6 +152,10 @@ spec: mountPath: /etc/ceph-csi-config/ - name: keys-tmp-dir mountPath: /tmp/csi/keys + {{ if .CSILogRotation }} + - mountPath: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} + name: csi-log + {{ end }} {{ if .MountCustomCephConf }} - name: ceph-config mountPath: /etc/ceph/ceph.conf @@ -176,6 +185,15 @@ spec: emptyDir: { medium: "Memory" } + {{ if .CSILogRotation }} + - name: csi-log + hostPath: + path: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} + type: DirectoryOrCreate + - name: csi-logs-logrotate + emptyDir: + type: DirectoryOrCreate + {{ end }} {{ if .MountCustomCephConf }} - name: ceph-config configMap: diff --git a/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin.yaml b/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin.yaml index 2572a205e907..67d2a93ca2f5 100644 --- a/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin.yaml +++ b/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin.yaml @@ -66,6 +66,11 @@ spec: - "--nodeserver=true" - "--drivername={{ .DriverNamePrefix }}nfs.csi.ceph.com" - "--pidlimit=-1" + {{ if .CSILogRotation }} + - "--logtostderr=false" + - "--alsologtostderr=true" + - "--log_file={{ .CsiLogRootPath }}/log/{{ .CsiComponentName }}/csi-nfsplugin.log" + {{ end }} env: - name: NODE_ID valueFrom: @@ -98,6 +103,10 @@ spec: mountPath: /tmp/csi/keys - name: host-run-mount mountPath: /run/mount + {{ if .CSILogRotation }} + - mountPath: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} + name: csi-log + {{ end }} {{ if .EnablePluginSelinuxHostMount }} - name: etc-selinux mountPath: /etc/selinux @@ -147,6 +156,15 @@ spec: - name: host-run-mount hostPath: path: /run/mount + {{ if .CSILogRotation }} + - name: csi-log + hostPath: + path: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} + type: DirectoryOrCreate + - name: csi-logs-logrotate + emptyDir: + type: DirectoryOrCreate + {{ end }} {{ if .EnablePluginSelinuxHostMount }} - name: etc-selinux hostPath: diff --git a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml index 6c01c1c98b58..0eaa0ba70115 100644 --- a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml +++ b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml @@ -158,7 +158,7 @@ spec: {{ if .CSILogRotation }} - "--logtostderr=false" - "--alsologtostderr=true" - - "--log_file={{ .CsiLogDirPath }}/cephcsi/log/rbd-provisioner/csi-addons.log" + - "--log_file={{ .CsiLogRootPath }}/log/{{ 
.CsiComponentName }}/csi-addons.log" {{ end }} ports: - containerPort: {{ .CSIAddonsPort }} @@ -190,7 +190,7 @@ spec: - name: socket-dir mountPath: /csi {{ if .CSILogRotation }} - - mountPath: {{ .CsiLogDirPath }}/cephcsi/log/rbd-provisioner + - mountPath: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} name: csi-log {{ end }} {{ end }} @@ -207,7 +207,7 @@ spec: {{ if .CSILogRotation }} - "--logtostderr=false" - "--alsologtostderr=true" - - "--log_file={{ .CsiLogDirPath }}/cephcsi/log/rbd-provisioner/csi-rbdplugin.log" + - "--log_file={{ .CsiLogRootPath }}/log/{{ .CsiComponentName }}/csi-rbdplugin.log" {{ end }} {{ if .EnableCSIAddonsSideCar }} - "--csi-addons-endpoint=$(CSIADDONS_ENDPOINT)" @@ -248,7 +248,7 @@ spec: - mountPath: /dev name: host-dev {{ if .CSILogRotation }} - - mountPath: {{ .CsiLogDirPath }}/cephcsi/log/rbd-provisioner + - mountPath: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} name: csi-log {{ end }} - mountPath: /sys @@ -298,13 +298,15 @@ spec: - name: host-dev hostPath: path: /dev + {{ if .CSILogRotation }} - name: csi-log hostPath: - path: {{ .CsiLogDirPath }}/cephcsi/log/rbd-provisioner + path: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} type: DirectoryOrCreate - name: csi-logs-logrotate emptyDir: type: DirectoryOrCreate + {{ end }} - name: host-sys hostPath: path: /sys diff --git a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin.yaml b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin.yaml index 733cfa2ddd87..93f93be4bbb0 100644 --- a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin.yaml +++ b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin.yaml @@ -76,7 +76,7 @@ spec: {{ if .CSILogRotation }} - "--logtostderr=false" - "--alsologtostderr=true" - - "--log_file={{ .CsiLogDirPath }}/cephcsi/log/rbd-plugin/csi-rbdplugin.log" + - "--log_file={{ .CsiLogRootPath }}/log/{{ .CsiComponentName }}/csi-rbdplugin.log" {{ end }} {{ if .EnableCSIAddonsSideCar }} - "--csi-addons-endpoint=$(CSIADDONS_ENDPOINT)" @@ -127,7 +127,7 @@ spec: - name: host-run-mount mountPath: /run/mount {{ if .CSILogRotation }} - - mountPath: {{ .CsiLogDirPath }}/cephcsi/log/rbd-plugin + - mountPath: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} name: csi-log {{ end }} {{ if .EnablePluginSelinuxHostMount }} @@ -166,7 +166,7 @@ spec: {{ if .CSILogRotation }} - "--logtostderr=false" - "--alsologtostderr=true" - - "--log_file={{ .CsiLogDirPath }}/cephcsi/log/rbd-plugin/csi-addons.log" + - "--log_file={{ .CsiLogRootPath }}/log/{{ .CsiComponentName }}/csi-addons.log" {{ end }} ports: - containerPort: {{ .CSIAddonsPort }} @@ -194,7 +194,7 @@ spec: - name: plugin-dir mountPath: /csi {{ if .CSILogRotation }} - - mountPath: {{ .CsiLogDirPath }}/cephcsi/log/rbd-plugin + - mountPath: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} name: csi-log {{ end }} {{ end }} @@ -229,13 +229,15 @@ spec: hostPath: path: "{{ .KubeletDirPath }}/plugins/{{ .DriverNamePrefix }}rbd.csi.ceph.com" type: DirectoryOrCreate + {{ if .CSILogRotation }} - name: csi-log hostPath: - path: {{ .CsiLogDirPath }}/cephcsi/log/rbd-plugin + path: {{ .CsiLogRootPath }}/log/{{ .CsiComponentName }} type: DirectoryOrCreate - name: csi-logs-logrotate emptyDir: type: DirectoryOrCreate + {{ end }} - name: plugin-mount-dir hostPath: path: "{{ .KubeletDirPath }}/plugins" diff --git a/pkg/operator/ceph/csi/util_test.go b/pkg/operator/ceph/csi/util_test.go index 7ea2f09b73bc..28b4f8896d10 100644 --- a/pkg/operator/ceph/csi/util_test.go +++ b/pkg/operator/ceph/csi/util_test.go @@ -120,13 +120,22 @@ func Test_applyVolumeToPodSpec(t *testing.T) 
{ Param: CSIParam, Namespace: "foo", } - // rbdplugin has 13 volumes by default - defaultVolumes := 13 + // rbdplugin has 11 volumes by default + defaultVolumes := 11 ds, err := templateToDaemonSet(dsName, RBDPluginTemplatePath, tp) assert.Nil(t, err) applyVolumeToPodSpec(config, configKey, &ds.Spec.Template.Spec) assert.Len(t, ds.Spec.Template.Spec.Volumes, defaultVolumes) + + // enable csi logrotate, two more volumes get added + tp.CSILogRotation = true + ds, err = templateToDaemonSet(dsName, RBDPluginTemplatePath, tp) + assert.Nil(t, err) + applyVolumeToPodSpec(config, configKey, &ds.Spec.Template.Spec) + assert.Len(t, ds.Spec.Template.Spec.Volumes, defaultVolumes+2) + tp.CSILogRotation = false + // add new volume volumes := []corev1.Volume{ { @@ -198,6 +207,15 @@ func Test_applyVolumeMountToContainer(t *testing.T) { applyVolumeMountToContainer(config, configKey, rbdContainerName, &ds.Spec.Template.Spec) assert.Len(t, ds.Spec.Template.Spec.Containers[1].VolumeMounts, defaultVolumes) + + // enable csi logrotate, one more volume mount gets added + tp.CSILogRotation = true + ds, err = templateToDaemonSet(dsName, RBDPluginTemplatePath, tp) + assert.Nil(t, err) + applyVolumeMountToContainer(config, configKey, rbdContainerName, &ds.Spec.Template.Spec) + assert.Len(t, ds.Spec.Template.Spec.Containers[1].VolumeMounts, defaultVolumes+1) + tp.CSILogRotation = false + // add new volume mount volumeMounts := []corev1.VolumeMount{ { diff --git a/pkg/operator/ceph/pool/controller.go b/pkg/operator/ceph/pool/controller.go index 58d0f13379cf..24d080ebf644 100644 --- a/pkg/operator/ceph/pool/controller.go +++ b/pkg/operator/ceph/pool/controller.go @@ -366,7 +366,7 @@ func (r *ReconcileCephBlockPool) reconcileCreatePool(clusterInfo *cephclient.Clu poolSpec := cephBlockPool.ToNamedPoolSpec() err := createPool(r.context, clusterInfo, cephCluster, &poolSpec) if err != nil { - return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to create pool %q.", cephBlockPool.GetName()) + return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to configure pool %q.", cephBlockPool.GetName()) } // Let's return here so that on the initial creation we don't check for update right away @@ -382,7 +382,7 @@ func createPool(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, // create the pool logger.Infof("creating pool %q in namespace %q", p.Name, clusterInfo.Namespace) if err := cephclient.CreatePool(context, clusterInfo, clusterSpec, *p); err != nil { - return errors.Wrapf(err, "failed to create pool %q", p.Name) + return errors.Wrapf(err, "failed to configure pool %q", p.Name) } if p.Application != poolApplicationNameRBD { diff --git a/tests/scripts/multus/default-public-cluster-nads.yaml b/tests/scripts/multus/default-public-cluster-nads.yaml index bd8e96b51321..28ccd184ae9d 100644 --- a/tests/scripts/multus/default-public-cluster-nads.yaml +++ b/tests/scripts/multus/default-public-cluster-nads.yaml @@ -7,7 +7,20 @@ metadata: labels: annotations: spec: - config: '{ "cniVersion": "0.3.1", "type": "macvlan", "master": "eth0", "mode": "bridge", "ipam": { "type": "whereabouts", "range": "192.168.20.0/24" } }' + config: | + { + "cniVersion": "0.3.1", + "type": "macvlan", + "master": "eth0", + "mode": "bridge", + "ipam": { + "type": "whereabouts", + "range": "192.168.20.0/24", + "routes": [ + {"dst": "192.168.29.0/24"} + ] + } + } --- apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition @@ -17,4 +30,14 @@ metadata: labels: annotations: spec: - config: '{ "cniVersion": "0.3.1",
"type": "macvlan", "master": "eth0", "mode": "bridge", "ipam": { "type": "whereabouts", "range": "fc00::/96" } }' + config: | + { + "cniVersion": "0.3.1", + "type": "macvlan", + "master": "eth0", + "mode": "bridge", + "ipam": { + "type": "whereabouts", + "range": "fc00::/96" + } + } diff --git a/tests/scripts/multus/host-cfg-ds.yaml b/tests/scripts/multus/host-cfg-ds.yaml new file mode 100644 index 000000000000..ca95d4f66798 --- /dev/null +++ b/tests/scripts/multus/host-cfg-ds.yaml @@ -0,0 +1,68 @@ +#################################################################################################### +# this daemonset configures each minikube or KinD node to route to the multus public net +# it is not suitable for production +#################################################################################################### +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: host-net-config + labels: + app: host-net-config +spec: + selector: + matchLabels: + app: host-net-config + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 100 # allow updating all at once + template: + metadata: + labels: + app: host-net-config + spec: + hostNetwork: true + tolerations: + # also run on control plane nodes for KinD testing + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + terminationGracePeriodSeconds: 0 # allow updating/deleting immediately + containers: + - name: test + image: quay.io/ceph/ceph:v18 + env: + - name: IFACE_NAME + value: eth0 # IFACE_NAME + command: + - bash + - -x + - -c + args: + - | + IFACE='$(IFACE_NAME)' + + NODE_PUBLIC_NET_IP_FIRST3='192.168.29' + NET_ATT_DEF_PUBLIC_NET_CIDR='192.168.20.0/24' + + # get the last part of the interface's IP addr + ip -4 addr show $IFACE + ip_cidr="$(ip -4 addr show $IFACE | grep inet | awk '{print $2}')" # e.g., 192.168.100.3/24 + ip="${ip_cidr%/*}" # e.g., 192.168.100.3 + last="${ip##*.}" # e.g., 3 + + # add a shim to connect IFACE to the macvlan public network, with a static IP + # avoid IP conflicts by re-using the last part of the existing IFACE IP + ip link add public-shim link ${IFACE} type macvlan mode bridge + ip addr add ${NODE_PUBLIC_NET_IP_FIRST3}.${last}/24 dev public-shim + ip link set public-shim up + ip route add ${NET_ATT_DEF_PUBLIC_NET_CIDR} dev public-shim + + ip addr show $IFACE + ip addr show public-shim + ip route show + + sleep infinity + resources: {} + securityContext: + capabilities: + add: ["NET_ADMIN"]