From 7b78cc7ba62ac6244ec255e1e74671ec5ac71ad5 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Tue, 23 Jan 2024 22:35:39 +0530 Subject: [PATCH 1/8] csi: add default crushLocationLabels to csiConfigMap This commit adds the default crush location labels to csi configMap. Signed-off-by: Praveen M --- pkg/operator/ceph/csi/cluster_config.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/operator/ceph/csi/cluster_config.go b/pkg/operator/ceph/csi/cluster_config.go index 42f76141264d..a552c0a196bd 100644 --- a/pkg/operator/ceph/csi/cluster_config.go +++ b/pkg/operator/ceph/csi/cluster_config.go @@ -149,6 +149,9 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C if newCsiClusterConfigEntry.RBD.RadosNamespace != "" || newCsiClusterConfigEntry.RBD.NetNamespaceFilePath != "" { centry.RBD = newCsiClusterConfigEntry.RBD } + if len(newCsiClusterConfigEntry.ReadAffinity.CrushLocationLabels) != 0 { + centry.ReadAffinity = newCsiClusterConfigEntry.ReadAffinity + } found = true cc[i] = centry break @@ -171,6 +174,9 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C if newCsiClusterConfigEntry.NFS.NetNamespaceFilePath != "" { centry.NFS = newCsiClusterConfigEntry.NFS } + if len(newCsiClusterConfigEntry.ReadAffinity.CrushLocationLabels) != 0 { + centry.ReadAffinity = newCsiClusterConfigEntry.ReadAffinity + } cc = append(cc, centry) } } From f67bbc834bea44bf563090f836389fa578cebd66 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 25 Jan 2024 19:32:33 +0530 Subject: [PATCH 2/8] csi: fix netNamespaceFilePath addition and removal for all clusterIDs This commit resolves the issue where the netNamespaceFilePath was not being updated for all clusterIDs present in the csi configMap. 
Signed-off-by: Praveen M --- pkg/operator/ceph/csi/cluster_config.go | 46 +++- pkg/operator/ceph/csi/cluster_config_test.go | 249 +++++++++++++++++++ pkg/operator/ceph/csi/spec.go | 13 +- 3 files changed, 304 insertions(+), 4 deletions(-) diff --git a/pkg/operator/ceph/csi/cluster_config.go b/pkg/operator/ceph/csi/cluster_config.go index a552c0a196bd..7775b0e9c8e7 100644 --- a/pkg/operator/ceph/csi/cluster_config.go +++ b/pkg/operator/ceph/csi/cluster_config.go @@ -103,6 +103,49 @@ func MonEndpoints(mons map[string]*cephclient.MonInfo, requireMsgr2 bool) []stri return endpoints } +// updateNetNamespaceFilePath modify the netNamespaceFilePath for all cluster IDs. +// If holderEnabled is set to true. Otherwise, removes the netNamespaceFilePath value +// for all the clusterIDs. +func updateNetNamespaceFilePath(clusterNamespace string, cc csiClusterConfig) { + var ( + cephFSNetNamespaceFilePath string + rbdNetNamespaceFilePath string + nfsNetNamespaceFilePath string + ) + + if IsHolderEnabled() { + for _, centry := range cc { + if centry.Namespace == clusterNamespace && centry.ClusterID == clusterNamespace { + if centry.CephFS.NetNamespaceFilePath != "" { + cephFSNetNamespaceFilePath = centry.CephFS.NetNamespaceFilePath + } + if centry.RBD.NetNamespaceFilePath != "" { + rbdNetNamespaceFilePath = centry.RBD.NetNamespaceFilePath + } + if centry.NFS.NetNamespaceFilePath != "" { + nfsNetNamespaceFilePath = centry.NFS.NetNamespaceFilePath + } + } + } + + for i, centry := range cc { + if centry.Namespace == clusterNamespace { + cc[i].CephFS.NetNamespaceFilePath = cephFSNetNamespaceFilePath + cc[i].RBD.NetNamespaceFilePath = rbdNetNamespaceFilePath + cc[i].NFS.NetNamespaceFilePath = nfsNetNamespaceFilePath + } + } + } else { + for i := range cc { + if cc[i].Namespace == clusterNamespace { + cc[i].CephFS.NetNamespaceFilePath = "" + cc[i].RBD.NetNamespaceFilePath = "" + cc[i].NFS.NetNamespaceFilePath = "" + } + } + } +} + // updateCsiClusterConfig returns a json-formatted 
string containing // the cluster-to-mon mapping required to configure ceph csi. func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *CSIClusterConfigEntry) (string, error) { @@ -164,7 +207,7 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C centry.ClusterID = clusterKey centry.Namespace = newCsiClusterConfigEntry.Namespace centry.Monitors = newCsiClusterConfigEntry.Monitors - if newCsiClusterConfigEntry.RBD.RadosNamespace != "" || newCsiClusterConfigEntry.CephFS.NetNamespaceFilePath != "" { + if newCsiClusterConfigEntry.RBD.RadosNamespace != "" || newCsiClusterConfigEntry.RBD.NetNamespaceFilePath != "" { centry.RBD = newCsiClusterConfigEntry.RBD } // Add a condition not to fill with empty values @@ -181,6 +224,7 @@ func updateCsiClusterConfig(curr, clusterKey string, newCsiClusterConfigEntry *C } } + updateNetNamespaceFilePath(clusterKey, cc) return formatCsiClusterConfig(cc) } diff --git a/pkg/operator/ceph/csi/cluster_config_test.go b/pkg/operator/ceph/csi/cluster_config_test.go index 7d8ac69fcfbd..9a87c39fc91d 100644 --- a/pkg/operator/ceph/csi/cluster_config_test.go +++ b/pkg/operator/ceph/csi/cluster_config_test.go @@ -18,11 +18,13 @@ package csi import ( "encoding/json" + "reflect" "strings" "testing" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" cephclient "github.com/rook/rook/pkg/daemon/ceph/client" + "github.com/rook/rook/pkg/operator/ceph/cluster/osd/topology" "github.com/stretchr/testify/assert" cephcsi "github.com/ceph/ceph-csi/api/deploy/kubernetes" @@ -87,6 +89,16 @@ func TestUpdateCsiClusterConfig(t *testing.T) { }, }, } + csiClusterConfigEntry4 := CSIClusterConfigEntry{ + Namespace: "rook-ceph-4", + ClusterInfo: cephcsi.ClusterInfo{ + Monitors: []string{"10.1.1.1:5000"}, + ReadAffinity: cephcsi.ReadAffinity{ + Enabled: true, + CrushLocationLabels: strings.Split(topology.GetDefaultTopologyLabels(), ","), + }, + }, + } var want, s string var err error @@ -376,6 +388,14 @@ func 
TestUpdateCsiClusterConfig(t *testing.T) { assert.NoError(t, err) compareJSON(t, `[{"clusterID":"rook-ceph-1","monitors":["1.2.3.4:5000"],"rbd":{"netNamespaceFilePath":"/var/run/netns/rook-ceph-1","radosNamespace":"rook-ceph-1"},"namespace":"rook-ceph-1"}]`, s) }) + + t.Run("test crush location labels are set", func(t *testing.T) { + s, err = updateCsiClusterConfig("[]", "rook-ceph-4", &csiClusterConfigEntry4) + assert.NoError(t, err) + compareJSON(t, `[{"clusterID":"rook-ceph-4","monitors":["10.1.1.1:5000"],"readAffinity": {"enabled": true, "crushLocationLabels":["kubernetes.io/hostname", + "topology.kubernetes.io/region","topology.kubernetes.io/zone","topology.rook.io/chassis","topology.rook.io/rack","topology.rook.io/row","topology.rook.io/pdu", + "topology.rook.io/pod","topology.rook.io/room","topology.rook.io/datacenter"]},"namespace":"rook-ceph-4"}]`, s) + }) } func contains(src, dest []string) bool { @@ -607,3 +627,232 @@ func TestUpdateCSIDriverOptions(t *testing.T) { }) } } + +func TestUpdateNetNamespaceFilePath(t *testing.T) { + type args struct { + clusterConfig csiClusterConfig + clusterKey string + holderEnabled bool + } + + tests := []struct { + name string + args args + want csiClusterConfig + }{ + { + name: "empty", + args: args{ + clusterKey: "rook-ceph", + holderEnabled: false, + clusterConfig: []CSIClusterConfigEntry{}, + }, + want: []CSIClusterConfigEntry{}, + }, + { + name: "holder enabled", + args: args{ + clusterKey: "rook-ceph", + holderEnabled: true, + clusterConfig: []CSIClusterConfigEntry{ + { + Namespace: "rook-ceph", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "rook-ceph", + Monitors: []string{"1.1.1.1"}, + CephFS: cephcsi.CephFS{ + NetNamespaceFilePath: "cephfs.net.ns", + }, + RBD: cephcsi.RBD{ + NetNamespaceFilePath: "rbd.net.ns", + }, + NFS: cephcsi.NFS{ + NetNamespaceFilePath: "nfs.net.ns", + }, + }, + }, + { + Namespace: "rook-ceph", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "cluster-1", + Monitors: []string{"1.1.1.1"}, 
+ CephFS: cephcsi.CephFS{ + SubvolumeGroup: "csi", + }, + }, + }, + }, + }, + want: []CSIClusterConfigEntry{ + { + Namespace: "rook-ceph", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "rook-ceph", + Monitors: []string{"1.1.1.1"}, + CephFS: cephcsi.CephFS{ + NetNamespaceFilePath: "cephfs.net.ns", + }, + RBD: cephcsi.RBD{ + NetNamespaceFilePath: "rbd.net.ns", + }, + NFS: cephcsi.NFS{ + NetNamespaceFilePath: "nfs.net.ns", + }, + }, + }, + { + Namespace: "rook-ceph", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "cluster-1", + Monitors: []string{"1.1.1.1"}, + CephFS: cephcsi.CephFS{ + SubvolumeGroup: "csi", + NetNamespaceFilePath: "cephfs.net.ns", + }, + RBD: cephcsi.RBD{ + NetNamespaceFilePath: "rbd.net.ns", + }, + NFS: cephcsi.NFS{ + NetNamespaceFilePath: "nfs.net.ns", + }, + }, + }, + }, + }, + { + name: "holder disabled", + args: args{ + clusterKey: "rook-ceph", + holderEnabled: false, + clusterConfig: []CSIClusterConfigEntry{ + { + Namespace: "rook-ceph", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "rook-ceph", + Monitors: []string{"1.1.1.1"}, + CephFS: cephcsi.CephFS{ + NetNamespaceFilePath: "cephfs.net.ns", + }, + RBD: cephcsi.RBD{ + NetNamespaceFilePath: "rbd.net.ns", + }, + NFS: cephcsi.NFS{ + NetNamespaceFilePath: "nfs.net.ns", + }, + }, + }, + { + Namespace: "rook-ceph", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "cluster-1", + Monitors: []string{"1.1.1.1"}, + RBD: cephcsi.RBD{ + RadosNamespace: "group-1", + }, + }, + }, + }, + }, + want: []CSIClusterConfigEntry{ + { + Namespace: "rook-ceph", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "rook-ceph", + Monitors: []string{"1.1.1.1"}, + CephFS: cephcsi.CephFS{ + NetNamespaceFilePath: "", + }, + RBD: cephcsi.RBD{ + NetNamespaceFilePath: "", + }, + NFS: cephcsi.NFS{ + NetNamespaceFilePath: "", + }, + }, + }, + { + Namespace: "rook-ceph", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "cluster-1", + Monitors: []string{"1.1.1.1"}, + CephFS: cephcsi.CephFS{ + NetNamespaceFilePath: "", + }, + 
RBD: cephcsi.RBD{ + NetNamespaceFilePath: "", + RadosNamespace: "group-1", + }, + NFS: cephcsi.NFS{ + NetNamespaceFilePath: "", + }, + }, + }, + }, + }, + } + + cephfsNsFilePath := "cephfs.net.ns" + rbdNsFilePath := "rbd.net.ns" + nfsNsFilePath := "nfs.net.ns" + + csiConfigMap := csiClusterConfig{ + { + Namespace: "rook-ceph", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "rook-ceph", + CephFS: cephcsi.CephFS{ + NetNamespaceFilePath: cephfsNsFilePath, + }, + RBD: cephcsi.RBD{ + NetNamespaceFilePath: rbdNsFilePath, + }, + NFS: cephcsi.NFS{ + NetNamespaceFilePath: nfsNsFilePath, + }, + }, + }, + { + Namespace: "rook-ceph", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "svg", + }, + }, + { + Namespace: "default", + ClusterInfo: cephcsi.ClusterInfo{ + ClusterID: "rook-ceph", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + holderEnabled = tt.args.holderEnabled + updateNetNamespaceFilePath("rook-ceph", tt.args.clusterConfig) + assert.True(t, reflect.DeepEqual(tt.args.clusterConfig, tt.want)) + }) + } + + t.Run("Holder enabled and disabled later", func(t *testing.T) { + holderEnabled = true + updateNetNamespaceFilePath("rook-ceph", csiConfigMap) + for _, c := range csiConfigMap { + if c.Namespace == "rook-ceph" { + assert.Equal(t, cephfsNsFilePath, c.CephFS.NetNamespaceFilePath) + assert.Equal(t, rbdNsFilePath, c.RBD.NetNamespaceFilePath) + assert.Equal(t, nfsNsFilePath, c.NFS.NetNamespaceFilePath) + } + } + + holderEnabled = false + updateNetNamespaceFilePath("rook-ceph", csiConfigMap) + for _, c := range csiConfigMap { + if c.Namespace == "rook-ceph" { + assert.Equal(t, "", c.CephFS.NetNamespaceFilePath) + assert.Equal(t, "", c.RBD.NetNamespaceFilePath) + assert.Equal(t, "", c.NFS.NetNamespaceFilePath) + + } + } + }) +} diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go index 9190f031f7e8..eab50e5dcfec 100644 --- a/pkg/operator/ceph/csi/spec.go +++ b/pkg/operator/ceph/csi/spec.go @@ -120,7 +120,7 @@ var 
( AllowUnsupported = false CustomCSICephConfigExists = false - //driver names + // driver names CephFSDriverName string NFSDriverName string RBDDriverName string @@ -174,6 +174,8 @@ var ( NFSProvisionerDepTemplatePath string //go:embed template/nfs/csi-nfsplugin-holder.yaml NFSPluginHolderTemplatePath string + + holderEnabled bool ) const ( @@ -274,6 +276,10 @@ func CSIEnabled() bool { return EnableRBD || EnableCephFS || EnableNFS } +func IsHolderEnabled() bool { + return holderEnabled +} + func validateCSIParam() error { if len(CSIParam.CSIPluginImage) == 0 { return errors.New("missing csi rbd plugin image") @@ -391,7 +397,7 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI }) } - holderEnabled := !CSIParam.EnableCSIHostNetwork + holderEnabled = !CSIParam.EnableCSIHostNetwork for i := range r.clustersWithHolder { if r.clustersWithHolder[i].cluster.Spec.Network.IsMultus() { @@ -749,7 +755,6 @@ func (r *ReconcileCSI) validateCSIVersion(ownerInfo *k8sutil.OwnerInfo) (*CephCS CSIParam.CSIPluginImage, corev1.PullPolicy(CSIParam.ImagePullPolicy), ) - if err != nil { return nil, errors.Wrap(err, "failed to set up ceph CSI version job") } @@ -890,6 +895,7 @@ func (r *ReconcileCSI) configureHolder(driver driverDetails, c ClusterDetail, tp }, }, } + netNamespaceFilePath := generateNetNamespaceFilePath(CSIParam.KubeletDirPath, driver.fullName, c.cluster.Namespace) if driver.name == RBDDriverShortName { clusterConfigEntry.RBD.NetNamespaceFilePath = netNamespaceFilePath @@ -900,6 +906,7 @@ func (r *ReconcileCSI) configureHolder(driver driverDetails, c ClusterDetail, tp if driver.name == NFSDriverShortName { clusterConfigEntry.NFS.NetNamespaceFilePath = netNamespaceFilePath } + // Save the path of the network namespace file for ceph-csi to use err = SaveClusterConfig(r.context.Clientset, c.cluster.Namespace, c.clusterInfo, clusterConfigEntry) if err != nil { From c38612c28068c7f8ae3fb490366621aa9c4b5551 Mon Sep 17 00:00:00 2001 From: Michael 
Adam Date: Tue, 30 Jan 2024 17:58:02 +0100 Subject: [PATCH 3/8] ci: add "network" to the allowed commit message prefixes Signed-off-by: Michael Adam --- .commitlintrc.json | 1 + 1 file changed, 1 insertion(+) diff --git a/.commitlintrc.json b/.commitlintrc.json index 0706153439d7..ba22b047f2de 100644 --- a/.commitlintrc.json +++ b/.commitlintrc.json @@ -28,6 +28,7 @@ "mon", "monitoring", "multus", + "network", "nfs", "object", "operator", From 25aa15e6c1c0d9799123fef8e6ac35f1fbe966da Mon Sep 17 00:00:00 2001 From: Michael Adam Date: Mon, 5 Feb 2024 20:02:51 +0100 Subject: [PATCH 4/8] network: disallow legacy hostNetwork together with non-default provider Fixes: #13692 Since the introduction of the "host" network provider, the legacy "hostNetwork" setting is intended to be used only in combination with the default network provider (""), but the code did not enforce this. This change adds the required validation checks to throw errors for invalid configurations. These checks are added both in the operator's input validation code and as kubernetes x-validation admission policies in the CephCluster CRD. 
Signed-off-by: Michael Adam --- .../charts/rook-ceph/templates/resources.yaml | 2 ++ deploy/examples/crds.yaml | 2 ++ pkg/apis/ceph.rook.io/v1/network.go | 3 ++ pkg/apis/ceph.rook.io/v1/network_test.go | 30 +++++++++++++++++++ pkg/apis/ceph.rook.io/v1/types.go | 1 + 5 files changed, 38 insertions(+) diff --git a/deploy/charts/rook-ceph/templates/resources.yaml b/deploy/charts/rook-ceph/templates/resources.yaml index bc4fa1ba0d53..bd4ff3c741f0 100644 --- a/deploy/charts/rook-ceph/templates/resources.yaml +++ b/deploy/charts/rook-ceph/templates/resources.yaml @@ -2282,6 +2282,8 @@ spec: x-kubernetes-validations: - message: at least one network selector must be specified when using multus rule: '!has(self.provider) || (self.provider != ''multus'' || (self.provider == ''multus'' && size(self.selectors) > 0))' + - message: the legacy hostNetwork setting can only be set if the network.provider is set to the empty string + rule: '!has(self.hostNetwork) || self.hostNetwork == false || !has(self.provider) || self.provider == ""' placement: additionalProperties: description: Placement is the placement for an object diff --git a/deploy/examples/crds.yaml b/deploy/examples/crds.yaml index 8c01375060a6..bf773841de34 100644 --- a/deploy/examples/crds.yaml +++ b/deploy/examples/crds.yaml @@ -2280,6 +2280,8 @@ spec: x-kubernetes-validations: - message: at least one network selector must be specified when using multus rule: '!has(self.provider) || (self.provider != ''multus'' || (self.provider == ''multus'' && size(self.selectors) > 0))' + - message: the legacy hostNetwork setting can only be set if the network.provider is set to the empty string + rule: '!has(self.hostNetwork) || self.hostNetwork == false || !has(self.provider) || self.provider == ""' placement: additionalProperties: description: Placement is the placement for an object diff --git a/pkg/apis/ceph.rook.io/v1/network.go b/pkg/apis/ceph.rook.io/v1/network.go index 6abd842ee54d..629f30a069d0 100644 --- 
a/pkg/apis/ceph.rook.io/v1/network.go +++ b/pkg/apis/ceph.rook.io/v1/network.go @@ -39,6 +39,9 @@ func (n *NetworkSpec) IsHost() bool { } func ValidateNetworkSpec(clusterNamespace string, spec NetworkSpec) error { + if spec.HostNetwork && (spec.Provider != NetworkProviderDefault) { + return errors.Errorf(`the legacy hostNetwork setting is only valid with the default network provider ("") and not with '%q'`, spec.Provider) + } if spec.IsMultus() { if len(spec.Selectors) == 0 { return errors.Errorf("at least one network selector must be specified when using the %q network provider", NetworkProviderMultus) diff --git a/pkg/apis/ceph.rook.io/v1/network_test.go b/pkg/apis/ceph.rook.io/v1/network_test.go index 38d4be111ab0..3e3446257f26 100644 --- a/pkg/apis/ceph.rook.io/v1/network_test.go +++ b/pkg/apis/ceph.rook.io/v1/network_test.go @@ -42,6 +42,36 @@ func TestNetworkCephSpecLegacy(t *testing.T) { assert.Equal(t, expected, net) } +func TestValidateNetworkSpec(t *testing.T) { + net := NetworkSpec{ + HostNetwork: true, + Provider: NetworkProviderDefault, + } + err := ValidateNetworkSpec("", net) + assert.NoError(t, err) + + net = NetworkSpec{ + HostNetwork: true, + Provider: NetworkProviderHost, + } + err = ValidateNetworkSpec("", net) + assert.Error(t, err) + + net = NetworkSpec{ + HostNetwork: false, + Provider: NetworkProviderDefault, + } + err = ValidateNetworkSpec("", net) + assert.NoError(t, err) + + net = NetworkSpec{ + HostNetwork: false, + Provider: NetworkProviderHost, + } + err = ValidateNetworkSpec("", net) + assert.NoError(t, err) +} + func TestNetworkCephIsHostLegacy(t *testing.T) { net := NetworkSpec{HostNetwork: true} diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go index ddcddc31649f..eb04cd02073c 100755 --- a/pkg/apis/ceph.rook.io/v1/types.go +++ b/pkg/apis/ceph.rook.io/v1/types.go @@ -2316,6 +2316,7 @@ type SSSDSidecarAdditionalFile struct { // NetworkSpec for Ceph includes backward compatibility code // 
+kubebuilder:validation:XValidation:message="at least one network selector must be specified when using multus",rule="!has(self.provider) || (self.provider != 'multus' || (self.provider == 'multus' && size(self.selectors) > 0))" +// +kubebuilder:validation:XValidation:message=`the legacy hostNetwork setting can only be set if the network.provider is set to the empty string`,rule=`!has(self.hostNetwork) || self.hostNetwork == false || !has(self.provider) || self.provider == ""` type NetworkSpec struct { // Provider is what provides network connectivity to the cluster e.g. "host" or "multus". // If the Provider is updated from being empty to "host" on a running cluster, then the operator will automatically fail over all the mons to apply the "host" network settings. From db4b3322faf3ab0708d8bfee03d5e9f2f2008aed Mon Sep 17 00:00:00 2001 From: Redouane Kachach Date: Fri, 9 Feb 2024 16:55:04 +0100 Subject: [PATCH 5/8] mgr: remove privileged security context from mgr sidecar container Mgr sidecar was using a privileged security context because it needed to create the /var/lib/rook directory, generate the config on it, and then copy it to the /etc/ceph directory. This change creates empty directories on /var/lib/rook and /etc/ceph so the mgr sidecar can use them, removing the need for using a privileged security context. 
Fixes: https://github.com/rook/rook/issues/13719 Signed-off-by: Redouane Kachach --- pkg/operator/ceph/cluster/mgr/spec.go | 30 ++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/pkg/operator/ceph/cluster/mgr/spec.go b/pkg/operator/ceph/cluster/mgr/spec.go index d2167c81e38f..4a452a70acd4 100644 --- a/pkg/operator/ceph/cluster/mgr/spec.go +++ b/pkg/operator/ceph/cluster/mgr/spec.go @@ -74,7 +74,19 @@ func (c *Cluster) makeDeployment(mgrConfig *mgrConfig) (*apps.Deployment, error) if c.spec.Mgr.Count > 1 { podSpec.Spec.Containers = append(podSpec.Spec.Containers, c.makeMgrSidecarContainer(mgrConfig)) matchLabels := controller.AppLabels(AppName, c.clusterInfo.Namespace) - podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, mon.CephSecretVolume()) + // append ceph secret volume and some empty volumes needed by the mgr sidecar + podSpec.Spec.Volumes = append(podSpec.Spec.Volumes, + v1.Volume{ + Name: "rook-config", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }}, + v1.Volume{ + Name: "default-config-dir", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }}, + mon.CephSecretVolume()) // Stretch the mgrs across hosts by default, or across a bigger failure domain for when zones are required like in case of stretched cluster topologyKey := v1.LabelHostname @@ -225,6 +237,18 @@ func (c *Cluster) makeMgrSidecarContainer(mgrConfig *mgrConfig) v1.Container { {Name: "ROOK_CEPH_VERSION", Value: "ceph version " + c.clusterInfo.CephVersion.String()}, } + volumeMounts := []v1.VolumeMount{ + { + MountPath: "/var/lib/rook", + Name: "rook-config", + }, + { + MountPath: "/etc/ceph", + Name: "default-config-dir", + }, + } + volumeMounts = append(volumeMounts, mon.CephSecretVolumeMount()) + return v1.Container{ Args: []string{"ceph", "mgr", "watch-active"}, Name: "watch-active", @@ -232,8 +256,8 @@ func (c *Cluster) makeMgrSidecarContainer(mgrConfig *mgrConfig) v1.Container { ImagePullPolicy: 
controller.GetContainerImagePullPolicy(c.spec.CephVersion.ImagePullPolicy), Env: envVars, Resources: cephv1.GetMgrSidecarResources(c.spec.Resources), - SecurityContext: controller.PrivilegedContext(true), - VolumeMounts: []v1.VolumeMount{mon.CephSecretVolumeMount()}, + SecurityContext: controller.PodSecurityContext(), + VolumeMounts: volumeMounts, } } From 5bff860380f34d1aa1ee9dceb9bcb2ed5d64da58 Mon Sep 17 00:00:00 2001 From: subhamkrai Date: Wed, 7 Feb 2024 21:20:22 +0530 Subject: [PATCH 6/8] core: remove namespace/ownerRef from networkFence since networkFence is a cluster-based resource, we don't need the namespace and ownerReferences as they cause garbage-collector errors. Also, now we create the networkFence with clusterUID label so when doing cleanup we match the cephCluster uid and networkFence label clusterUID. Signed-off-by: subhamkrai --- .../rook-ceph/templates/clusterrole.yaml | 2 +- deploy/examples/common.yaml | 2 +- pkg/operator/ceph/cluster/controller.go | 34 ++++++++++++++++++- pkg/operator/ceph/cluster/controller_test.go | 8 ++++- pkg/operator/ceph/cluster/watcher.go | 19 +++++++---- pkg/operator/ceph/cluster/watcher_test.go | 22 ++++++------ pkg/operator/ceph/csi/csi.go | 2 +- 7 files changed, 65 insertions(+), 24 deletions(-) diff --git a/deploy/charts/rook-ceph/templates/clusterrole.yaml b/deploy/charts/rook-ceph/templates/clusterrole.yaml index ebcb9ce368a3..12c2ad02e105 100644 --- a/deploy/charts/rook-ceph/templates/clusterrole.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrole.yaml @@ -20,7 +20,7 @@ rules: verbs: ["create"] - apiGroups: ["csiaddons.openshift.io"] resources: ["networkfences"] - verbs: ["create", "get", "update", "delete", "watch", "list"] + verbs: ["create", "get", "update", "delete", "watch", "list", "deletecollection"] - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] verbs: ["get"] diff --git a/deploy/examples/common.yaml b/deploy/examples/common.yaml index dcabb0eeb516..c344860c1b04 
100644 --- a/deploy/examples/common.yaml +++ b/deploy/examples/common.yaml @@ -563,7 +563,7 @@ rules: verbs: ["create"] - apiGroups: ["csiaddons.openshift.io"] resources: ["networkfences"] - verbs: ["create", "get", "update", "delete", "watch", "list"] + verbs: ["create", "get", "update", "delete", "watch", "list", "deletecollection"] - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] verbs: ["get"] diff --git a/pkg/operator/ceph/cluster/controller.go b/pkg/operator/ceph/cluster/controller.go index 04dc765849f8..0112554fe210 100644 --- a/pkg/operator/ceph/cluster/controller.go +++ b/pkg/operator/ceph/cluster/controller.go @@ -38,6 +38,7 @@ import ( corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" apituntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -387,9 +388,40 @@ func (c *ClusterController) requestClusterDelete(cluster *cephv1.CephCluster) (r nsName, existing.namespacedName.Name) return reconcile.Result{}, nil // do not requeue the delete } + _, err := c.context.ApiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(c.OpManagerCtx, "networkfences.csiaddons.openshift.io", metav1.GetOptions{}) + if err == nil { + logger.Info("removing networkFence if matching cephCluster UID found") + networkFenceList := &addonsv1alpha1.NetworkFenceList{} + labelSelector := labels.SelectorFromSet(map[string]string{ + networkFenceLabel: string(cluster.GetUID()), + }) - logger.Infof("cleaning up CephCluster %q", nsName) + opts := []client.DeleteAllOfOption{ + client.MatchingLabels{ + networkFenceLabel: string(cluster.GetUID()), + }, + client.GracePeriodSeconds(0), + } + err = c.client.DeleteAllOf(c.OpManagerCtx, &addonsv1alpha1.NetworkFence{}, opts...) 
+ if err != nil && !kerrors.IsNotFound(err) { + return reconcile.Result{}, errors.Wrapf(err, "failed to delete networkFence with label %s", networkFenceLabel) + } + + err = c.client.List(c.OpManagerCtx, networkFenceList, &client.MatchingLabelsSelector{Selector: labelSelector}) + if err != nil && !kerrors.IsNotFound(err) { + return reconcile.Result{}, errors.Wrap(err, "failed to list networkFence") + } + if len(networkFenceList.Items) > 0 { + for i := range networkFenceList.Items { + err = opcontroller.RemoveFinalizerWithName(c.OpManagerCtx, c.client, &networkFenceList.Items[i], "csiaddons.openshift.io/network-fence") + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer") + } + } + } + } + logger.Infof("cleaning up CephCluster %q", nsName) if cluster, ok := c.clusterMap[cluster.Namespace]; ok { // We used to stop the bucket controller here but when we get a DELETE event for the CephCluster // we will reload the CRD manager anyway so the bucket controller go routine will be stopped diff --git a/pkg/operator/ceph/cluster/controller_test.go b/pkg/operator/ceph/cluster/controller_test.go index 8a5ffc6c6eb0..2ab9036111c2 100644 --- a/pkg/operator/ceph/cluster/controller_test.go +++ b/pkg/operator/ceph/cluster/controller_test.go @@ -21,11 +21,14 @@ import ( "testing" "time" + addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/csiaddons/v1alpha1" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake" "github.com/rook/rook/pkg/client/clientset/versioned/scheme" "github.com/rook/rook/pkg/clusterd" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + apifake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -75,11 +78,14 @@ func TestReconcileDeleteCephCluster(t *testing.T) { // create a 
Rook-Ceph scheme to use for our tests scheme := runtime.NewScheme() assert.NoError(t, cephv1.AddToScheme(scheme)) + assert.NoError(t, addonsv1alpha1.AddToScheme(scheme)) t.Run("deletion blocked while dependencies exist", func(t *testing.T) { // set up clusterd.Context clusterdCtx := &clusterd.Context{ - Clientset: k8sfake.NewSimpleClientset(), + Clientset: k8sfake.NewSimpleClientset(), + RookClientset: rookclient.NewSimpleClientset(), + ApiExtensionsClient: apifake.NewSimpleClientset(), } // create the cluster controller and tell it that the cluster has been deleted diff --git a/pkg/operator/ceph/cluster/watcher.go b/pkg/operator/ceph/cluster/watcher.go index ab1e3f7b81c7..6b8f08d40567 100644 --- a/pkg/operator/ceph/cluster/watcher.go +++ b/pkg/operator/ceph/cluster/watcher.go @@ -51,7 +51,10 @@ type clientCluster struct { context *clusterd.Context } -var nodesCheckedForReconcile = sets.New[string]() +var ( + nodesCheckedForReconcile = sets.New[string]() + networkFenceLabel = "cephClusterUID" +) // drivers that supports fencing, used in naming networkFence object const ( @@ -511,8 +514,8 @@ func concatenateWatcherIp(address string) string { return watcherIP } -func fenceResourceName(nodeName, driver string) string { - return fmt.Sprintf("%s-%s", nodeName, driver) +func fenceResourceName(nodeName, driver, namespace string) string { + return fmt.Sprintf("%s-%s-%s", nodeName, driver, namespace) } func (c *clientCluster) createNetworkFence(ctx context.Context, pv corev1.PersistentVolume, node *corev1.Node, cluster *cephv1.CephCluster, cidr []string, driver string) error { @@ -531,8 +534,10 @@ func (c *clientCluster) createNetworkFence(ctx context.Context, pv corev1.Persis networkFence := &addonsv1alpha1.NetworkFence{ ObjectMeta: metav1.ObjectMeta{ - Name: fenceResourceName(node.Name, driver), - Namespace: cluster.Namespace, + Name: fenceResourceName(node.Name, driver, cluster.Namespace), + Labels: map[string]string{ + networkFenceLabel: string(cluster.GetUID()), + }, 
}, Spec: addonsv1alpha1.NetworkFenceSpec{ Driver: pv.Spec.CSI.Driver, @@ -560,7 +565,7 @@ func (c *clientCluster) createNetworkFence(ctx context.Context, pv corev1.Persis func (c *clientCluster) unfenceAndDeleteNetworkFence(ctx context.Context, node corev1.Node, cluster *cephv1.CephCluster, driver string) error { networkFence := &addonsv1alpha1.NetworkFence{} - err := c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, driver), Namespace: cluster.Namespace}, networkFence) + err := c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, driver, cluster.Namespace)}, networkFence) if err != nil && !errors.IsNotFound(err) { return err } else if errors.IsNotFound(err) { @@ -577,7 +582,7 @@ func (c *clientCluster) unfenceAndDeleteNetworkFence(ctx context.Context, node c } err = wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) { - err = c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, driver), Namespace: cluster.Namespace}, networkFence) + err = c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, driver, cluster.Namespace)}, networkFence) if err != nil && !errors.IsNotFound(err) { return false, err } diff --git a/pkg/operator/ceph/cluster/watcher_test.go b/pkg/operator/ceph/cluster/watcher_test.go index 5f467d5f84e3..6e527b9108aa 100644 --- a/pkg/operator/ceph/cluster/watcher_test.go +++ b/pkg/operator/ceph/cluster/watcher_test.go @@ -353,11 +353,11 @@ func TestHandleNodeFailure(t *testing.T) { assert.NoError(t, err) networkFenceRbd := &addonsv1alpha1.NetworkFence{} - err = c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, rbdDriver), Namespace: cephCluster.Namespace}, networkFenceRbd) + err = c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, rbdDriver, ns)}, networkFenceRbd) assert.NoError(t, err) networkFenceCephFs := &addonsv1alpha1.NetworkFence{} - err = c.client.Get(ctx, 
types.NamespacedName{Name: fenceResourceName(node.Name, cephfsDriver), Namespace: cephCluster.Namespace}, networkFenceCephFs) + err = c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, cephfsDriver, ns)}, networkFenceCephFs) assert.NoError(t, err) networkFences := &addonsv1alpha1.NetworkFenceList{} @@ -367,12 +367,10 @@ func TestHandleNodeFailure(t *testing.T) { for _, fence := range networkFences.Items { // Check if the resource is in the desired namespace - if fence.Namespace == cephCluster.Namespace { - if strings.Contains(fence.Name, rbdDriver) { - rbdCount++ - } else if strings.Contains(fence.Name, cephfsDriver) { - cephFsCount++ - } + if strings.Contains(fence.Name, rbdDriver) { + rbdCount++ + } else if strings.Contains(fence.Name, cephfsDriver) { + cephFsCount++ } } @@ -431,10 +429,10 @@ func TestHandleNodeFailure(t *testing.T) { err = c.handleNodeFailure(ctx, cephCluster, node) assert.NoError(t, err) - err = c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, rbdDriver), Namespace: cephCluster.Namespace}, networkFenceRbd) + err = c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, rbdDriver, ns), Namespace: cephCluster.Namespace}, networkFenceRbd) assert.Error(t, err, kerrors.IsNotFound(err)) - err = c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, cephfsDriver), Namespace: cephCluster.Namespace}, networkFenceCephFs) + err = c.client.Get(ctx, types.NamespacedName{Name: fenceResourceName(node.Name, cephfsDriver, ns), Namespace: cephCluster.Namespace}, networkFenceCephFs) assert.Error(t, err, kerrors.IsNotFound(err)) } @@ -487,8 +485,8 @@ func TestConcatenateWatcherIp(t *testing.T) { } func TestFenceResourceName(t *testing.T) { - FenceName := fenceResourceName("fakenode", "rbd") - assert.Equal(t, FenceName, "fakenode-rbd") + FenceName := fenceResourceName("fakenode", "rbd", "rook-ceph") + assert.Equal(t, FenceName, "fakenode-rbd-rook-ceph") } func 
TestOnDeviceCMUpdate(t *testing.T) { diff --git a/pkg/operator/ceph/csi/csi.go b/pkg/operator/ceph/csi/csi.go index 48bc7d8ebbf1..26d34d4a052e 100644 --- a/pkg/operator/ceph/csi/csi.go +++ b/pkg/operator/ceph/csi/csi.go @@ -155,7 +155,7 @@ func (r *ReconcileCSI) setParams(ver *version.Info) error { } CSIParam.EnableCSIAddonsSideCar = false - _, err = r.context.ApiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(r.opManagerContext, "csiaddonsnode.csiaddons.openshift.io", metav1.GetOptions{}) + _, err = r.context.ApiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(r.opManagerContext, "csiaddonsnodes.csiaddons.openshift.io", metav1.GetOptions{}) if err == nil { CSIParam.EnableCSIAddonsSideCar = true } From 135307a4dffe14a4f0f55c97ee9ce54cbc22c363 Mon Sep 17 00:00:00 2001 From: subhamkrai Date: Tue, 13 Feb 2024 13:30:25 +0530 Subject: [PATCH 7/8] ci: upgrade min k8s supported version to 1.24.17 upgrading minimum kubernetes supported version to v1.24.17 and also upgrading other kubernetes versions to their latest respective versions. 
Signed-off-by: subhamkrai --- .github/workflows/canary-test-config/action.yaml | 2 +- .github/workflows/integration-test-helm-suite.yaml | 2 +- .github/workflows/integration-test-mgr-suite.yaml | 2 +- .../integration-test-multi-cluster-suite.yaml | 2 +- .../workflows/integration-test-object-suite.yaml | 2 +- .../workflows/integration-test-smoke-suite.yaml | 2 +- .../workflows/integration-test-upgrade-suite.yaml | 4 ++-- .../workflows/integration-tests-on-release.yaml | 12 ++++++------ .../Getting-Started/Prerequisites/prerequisites.md | 2 +- Documentation/Getting-Started/quickstart.md | 2 +- PendingReleaseNotes.md | 3 +++ tests/scripts/github-action-helper.sh | 14 +++++++------- 12 files changed, 26 insertions(+), 23 deletions(-) diff --git a/.github/workflows/canary-test-config/action.yaml b/.github/workflows/canary-test-config/action.yaml index 2417686b1834..49c032d9c417 100644 --- a/.github/workflows/canary-test-config/action.yaml +++ b/.github/workflows/canary-test-config/action.yaml @@ -19,7 +19,7 @@ runs: - name: Setup Minikube shell: bash --noprofile --norc -eo pipefail -x {0} run: | - tests/scripts/github-action-helper.sh install_minikube_with_none_driver v1.29.0 + tests/scripts/github-action-helper.sh install_minikube_with_none_driver v1.29.1 - name: install deps shell: bash --noprofile --norc -eo pipefail -x {0} diff --git a/.github/workflows/integration-test-helm-suite.yaml b/.github/workflows/integration-test-helm-suite.yaml index b59d153acfde..ab7d731c54ff 100644 --- a/.github/workflows/integration-test-helm-suite.yaml +++ b/.github/workflows/integration-test-helm-suite.yaml @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 diff --git a/.github/workflows/integration-test-mgr-suite.yaml b/.github/workflows/integration-test-mgr-suite.yaml index 22237b2e9d81..0b94b916aeea 100644 --- 
a/.github/workflows/integration-test-mgr-suite.yaml +++ b/.github/workflows/integration-test-mgr-suite.yaml @@ -24,7 +24,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.29.0"] + kubernetes-versions: ["v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 diff --git a/.github/workflows/integration-test-multi-cluster-suite.yaml b/.github/workflows/integration-test-multi-cluster-suite.yaml index ff05f8e7f217..8ce883ece1c2 100644 --- a/.github/workflows/integration-test-multi-cluster-suite.yaml +++ b/.github/workflows/integration-test-multi-cluster-suite.yaml @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.29.0"] + kubernetes-versions: ["v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 diff --git a/.github/workflows/integration-test-object-suite.yaml b/.github/workflows/integration-test-object-suite.yaml index 31978c811610..11a394f3620b 100644 --- a/.github/workflows/integration-test-object-suite.yaml +++ b/.github/workflows/integration-test-object-suite.yaml @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 diff --git a/.github/workflows/integration-test-smoke-suite.yaml b/.github/workflows/integration-test-smoke-suite.yaml index 4c13b9a68622..a2acb9653b94 100644 --- a/.github/workflows/integration-test-smoke-suite.yaml +++ b/.github/workflows/integration-test-smoke-suite.yaml @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 diff --git a/.github/workflows/integration-test-upgrade-suite.yaml b/.github/workflows/integration-test-upgrade-suite.yaml index 605a824fce93..e006af209144 100644 --- a/.github/workflows/integration-test-upgrade-suite.yaml +++ 
b/.github/workflows/integration-test-upgrade-suite.yaml @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 @@ -69,7 +69,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 diff --git a/.github/workflows/integration-tests-on-release.yaml b/.github/workflows/integration-tests-on-release.yaml index 1932f646efcd..0f15ed334bbe 100644 --- a/.github/workflows/integration-tests-on-release.yaml +++ b/.github/workflows/integration-tests-on-release.yaml @@ -18,7 +18,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.25.16", "v1.27.8", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.25.16", "v1.27.10", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 @@ -58,7 +58,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.25.16", "v1.27.8", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.25.16", "v1.27.10", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 @@ -99,7 +99,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.25.16", "v1.27.8", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.25.16", "v1.27.10", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 @@ -137,7 +137,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.25.16", "v1.27.8", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.25.16", "v1.27.10", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 @@ -175,7 +175,7 @@ jobs: strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.26.11", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.26.11", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 @@ -216,7 +216,7 @@ jobs: 
strategy: fail-fast: false matrix: - kubernetes-versions: ["v1.23.17", "v1.29.0"] + kubernetes-versions: ["v1.24.17", "v1.29.1"] steps: - name: checkout uses: actions/checkout@v4 diff --git a/Documentation/Getting-Started/Prerequisites/prerequisites.md b/Documentation/Getting-Started/Prerequisites/prerequisites.md index 8f21c0d5e84d..9a34d3cf9fbb 100644 --- a/Documentation/Getting-Started/Prerequisites/prerequisites.md +++ b/Documentation/Getting-Started/Prerequisites/prerequisites.md @@ -7,7 +7,7 @@ and Rook is granted the required privileges (see below for more information). ## Kubernetes Version -Kubernetes versions **v1.23** through **v1.29** are supported. +Kubernetes versions **v1.24** through **v1.29** are supported. ## CPU Architecture diff --git a/Documentation/Getting-Started/quickstart.md b/Documentation/Getting-Started/quickstart.md index 2e4bdd610641..380f3d0ef063 100644 --- a/Documentation/Getting-Started/quickstart.md +++ b/Documentation/Getting-Started/quickstart.md @@ -12,7 +12,7 @@ This guide will walk through the basic setup of a Ceph cluster and enable K8s ap ## Kubernetes Version -Kubernetes versions **v1.23** through **v1.29** are supported. +Kubernetes versions **v1.24** through **v1.29** are supported. ## CPU Architecture diff --git a/PendingReleaseNotes.md b/PendingReleaseNotes.md index 609e93295251..48e5d2a463d5 100644 --- a/PendingReleaseNotes.md +++ b/PendingReleaseNotes.md @@ -1,7 +1,10 @@ # v1.14 Pending Release Notes ## Breaking Changes + - The removal of `CSI_ENABLE_READ_AFFINITY` option and its replacement with per-cluster read affinity setting in cephCluster CR (CSIDriverOptions section) in [PR](https://github.com/rook/rook/pull/13665) ## Features + +- Kubernetes versions **v1.24** through **v1.29** are supported. 
diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh index c7b3ae0d2017..faf23f116083 100755 --- a/tests/scripts/github-action-helper.sh +++ b/tests/scripts/github-action-helper.sh @@ -32,7 +32,7 @@ function find_extra_block_dev() { # --nodeps ignores partitions extra_dev="$(sudo lsblk --noheading --list --nodeps --output KNAME | grep -v loop | grep -v "$boot_dev" | head -1)" echo " == find_extra_block_dev(): extra_dev='$extra_dev'" >/dev/stderr # debug in case of future errors - echo "$extra_dev" # output of function + echo "$extra_dev" # output of function } : "${BLOCK:=$(find_extra_block_dev)}" @@ -705,8 +705,8 @@ function test_csi_nfs_workload { } function install_minikube_with_none_driver() { - CRICTL_VERSION="v1.28.0" - MINIKUBE_VERSION="v1.31.2" + CRICTL_VERSION="v1.29.0" + MINIKUBE_VERSION="v1.32.0" sudo apt update sudo apt install -y conntrack socat @@ -714,16 +714,16 @@ function install_minikube_with_none_driver() { sudo dpkg -i minikube_latest_amd64.deb rm -f minikube_latest_amd64.deb - curl -LO https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd_0.3.4.3-0.ubuntu-focal_amd64.deb - sudo dpkg -i cri-dockerd_0.3.4.3-0.ubuntu-focal_amd64.deb - rm -f cri-dockerd_0.3.4.3-0.ubuntu-focal_amd64.deb + curl -LO https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd_0.3.9.3-0.ubuntu-focal_amd64.deb + sudo dpkg -i cri-dockerd_0.3.9.3-0.ubuntu-focal_amd64.deb + rm -f cri-dockerd_0.3.9.3-0.ubuntu-focal_amd64.deb wget https://github.com/kubernetes-sigs/cri-tools/releases/download/$CRICTL_VERSION/crictl-$CRICTL_VERSION-linux-amd64.tar.gz sudo tar zxvf crictl-$CRICTL_VERSION-linux-amd64.tar.gz -C /usr/local/bin rm -f crictl-$CRICTL_VERSION-linux-amd64.tar.gz sudo sysctl fs.protected_regular=0 - CNI_PLUGIN_VERSION="v1.3.0" + CNI_PLUGIN_VERSION="v1.4.0" CNI_PLUGIN_TAR="cni-plugins-linux-amd64-$CNI_PLUGIN_VERSION.tgz" # change arch if not on amd64 CNI_PLUGIN_INSTALL_DIR="/opt/cni/bin" From 
650e71ac228b6af10bb6c5805d3178b281be6380 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 12:33:07 +0000 Subject: [PATCH 8/8] build(deps): bump the github-dependencies group with 2 updates Bumps the github-dependencies group with 2 updates: [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) and [github.com/hashicorp/vault/api](https://github.com/hashicorp/vault). Updates `github.com/aws/aws-sdk-go` from 1.50.12 to 1.50.15 - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.50.12...v1.50.15) Updates `github.com/hashicorp/vault/api` from 1.11.0 to 1.12.0 - [Release notes](https://github.com/hashicorp/vault/releases) - [Changelog](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/vault/compare/v1.11.0...v1.12.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: github-dependencies - dependency-name: github.com/hashicorp/vault/api dependency-type: direct:production update-type: version-update:semver-minor dependency-group: github-dependencies ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- pkg/apis/go.mod | 2 +- pkg/apis/go.sum | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index d082a906f8eb..d83e05e618eb 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ replace github.com/rook/rook/pkg/apis => ./pkg/apis require ( github.com/IBM/keyprotect-go-client v0.12.2 - github.com/aws/aws-sdk-go v1.50.12 + github.com/aws/aws-sdk-go v1.50.15 github.com/banzaicloud/k8s-objectmatcher v1.8.0 github.com/ceph/go-ceph v0.25.0 github.com/coreos/pkg v0.0.0-20230601102743-20bbbf26f4d8 @@ -15,7 +15,7 @@ require ( github.com/go-ini/ini v1.67.0 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 - github.com/hashicorp/vault/api v1.11.0 + github.com/hashicorp/vault/api v1.12.0 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.5.0 github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221122204822-d1a8c34382f1 github.com/libopenstorage/secrets v0.0.0-20231011182615-5f4b25ceede1 diff --git a/go.sum b/go.sum index 605adee08989..74ed1d43734e 100644 --- a/go.sum +++ b/go.sum @@ -111,8 +111,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.44.164/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.50.12 h1:Gc6QS4Ys++cWSl63U+HyPbKeLVcoOvi6veayhcipPac= -github.com/aws/aws-sdk-go v1.50.12/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.50.15 h1:wEMnPfEQQFaoIJwuO18zq/vtG4Ft7NxQ3r9xlEi/8zg= +github.com/aws/aws-sdk-go v1.50.15/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/banzaicloud/k8s-objectmatcher v1.8.0 
h1:Nugn25elKtPMTA2br+JgHNeSQ04sc05MDPmpJnd1N2A= github.com/banzaicloud/k8s-objectmatcher v1.8.0/go.mod h1:p2LSNAjlECf07fbhDyebTkPUIYnU05G+WfGgkTmgeMg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -487,8 +487,8 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= -github.com/hashicorp/vault/api v1.11.0 h1:AChWByeHf4/P9sX3Y1B7vFsQhZO2BgQiCMQ2SA1P1UY= -github.com/hashicorp/vault/api v1.11.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck= +github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4= +github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck= github.com/hashicorp/vault/api/auth/approle v0.5.0 h1:a1TK6VGwYqSAfkmX4y4dJ4WBxMU5dStIZqScW4EPXR8= github.com/hashicorp/vault/api/auth/approle v0.5.0/go.mod h1:CHOQIA1AZACfjTzHggmyfiOZ+xCSKNRFqe48FTCzH0k= github.com/hashicorp/vault/api/auth/kubernetes v0.5.0 h1:CXO0fD7M3iCGovP/UApeHhPcH4paDFKcu7AjEXi94rI= diff --git a/pkg/apis/go.mod b/pkg/apis/go.mod index 277c0947cc5c..709465d1a019 100644 --- a/pkg/apis/go.mod +++ b/pkg/apis/go.mod @@ -3,7 +3,7 @@ module github.com/rook/rook/pkg/apis go 1.21 require ( - github.com/hashicorp/vault/api v1.11.0 + github.com/hashicorp/vault/api v1.12.0 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.5.0 github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221122204822-d1a8c34382f1 github.com/libopenstorage/secrets v0.0.0-20231011182615-5f4b25ceede1 diff --git a/pkg/apis/go.sum b/pkg/apis/go.sum index 7251d76edffa..20725628ea6f 100644 --- a/pkg/apis/go.sum +++ b/pkg/apis/go.sum @@ -314,8 +314,8 @@ github.com/hashicorp/hcl 
v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= -github.com/hashicorp/vault/api v1.11.0 h1:AChWByeHf4/P9sX3Y1B7vFsQhZO2BgQiCMQ2SA1P1UY= -github.com/hashicorp/vault/api v1.11.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck= +github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4= +github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck= github.com/hashicorp/vault/api/auth/approle v0.5.0 h1:a1TK6VGwYqSAfkmX4y4dJ4WBxMU5dStIZqScW4EPXR8= github.com/hashicorp/vault/api/auth/approle v0.5.0/go.mod h1:CHOQIA1AZACfjTzHggmyfiOZ+xCSKNRFqe48FTCzH0k= github.com/hashicorp/vault/api/auth/kubernetes v0.5.0 h1:CXO0fD7M3iCGovP/UApeHhPcH4paDFKcu7AjEXi94rI=