From b3f51ae2ba5ecf6d32cee6d70664fe95c6c6c458 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Mon, 3 Jul 2023 14:09:08 +0200
Subject: [PATCH] csi: update csi holder daemonset template

Currently the holder daemonset is never updated, which also leaves the
images in the daemonset outdated. We should update the daemonset template
without restarting the csi holder pods, since restarting them causes CSI
volume access problems. The updateStrategy is set to OnDelete (already set
in the yaml files), which allows us to update the holder daemonset without
restarting/updating its pods; the new changes take effect when a pod is
deleted or its node is rebooted.

Signed-off-by: Madhu Rajanna
(cherry picked from commit 1ba3aa4d1972c6ecbc9ab3c9ae7ce1a5fbe0e8dc)
---
 pkg/operator/ceph/csi/spec.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go
index f7693802136b..1f48c172b1a0 100644
--- a/pkg/operator/ceph/csi/spec.go
+++ b/pkg/operator/ceph/csi/spec.go
@@ -856,7 +856,11 @@ func (r *ReconcileCSI) configureHolder(driver driverDetails, c ClusterDetail, tp
 	_, err = r.context.Clientset.AppsV1().DaemonSets(r.opConfig.OperatorNamespace).Create(r.opManagerContext, cephPluginHolder, metav1.CreateOptions{})
 	if err != nil {
 		if kerrors.IsAlreadyExists(err) {
-			logger.Debugf("holder %q already exists for cluster %q, it should never be updated", cephPluginHolder.Name, c.cluster.Namespace)
+			_, err = r.context.Clientset.AppsV1().DaemonSets(r.opConfig.OperatorNamespace).Update(r.opManagerContext, cephPluginHolder, metav1.UpdateOptions{})
+			if err != nil {
+				return errors.Wrapf(err, "failed to update ceph plugin holder daemonset %q", cephPluginHolder.Name)
+			}
+			logger.Debugf("holder %q already exists for cluster %q, updating it, restart holder pods to take effect of update", cephPluginHolder.Name, c.cluster.Namespace)
 		} else {
 			return errors.Wrapf(err, "failed to start ceph plugin holder daemonset %q", cephPluginHolder.Name)
 		}
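
Note for reviewers: below is a minimal, hypothetical sketch (not taken from the Rook code base) of a DaemonSet spec with the OnDelete update strategy that the commit message relies on. All names, labels, and the image are placeholders, assumed only for illustration.

// Sketch only: with OnDelete, updating the DaemonSet template does not roll
// the running pods; each pod keeps its old spec until it is deleted or its
// node is rebooted, which is why the Update() call in this patch is safe for
// holder pods that still back mounted CSI volumes.
package csisketch

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func holderDaemonSetSketch() *appsv1.DaemonSet {
	labels := map[string]string{"app": "csi-plugin-holder"} // hypothetical label
	return &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-plugin-holder"}, // hypothetical name
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			// OnDelete: the DaemonSet controller never restarts pods on a
			// template change; new pods pick up the updated template only
			// after the old pod is deleted or the node is rebooted.
			UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
				Type: appsv1.OnDeleteDaemonSetStrategyType,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name:  "holder",
						Image: "quay.io/cephcsi/cephcsi:v3.9.0", // placeholder image tag
					}},
				},
			},
		},
	}
}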