From b8f620df9f3920c302d3f5b10220e157fb81ba14 Mon Sep 17 00:00:00 2001 From: Derek Su Date: Mon, 10 Jul 2023 09:57:38 +0800 Subject: [PATCH] Move some debug level messages to info level Signed-off-by: Derek Su --- app/driver.go | 2 +- app/recurring_job.go | 18 +++++------ controller/engine_controller.go | 4 +-- controller/kubernetes_pod_controller.go | 6 ++-- controller/monitor/snapshot_monitor.go | 2 +- controller/replica_controller.go | 4 +-- controller/setting_controller.go | 32 +++++++++---------- controller/volume_controller.go | 6 ++-- csi/crypto/crypto.go | 4 +-- csi/deployment_util.go | 2 +- csi/util.go | 8 ++--- datastore/longhorn.go | 12 +++---- engineapi/disk_service.go | 4 +-- engineapi/proxy.go | 4 +-- engineapi/snapshot.go | 6 ++-- manager/engine.go | 8 ++--- manager/engineimage.go | 4 +-- manager/kubernetes.go | 4 +-- manager/node.go | 6 ++-- manager/setting.go | 2 +- manager/snapshot.go | 2 +- manager/volume.go | 12 +++---- .../instance_manager_collector.go | 2 +- scheduler/replica_scheduler.go | 4 +-- types/setting.go | 2 +- upgrade/upgrade.go | 4 +-- 26 files changed, 82 insertions(+), 82 deletions(-) diff --git a/app/driver.go b/app/driver.go index 255ac1a6a9..cfc0449d57 100644 --- a/app/driver.go +++ b/app/driver.go @@ -178,7 +178,7 @@ func deployDriver(c *cli.Context) error { return errors.Wrap(err, "CSI cannot be deployed because MountPropagation is not set") } - logrus.Debug("Deploying CSI driver") + logrus.Info("Deploying CSI driver") return deployCSIDriver(kubeClient, lhClient, c, managerImage, managerURL) } diff --git a/app/recurring_job.go b/app/recurring_job.go index 6d8d1e2be8..78b8b421ca 100644 --- a/app/recurring_job.go +++ b/app/recurring_job.go @@ -439,7 +439,7 @@ func (job *Job) doSnapshotCleanup(backupDone bool) (err error) { }); err != nil { return err } - job.logger.Debugf("Cleaned up snapshot CR %v for %v", snapshotName, volumeName) + job.logger.Infof("Cleaned up snapshot CR %v for %v", snapshotName, volumeName) } if job.task == longhorn.RecurringJobTypeSnapshotCleanup { @@ -471,7 +471,7 @@ func (job *Job) deleteSnapshots(names []string, volume *longhornclient.Volume, v if err != nil { return err } - job.logger.WithField("volume", volume.Name).Debugf("Deleted snapshot %v", name) + job.logger.WithField("volume", volume.Name).Infof("Deleted snapshot %v", name) } return nil } @@ -514,7 +514,7 @@ func (job *Job) purgeSnapshots(volume *longhornclient.Volume, volumeAPI longhorn } job.logger.Warn("Encountered one or more errors while purging snapshots") } - job.logger.WithField("volume", volume.Name).Debug("Purged snapshots") + job.logger.WithField("volume", volume.Name).Info("Purged snapshots") return nil } @@ -626,9 +626,9 @@ func (job *Job) doRecurringBackup() (err error) { switch info.State { case string(longhorn.BackupStateCompleted): complete = true - job.logger.Debugf("Complete creating backup %v", info.Id) + job.logger.Infof("Completed creating backup %v", info.Id) case string(longhorn.BackupStateNew), string(longhorn.BackupStateInProgress): - job.logger.Debugf("Creating backup %v, current progress %v", info.Id, info.Progress) + job.logger.Infof("Creating backup %v, current progress %v", info.Id, info.Progress) case string(longhorn.BackupStateError), string(longhorn.BackupStateUnknown): return fmt.Errorf("failed to create backup %v: %v", info.Id, info.Error) default: @@ -663,7 +663,7 @@ func (job *Job) doRecurringBackup() (err error) { }); err != nil { return fmt.Errorf("cleaned up backup %v failed for %v: %v", backup, job.volumeName, err) } - 
job.logger.Debugf("Cleaned up backup %v for %v", backup, job.volumeName) + job.logger.Infof("Cleaned up backup %v for %v", backup, job.volumeName) } if err := job.doSnapshotCleanup(true); err != nil { @@ -865,7 +865,7 @@ func filterVolumesForJob(allowDetached bool, volumes []longhorn.Volume, filterNa } if volume.Status.RestoreRequired { - logger.Debugf("Bypassed to create job for %v volume during restoring from the backup", volume.Name) + logger.Infof("Bypassed to create job for %v volume during restoring from the backup", volume.Name) continue } @@ -874,7 +874,7 @@ func filterVolumesForJob(allowDetached bool, volumes []longhorn.Volume, filterNa *filterNames = append(*filterNames, volume.Name) continue } - logger.Debugf("Cannot create job for %v volume in state %v", volume.Name, volume.Status.State) + logger.Warnf("Cannot create job for %v volume in state %v", volume.Name, volume.Status.State) } } @@ -883,7 +883,7 @@ func getVolumesBySelector(recurringJobType, recurringJobName, namespace string, label := fmt.Sprintf("%s=%s", types.GetRecurringJobLabelKey(recurringJobType, recurringJobName), types.LonghornLabelValueEnabled) - logger.Debugf("Get volumes from label %v", label) + logger.Infof("Got volumes from label %v", label) volumes, err := client.LonghornV1beta2().Volumes(namespace).List(context.TODO(), metav1.ListOptions{ LabelSelector: label, diff --git a/controller/engine_controller.go b/controller/engine_controller.go index b7da4ede8b..b46e2aba9c 100644 --- a/controller/engine_controller.go +++ b/controller/engine_controller.go @@ -1401,7 +1401,7 @@ func checkSizeBeforeRestoration(log logrus.FieldLogger, engine *longhorn.Engine, if !datastore.ErrorIsConflict(err) { return false, err } - log.WithField("volume", v.Name).Debug("Retrying size update for DR volume before restore") + log.WithField("volume", v.Name).Warn("Retrying size update for DR volume before restore") continue } return false, nil @@ -1664,7 +1664,7 @@ func (ec *EngineController) rebuildNewReplica(e *longhorn.Engine) error { } // We cannot rebuild more than one replica at one time if rebuildingInProgress { - ec.logger.WithField("volume", e.Spec.VolumeName).Debug("Skipped rebuilding of replica because there is another rebuild in progress") + ec.logger.WithField("volume", e.Spec.VolumeName).Info("Skipped rebuilding of replica because there is another rebuild in progress") return nil } for replica, addr := range e.Status.CurrentReplicaAddressMap { diff --git a/controller/kubernetes_pod_controller.go b/controller/kubernetes_pod_controller.go index 88fe1978a8..2b2bed6b24 100644 --- a/controller/kubernetes_pod_controller.go +++ b/controller/kubernetes_pod_controller.go @@ -439,7 +439,7 @@ func (kc *KubernetesPodController) getAssociatedVolumes(pod *v1.Pod) ([]*longhor pvc, err := kc.ds.GetPersistentVolumeClaimRO(pod.Namespace, v.VolumeSource.PersistentVolumeClaim.ClaimName) if datastore.ErrorIsNotFound(err) { - log.WithError(err).Debugf("Cannot auto-delete Pod when the associated PersistentVolumeClaim is not found") + log.WithError(err).Warn("Cannot auto-delete Pod when the associated PersistentVolumeClaim is not found") continue } if err != nil { @@ -448,7 +448,7 @@ func (kc *KubernetesPodController) getAssociatedVolumes(pod *v1.Pod) ([]*longhor pv, err := kc.getAssociatedPersistentVolume(pvc) if datastore.ErrorIsNotFound(err) { - log.WithError(err).Debugf("Cannot auto-delete Pod when the associated PersistentVolume is not found") + log.WithError(err).Warn("Cannot auto-delete Pod when the associated PersistentVolume is not 
found") continue } if err != nil { @@ -458,7 +458,7 @@ func (kc *KubernetesPodController) getAssociatedVolumes(pod *v1.Pod) ([]*longhor if pv.Spec.CSI != nil && pv.Spec.CSI.Driver == types.LonghornDriverName { vol, err := kc.ds.GetVolume(pv.GetName()) if datastore.ErrorIsNotFound(err) { - log.WithError(err).Debugf("Cannot auto-delete Pod when the associated Volume is not found") + log.WithError(err).Warn("Cannot auto-delete Pod when the associated Volume is not found") continue } if err != nil { diff --git a/controller/monitor/snapshot_monitor.go b/controller/monitor/snapshot_monitor.go index fb3b1dd2e7..44137b5ed9 100644 --- a/controller/monitor/snapshot_monitor.go +++ b/controller/monitor/snapshot_monitor.go @@ -299,7 +299,7 @@ func (m *SnapshotMonitor) shouldAddToInProgressSnapshotCheckTasks(snapshotName s _, ok := m.inProgressSnapshotCheckTasks[snapshotName] if ok { - m.logger.WithField("monitor", monitorName).Debugf("snapshot %s is being checked", snapshotName) + m.logger.WithField("monitor", monitorName).Infof("Checking snapshot %s", snapshotName) return false } m.inProgressSnapshotCheckTasks[snapshotName] = struct{}{} diff --git a/controller/replica_controller.go b/controller/replica_controller.go index 94f529c935..fe39ecc52f 100644 --- a/controller/replica_controller.go +++ b/controller/replica_controller.go @@ -244,14 +244,14 @@ func (rc *ReplicaController) UpdateReplicaEvictionStatus(replica *longhorn.Repli if rc.isEvictionRequested(replica) && !replica.Status.EvictionRequested { replica.Status.EvictionRequested = true - log.Debug("Replica has requested eviction") + log.Info("Replica has requested eviction") } // Check if eviction has been cancelled on this replica if !rc.isEvictionRequested(replica) && replica.Status.EvictionRequested { replica.Status.EvictionRequested = false - log.Debug("Replica has cancelled eviction") + log.Info("Replica has cancelled eviction") } } diff --git a/controller/setting_controller.go b/controller/setting_controller.go index c970e526cc..2f0f10850c 100644 --- a/controller/setting_controller.go +++ b/controller/setting_controller.go @@ -859,7 +859,7 @@ func (bst *BackupStoreTimer) Start() { return false, err } - log.Debug("Triggering sync backup target") + log.Info("Triggering sync backup target") backupTarget.Spec.SyncRequestedAt = metav1.Time{Time: time.Now().UTC()} if _, err = bst.ds.UpdateBackupTarget(backupTarget); err != nil && !apierrors.IsConflict(errors.Cause(err)) { log.WithError(err).Warn("Failed to updating backup target") @@ -1223,23 +1223,23 @@ type ClusterInfoStructFields struct { func (info *ClusterInfo) collectClusterScope() { if err := info.collectNamespace(); err != nil { - info.logger.WithError(err).Debug("Failed to collect Longhorn namespace") + info.logger.WithError(err).Warn("Failed to collect Longhorn namespace") } if err := info.collectNodeCount(); err != nil { - info.logger.WithError(err).Debug("Failed to collect number of Longhorn nodes") + info.logger.WithError(err).Warn("Failed to collect number of Longhorn nodes") } if err := info.collectResourceUsage(); err != nil { - info.logger.WithError(err).Debug("Failed to collect Longhorn resource usages") + info.logger.WithError(err).Warn("Failed to collect Longhorn resource usages") } if err := info.collectVolumesInfo(); err != nil { - info.logger.WithError(err).Debug("Failed to collect Longhorn Volumes info") + info.logger.WithError(err).Warn("Failed to collect Longhorn Volumes info") } if err := info.collectSettings(); err != nil { - info.logger.WithError(err).Debug("Failed to 
collect Longhorn settings") + info.logger.WithError(err).Warn("Failed to collect Longhorn settings") } } @@ -1271,13 +1271,13 @@ func (info *ClusterInfo) collectResourceUsage() error { MatchLabels: label, }) if err != nil { - logrus.WithError(err).Debugf("Failed to get %v label for %v", label, component) + logrus.WithError(err).Warnf("Failed to get %v label for %v", label, component) continue } pods, err := info.ds.ListPodsBySelector(selector) if err != nil { - logrus.WithError(err).Debugf("Failed to list %v Pod by %v label", component, label) + logrus.WithError(err).Warnf("Failed to list %v Pod by %v label", component, label) continue } podCount := len(pods) @@ -1291,7 +1291,7 @@ func (info *ClusterInfo) collectResourceUsage() error { for _, pod := range pods { podMetrics, err := metricsClient.PodMetricses(info.namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { - logrus.WithError(err).Debugf("Failed to get %v Pod", pod.Name) + logrus.WithError(err).Warnf("Failed to get %v Pod", pod.Name) continue } for _, container := range podMetrics.Containers { @@ -1393,7 +1393,7 @@ func (info *ClusterInfo) collectSettings() error { case include[settingName]: convertedValue, err := info.convertSettingValueType(setting) if err != nil { - logrus.WithError(err).Debugf("failed to convert Setting %v value", setting.Name) + logrus.WithError(err).Warnf("Failed to convert Setting %v value", setting.Name) continue } settingMap[setting.Name] = convertedValue @@ -1549,7 +1549,7 @@ func (info *ClusterInfo) collectSettingInVolume(volumeSpecValue, ignoredValue st if volumeSpecValue == ignoredValue { globalSetting, err := info.ds.GetSetting(settingName) if err != nil { - info.logger.WithError(err).Debugf("Failed to get Longhorn Setting %v", settingName) + info.logger.WithError(err).Warnf("Failed to get Longhorn Setting %v", settingName) } return globalSetting.Value } @@ -1558,19 +1558,19 @@ func (info *ClusterInfo) collectSettingInVolume(volumeSpecValue, ignoredValue st func (info *ClusterInfo) collectNodeScope() { if err := info.collectHostKernelRelease(); err != nil { - info.logger.WithError(err).Debug("Failed to collect host kernel release") + info.logger.WithError(err).Warn("Failed to collect host kernel release") } if err := info.collectHostOSDistro(); err != nil { - info.logger.WithError(err).Debug("Failed to collect host OS distro") + info.logger.WithError(err).Warn("Failed to collect host OS distro") } if err := info.collectNodeDiskCount(); err != nil { - info.logger.WithError(err).Debug("Failed to collect number of node disks") + info.logger.WithError(err).Warn("Failed to collect number of node disks") } if err := info.collectKubernetesNodeProvider(); err != nil { - info.logger.WithError(err).Debug("Failed to collect node provider") + info.logger.WithError(err).Warn("Failed to collect node provider") } } @@ -1612,7 +1612,7 @@ func (info *ClusterInfo) collectNodeDiskCount() error { for _, disk := range node.Spec.Disks { deviceType, err := types.GetDeviceTypeOf(disk.Path) if err != nil { - info.logger.WithError(err).Debugf("Failed to get device type of %v", disk.Path) + info.logger.WithError(err).Warnf("Failed to get device type of %v", disk.Path) deviceType = types.ValueUnknown } structMap[util.StructName(fmt.Sprintf(ClusterInfoNodeDiskCountFmt, strings.ToUpper(deviceType)))]++ diff --git a/controller/volume_controller.go b/controller/volume_controller.go index c5301b27fd..29434423b0 100644 --- a/controller/volume_controller.go +++ 
b/controller/volume_controller.go @@ -1647,7 +1647,7 @@ func isVolumeOfflineUpgrade(v *longhorn.Volume) bool { func (c *VolumeController) openVolumeDependentResources(v *longhorn.Volume, e *longhorn.Engine, rs map[string]*longhorn.Replica, log *logrus.Entry) error { if isVolumeOfflineUpgrade(v) { - log.Debug("Wait for offline volume upgrade to finish") + log.Info("Waiting for offline volume upgrade to finish") return nil } @@ -2060,7 +2060,7 @@ func (c *VolumeController) replenishReplicas(v *longhorn.Volume, e *longhorn.Eng rs[reusableFailedReplica.Name] = reusableFailedReplica continue } - log.Debugf("Failed to reuse failed replica %v immediately, backoff period is %v now", + log.Warnf("Failed to reuse failed replica %v immediately, backoff period is %v now", reusableFailedReplica.Name, c.backoff.Get(reusableFailedReplica.Name).Seconds()) // Couldn't reuse the replica. Add the volume back to the workqueue to check it later c.enqueueVolumeAfter(v, c.backoff.Get(reusableFailedReplica.Name)) @@ -2494,7 +2494,7 @@ func (c *VolumeController) getIsSchedulableToDiskNodes(v *longhorn.Volume, nodeN // TODO: record the message to condition log.Warn("Found 0 node has at least one schedulable disk") } else { - log.Debugf("Found node %v has at least one schedulable disk", schedulableNodeNames) + log.Infof("Found node %v has at least one schedulable disk", schedulableNodeNames) } }() diff --git a/csi/crypto/crypto.go b/csi/crypto/crypto.go index 68b53f9a7c..d8cdf712f4 100644 --- a/csi/crypto/crypto.go +++ b/csi/crypto/crypto.go @@ -75,7 +75,7 @@ func EncryptVolume(devicePath, passphrase string, cryptoParams *EncryptParams) e // OpenVolume opens volume so that it can be used by the client. func OpenVolume(volume, devicePath, passphrase string) error { if isOpen, _ := IsDeviceOpen(VolumeMapper(volume)); isOpen { - logrus.Debugf("Device %s is already opened at %s", devicePath, VolumeMapper(volume)) + logrus.Infof("Device %s is already opened at %s", devicePath, VolumeMapper(volume)) return nil } @@ -121,7 +121,7 @@ func DeviceEncryptionStatus(devicePath string) (mappedDevice, mapper string, err volume := strings.TrimPrefix(devicePath, mapperFilePathPrefix+"/") stdout, err := luksStatus(volume) if err != nil { - logrus.WithError(err).Debugf("Device %s is not an active LUKS device", devicePath) + logrus.WithError(err).Warnf("Device %s is not an active LUKS device", devicePath) return devicePath, "", nil } lines := strings.Split(string(stdout), "\n") diff --git a/csi/deployment_util.go b/csi/deployment_util.go index 4edd1a9ff1..89596ed90f 100644 --- a/csi/deployment_util.go +++ b/csi/deployment_util.go @@ -216,7 +216,7 @@ func deploy(kubeClient *clientset.Clientset, obj runtime.Object, resource string existingMeta.GetDeletionTimestamp() == nil && !needToUpdateImage(existing, obj) { // deployment of correct version already deployed - logrus.Debugf("Detected %v %v CSI Git commit %v version %v Kubernetes version %v has already been deployed", + logrus.Infof("Detected %v %v CSI Git commit %v version %v Kubernetes version %v has already been deployed", resource, name, annos[AnnotationCSIGitCommit], annos[AnnotationCSIVersion], annos[AnnotationKubernetesVersion]) return nil } diff --git a/csi/util.go b/csi/util.go index 340807801c..c8c4c299dc 100644 --- a/csi/util.go +++ b/csi/util.go @@ -284,7 +284,7 @@ func syncMountPointDirectory(targetPath string) error { // in case where the mount point exists but is corrupt, the mount point will be cleaned up and a error is returned // the underlying implementation utilizes 
mounter.IsLikelyNotMountPoint so it cannot detect bind mounts func ensureMountPoint(targetPath string, mounter mount.Interface) (bool, error) { - logrus.Debugf("Trying to ensure mount point %v", targetPath) + logrus.Infof("Trying to ensure mount point %v", targetPath) notMnt, err := mount.IsNotMountPoint(mounter, targetPath) if os.IsNotExist(err) { return false, os.MkdirAll(targetPath, 0750) @@ -292,7 +292,7 @@ func ensureMountPoint(targetPath string, mounter mount.Interface) (bool, error) IsCorruptedMnt := mount.IsCorruptedMnt(err) if !IsCorruptedMnt { - logrus.Debugf("Mount point %v try opening and syncing dir to make sure it's healthy", targetPath) + logrus.Infof("Mount point %v try opening and syncing dir to make sure it's healthy", targetPath) if err := syncMountPointDirectory(targetPath); err != nil { logrus.WithError(err).Warnf("Mount point %v was identified as corrupt by opening and syncing", targetPath) IsCorruptedMnt = true @@ -317,10 +317,10 @@ func unmount(targetPath string, mounter mount.Interface) error { forceUnmounter, ok := mounter.(mount.MounterForceUnmounter) if ok { - logrus.Debugf("Trying to force unmount potential mount point %v", targetPath) + logrus.Infof("Trying to force unmount potential mount point %v", targetPath) err = forceUnmounter.UnmountWithForce(targetPath, defaultForceUmountTimeout) } else { - logrus.Debugf("Trying to unmount potential mount point %v", targetPath) + logrus.Infof("Trying to unmount potential mount point %v", targetPath) err = mounter.Unmount(targetPath) } if err == nil { diff --git a/datastore/longhorn.go b/datastore/longhorn.go index a23ef194c2..568dc1e5c3 100644 --- a/datastore/longhorn.go +++ b/datastore/longhorn.go @@ -800,7 +800,7 @@ func (s *DataStore) AddRecurringJobLabelToVolume(volume *longhorn.Volume, labelK if err != nil { return nil, err } - logrus.Debugf("Added volume %v recurring job label %v", volume.Name, labelKey) + logrus.Infof("Added volume %v recurring job label %v", volume.Name, labelKey) } return volume, nil } @@ -814,7 +814,7 @@ func (s *DataStore) RemoveRecurringJobLabelFromVolume(volume *longhorn.Volume, l if err != nil { return nil, err } - logrus.Debugf("Removed volume %v recurring job label %v", volume.Name, labelKey) + logrus.Infof("Removed volume %v recurring job label %v", volume.Name, labelKey) } return volume, nil } @@ -1545,7 +1545,7 @@ func (s *DataStore) CheckEngineImageReadiness(image string, nodes ...string) (is } } if len(undeployedNodes) > 0 { - logrus.Debugf("CheckEngineImageReadiness: nodes %v don't have the engine image %v", undeployedNodes, image) + logrus.Infof("CheckEngineImageReadiness: nodes %v don't have the engine image %v", undeployedNodes, image) return false, nil } return true, nil @@ -2336,7 +2336,7 @@ func (s *DataStore) ListReadyNodesWithEngineImage(image string) (map[string]*lon // GetRandomReadyNode gets a list of all Node in the given namespace and // returns the first Node marked with condition ready and allow scheduling func (s *DataStore) GetRandomReadyNode() (*longhorn.Node, error) { - logrus.Debugf("Prepare to find a random ready node") + logrus.Info("Prepare to find a random ready node") nodesRO, err := s.ListNodesRO() if err != nil { return nil, errors.Wrapf(err, "failed to get random ready node") @@ -2355,7 +2355,7 @@ func (s *DataStore) GetRandomReadyNode() (*longhorn.Node, error) { // GetRandomReadyNodeDisk a list of all Node the in the given namespace and // returns the first Node && the first Disk of the Node marked with condition ready and allow scheduling func (s 
*DataStore) GetRandomReadyNodeDisk() (*longhorn.Node, string, error) { - logrus.Debugf("Preparing to find a random ready node disk") + logrus.Info("Preparing to find a random ready node disk") nodesRO, err := s.ListNodesRO() if err != nil { return nil, "", errors.Wrapf(err, "failed to get random ready node disk") @@ -4352,7 +4352,7 @@ func (s *DataStore) RemoveSystemRestoreLabel(systemRestore *longhorn.SystemResto "systemRestore": systemRestore.Name, "label": key, }) - log.Debug("Removed SystemRestore label") + log.Info("Removed SystemRestore label") return systemRestore, nil } diff --git a/engineapi/disk_service.go b/engineapi/disk_service.go index 336d406bb0..2d1ef4c38b 100644 --- a/engineapi/disk_service.go +++ b/engineapi/disk_service.go @@ -47,12 +47,12 @@ type DiskService struct { func (s *DiskService) Close() { if s.grpcClient == nil { - s.logger.WithError(errors.New("gRPC client not exist")).Debugf("failed to close disk service client") + s.logger.WithError(errors.New("gRPC client not exist")).Warn("Failed to close disk service client") return } if err := s.grpcClient.Close(); err != nil { - s.logger.WithError(err).Warn("failed to close disk service client") + s.logger.WithError(err).Warn("Failed to close disk service client") } } diff --git a/engineapi/proxy.go b/engineapi/proxy.go index cb2930e501..5383fcc53f 100644 --- a/engineapi/proxy.go +++ b/engineapi/proxy.go @@ -109,12 +109,12 @@ type EngineClientProxy interface { func (p *Proxy) Close() { if p.grpcClient == nil { - p.logger.WithError(errors.New("gRPC client not exist")).Debugf("failed to close engine proxy service client") + p.logger.WithError(errors.New("gRPC client not exist")).Warn("Failed to close engine proxy service client") return } if err := p.grpcClient.Close(); err != nil { - p.logger.WithError(err).Warn("failed to close engine client proxy") + p.logger.WithError(err).Warn("Failed to close engine client proxy") } // The only potential returning error from Close() is diff --git a/engineapi/snapshot.go b/engineapi/snapshot.go index b32bd3c19b..b5d2fff10f 100644 --- a/engineapi/snapshot.go +++ b/engineapi/snapshot.go @@ -83,7 +83,7 @@ func (e *EngineBinary) SnapshotPurge(*longhorn.Engine) error { if _, err := e.ExecuteEngineBinaryWithoutTimeout([]string{}, "snapshot", "purge", "--skip-if-in-progress"); err != nil { return errors.Wrapf(err, "error starting snapshot purge") } - logrus.Debugf("Volume %v snapshot purge started", e.Name()) + logrus.Infof("Volume %v snapshot purge started", e.Name()) return nil } @@ -110,7 +110,7 @@ func (e *EngineBinary) SnapshotClone(engine *longhorn.Engine, snapshotName, from if _, err := e.ExecuteEngineBinaryWithoutTimeout([]string{}, args...); err != nil { return errors.Wrapf(err, "error starting snapshot clone") } - logrus.Debugf("Cloned snapshot %v from volume %v to volume %v", snapshotName, fromControllerAddress, e.cURL) + logrus.Infof("Cloned snapshot %v from volume %v to volume %v", snapshotName, fromControllerAddress, e.cURL) return nil } @@ -144,7 +144,7 @@ func (e *EngineBinary) SnapshotHash(engine *longhorn.Engine, snapshotName string return errors.Wrapf(err, "error starting hashing snapshot") } - logrus.Debugf("Volume %v snapshot hashing started", e.Name()) + logrus.Infof("Volume %v snapshot hashing started", e.Name()) return nil } diff --git a/manager/engine.go b/manager/engine.go index 8f6e800474..59c6abdc77 100644 --- a/manager/engine.go +++ b/manager/engine.go @@ -119,7 +119,7 @@ func (m *VolumeManager) CreateSnapshot(snapshotName string, labels map[string]st return 
nil, fmt.Errorf("cannot found just created snapshot '%s', for volume '%s'", snapshotName, volumeName) } - logrus.Debugf("Created snapshot %v with labels %+v for volume %v", snapshotName, labels, volumeName) + logrus.Infof("Created snapshot %v with labels %+v for volume %v", snapshotName, labels, volumeName) return snap, nil } @@ -152,7 +152,7 @@ func (m *VolumeManager) DeleteSnapshot(snapshotName, volumeName string) error { return err } - logrus.Debugf("Deleted snapshot %v for volume %v", snapshotName, volumeName) + logrus.Infof("Deleted snapshot %v for volume %v", snapshotName, volumeName) return nil } @@ -198,7 +198,7 @@ func (m *VolumeManager) RevertSnapshot(snapshotName, volumeName string) error { return err } - logrus.Debugf("Revert to snapshot %v for volume %v", snapshotName, volumeName) + logrus.Infof("Reverted to snapshot %v for volume %v", snapshotName, volumeName) return nil } @@ -231,7 +231,7 @@ func (m *VolumeManager) PurgeSnapshot(volumeName string) error { return err } - logrus.Debugf("Started snapshot purge for volume %v", volumeName) + logrus.Infof("Started snapshot purge for volume %v", volumeName) return nil } diff --git a/manager/engineimage.go b/manager/engineimage.go index 1dfc630f44..a744a0d26c 100644 --- a/manager/engineimage.go +++ b/manager/engineimage.go @@ -67,7 +67,7 @@ func (m *VolumeManager) CreateEngineImage(image string) (*longhorn.EngineImage, if err != nil { return nil, err } - logrus.Debugf("Created engine image %v (%v)", ei.Name, ei.Spec.Image) + logrus.Infof("Created engine image %v (%v)", ei.Name, ei.Spec.Image) return ei, nil } @@ -92,7 +92,7 @@ func (m *VolumeManager) DeleteEngineImageByName(name string) error { if err := m.ds.DeleteEngineImage(name); err != nil { return err } - logrus.Debugf("Deleted engine image %v (%v)", ei.Name, ei.Spec.Image) + logrus.Infof("Deleted engine image %v (%v)", ei.Name, ei.Spec.Image) return nil } diff --git a/manager/kubernetes.go b/manager/kubernetes.go index 14385580da..038bfb5630 100644 --- a/manager/kubernetes.go +++ b/manager/kubernetes.go @@ -77,7 +77,7 @@ func (m *VolumeManager) PVCreate(name, pvName, fsType, secretNamespace, secretNa return nil, err } - logrus.Debugf("Created PV for volume %v: %+v", v.Name, v.Spec) + logrus.Infof("Created PV for volume %v: %+v", v.Name, v.Spec) return v, nil } @@ -136,7 +136,7 @@ func (m *VolumeManager) PVCCreate(name, namespace, pvcName string) (v *longhorn. return nil, err } - logrus.Debugf("Created PVC for volume %v: %+v", v.Name, v.Spec) + logrus.Infof("Created PVC for volume %v: %+v", v.Name, v.Spec) return v, nil } diff --git a/manager/node.go b/manager/node.go index d82ad87fbe..78161d5498 100644 --- a/manager/node.go +++ b/manager/node.go @@ -68,7 +68,7 @@ func (m *VolumeManager) UpdateNode(n *longhorn.Node) (*longhorn.Node, error) { if err != nil { return nil, err } - logrus.Debugf("Updated node %v to %+v", node.Spec.Name, node.Spec) + logrus.Infof("Updated node %v to %+v", node.Spec.Name, node.Spec) return node, nil } @@ -113,7 +113,7 @@ func (m *VolumeManager) DiskUpdate(name string, updateDisks map[string]longhorn. 
if err != nil { return nil, err } - logrus.Debugf("Updated node disks of %v to %+v", name, node.Spec.Disks) + logrus.Infof("Updated node disks of %v to %+v", name, node.Spec.Disks) return node, nil } @@ -143,6 +143,6 @@ func (m *VolumeManager) DeleteNode(name string) error { if err := m.ds.DeleteNode(name); err != nil { return err } - logrus.Debugf("Deleted node %v", name) + logrus.Infof("Deleted node %v", name) return nil } diff --git a/manager/setting.go b/manager/setting.go index 19115b9f63..ff4d276165 100644 --- a/manager/setting.go +++ b/manager/setting.go @@ -52,6 +52,6 @@ func (m *VolumeManager) CreateOrUpdateSetting(s *longhorn.Setting) (*longhorn.Se } return nil, err } - logrus.Debugf("Updated setting %v to %v", s.Name, setting.Value) + logrus.Infof("Updated setting %v to %v", s.Name, setting.Value) return setting, nil } diff --git a/manager/snapshot.go b/manager/snapshot.go index 88e8f3db0e..f97dcb475b 100644 --- a/manager/snapshot.go +++ b/manager/snapshot.go @@ -52,6 +52,6 @@ func (m *VolumeManager) CreateSnapshotCR(snapshotName string, labels map[string] return nil, err } - logrus.Debugf("Created snapshot CR %v with labels %+v for volume %v", snapshotName, labels, volumeName) + logrus.Infof("Created snapshot CR %v with labels %+v for volume %v", snapshotName, labels, volumeName) return snapshotCR, nil } diff --git a/manager/volume.go b/manager/volume.go index 87d671de72..b37a5f87a2 100644 --- a/manager/volume.go +++ b/manager/volume.go @@ -843,7 +843,7 @@ func (m *VolumeManager) UpdateReplicaCount(name string, count int) (v *longhorn. if err != nil { return nil, err } - logrus.Debugf("Updated volume %v replica count from %v to %v", v.Name, oldCount, v.Spec.NumberOfReplicas) + logrus.Infof("Updated volume %v replica count from %v to %v", v.Name, oldCount, v.Spec.NumberOfReplicas) return v, nil } @@ -864,7 +864,7 @@ func (m *VolumeManager) UpdateSnapshotDataIntegrity(name string, value string) ( if err != nil { return nil, err } - logrus.Debugf("Updated volume %v snapshot data integrity from %v to %v", v.Name, oldValue, v.Spec.SnapshotDataIntegrity) + logrus.Infof("Updated volume %v snapshot data integrity from %v to %v", v.Name, oldValue, v.Spec.SnapshotDataIntegrity) return v, nil } @@ -885,7 +885,7 @@ func (m *VolumeManager) UpdateOfflineReplicaRebuilding(name string, value string if err != nil { return nil, err } - logrus.Debugf("Updated volume %v offline replica rebuilding from %v to %v", v.Name, oldValue, v.Spec.OfflineReplicaRebuilding) + logrus.Infof("Updated volume %v offline replica rebuilding from %v to %v", v.Name, oldValue, v.Spec.OfflineReplicaRebuilding) return v, nil } @@ -906,7 +906,7 @@ func (m *VolumeManager) UpdateBackupCompressionMethod(name string, value string) if err != nil { return nil, err } - logrus.Debugf("Updated volume %v backup compression method from %v to %v", v.Name, oldValue, v.Spec.BackupCompressionMethod) + logrus.Infof("Updated volume %v backup compression method from %v to %v", v.Name, oldValue, v.Spec.BackupCompressionMethod) return v, nil } @@ -932,7 +932,7 @@ func (m *VolumeManager) UpdateReplicaAutoBalance(name string, inputSpec longhorn return nil, err } - logrus.Debugf("Updated volume %v replica auto-balance spec from %v to %v", v.Name, oldSpec, v.Spec.ReplicaAutoBalance) + logrus.Infof("Updated volume %v replica auto-balance spec from %v to %v", v.Name, oldSpec, v.Spec.ReplicaAutoBalance) return v, nil } @@ -958,7 +958,7 @@ func (m *VolumeManager) UpdateDataLocality(name string, dataLocality longhorn.Da return nil, err } - 
logrus.Debugf("Updated volume %v data locality from %v to %v", v.Name, oldDataLocality, v.Spec.DataLocality) + logrus.Infof("Updated volume %v data locality from %v to %v", v.Name, oldDataLocality, v.Spec.DataLocality) return v, nil } diff --git a/metrics_collector/instance_manager_collector.go b/metrics_collector/instance_manager_collector.go index 238560e769..a37c35d8bd 100644 --- a/metrics_collector/instance_manager_collector.go +++ b/metrics_collector/instance_manager_collector.go @@ -230,7 +230,7 @@ func (imc *InstanceManagerCollector) collectGrpcConnection(ch chan<- prometheus. imPod, err := imc.ds.GetPod(im.Name) if err != nil { if datastore.ErrorIsNotFound(err) { - logrus.WithError(err).Debugf("Resetting proxy gRPC connection counter for %v", im.Name) + logrus.WithError(err).Infof("Resetting proxy gRPC connection counter for %v", im.Name) imc.proxyConnCounter.ResetCount() continue } diff --git a/scheduler/replica_scheduler.go b/scheduler/replica_scheduler.go index 6444892151..ba05d2bd0d 100644 --- a/scheduler/replica_scheduler.go +++ b/scheduler/replica_scheduler.go @@ -401,7 +401,7 @@ func (rcs *ReplicaScheduler) scheduleReplicaToDisk(replica *longhorn.Replica, di "disk": replica.Spec.DiskID, "diskPath": replica.Spec.DiskPath, "dataDirectoryName": replica.Spec.DataDirectoryName, - }).Debugf("Schedule replica to node %v", replica.Spec.NodeID) + }).Infof("Schedule replica to node %v", replica.Spec.NodeID) } func (rcs *ReplicaScheduler) getDiskWithMostUsableStorage(disks map[string]*Disk) *Disk { @@ -534,7 +534,7 @@ func (rcs *ReplicaScheduler) RequireNewReplica(replicas map[string]*longhorn.Rep return 0 } - logrus.Debugf("Replica replenishment is delayed until %v", lastDegradedAt.Add(waitInterval)) + logrus.Infof("Replica replenishment is delayed until %v", lastDegradedAt.Add(waitInterval)) // Adding 1 more second to the check back interval to avoid clock skew return lastDegradedAt.Add(waitInterval).Sub(now) + time.Second } diff --git a/types/setting.go b/types/setting.go index 2ee748cea2..8ec8bf8e23 100644 --- a/types/setting.go +++ b/types/setting.go @@ -1333,7 +1333,7 @@ func ValidateSetting(name, value string) (err error) { runAt := schedule.Next(time.Unix(0, 0)) nextRunAt := schedule.Next(runAt) - logrus.Debugf("The interval between two data integrity checks is %v seconds", nextRunAt.Sub(runAt).Seconds()) + logrus.Infof("The interval between two data integrity checks is %v seconds", nextRunAt.Sub(runAt).Seconds()) // multi-choices case SettingNameNodeDownPodDeletionPolicy: diff --git a/upgrade/upgrade.go b/upgrade/upgrade.go index 82c80d6cde..d13b6e0a38 100644 --- a/upgrade/upgrade.go +++ b/upgrade/upgrade.go @@ -222,7 +222,7 @@ func doResourceUpgrade(namespace string, lhClient *lhclientset.Clientset, kubeCl // When lhVersionBeforeUpgrade < v1.5.0, it is v1.4.x. The `CheckUpgradePathSupported` method would have failed us out earlier if it was not v1.4.x. resourceMaps := map[string]interface{}{} if semver.Compare(lhVersionBeforeUpgrade, "v1.5.0") < 0 { - logrus.Debugf("Walking through the resource upgrade path v1.4.x to v1.5.0") + logrus.Info("Walking through the resource upgrade path v1.4.x to v1.5.0") if err := v14xto150.UpgradeResources(namespace, lhClient, kubeClient, resourceMaps); err != nil { return err } @@ -234,7 +234,7 @@ func doResourceUpgrade(namespace string, lhClient *lhclientset.Clientset, kubeCl // When lhVersionBeforeUpgrade < v1.5.0, it is v1.4.x. The `CheckUpgradePathSupported` method would have failed us out earlier if it was not v1.4.x. 
resourceMaps = map[string]interface{}{} if semver.Compare(lhVersionBeforeUpgrade, "v1.5.0") < 0 { - logrus.Debugf("Walking through the resource status upgrade path v1.4.x to v1.5.0") + logrus.Info("Walking through the resource status upgrade path v1.4.x to v1.5.0") if err := v14xto150.UpgradeResourcesStatus(namespace, lhClient, kubeClient, resourceMaps); err != nil { return err }
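
Note (illustrative, not part of the patch): the hunks above apply a consistent convention — routine state transitions and completed operations are logged at Info, recoverable failures and skipped work at Warn, and Debug is kept for verbose diagnostics only. A minimal logrus sketch of that convention follows, for reference only; the function and variable names (reconcileVolume, volumeName, the simulated error) are hypothetical and do not appear in the patch.

package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

// reconcileVolume is a hypothetical helper that illustrates the logging-level
// convention used in this patch: Info for normal progress, Warn for
// recoverable problems (with the error attached), Debug for verbose detail.
func reconcileVolume(volumeName string) {
	log := logrus.WithField("volume", volumeName)

	// Routine, user-visible progress: Info (many of these were previously Debug).
	log.Infof("Started snapshot purge for volume %v", volumeName)

	// Recoverable failure that the caller tolerates: Warn, with the error attached.
	if err := errors.New("simulated: setting not found"); err != nil {
		log.WithError(err).Warnf("Failed to get setting for volume %v", volumeName)
	}

	// High-volume detail useful only when troubleshooting stays at Debug.
	log.Debugf("Current reconcile state dump for volume %v", volumeName)
}

func main() {
	logrus.SetLevel(logrus.DebugLevel) // enable Debug output for the demo
	reconcileVolume("example-volume")
}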