Move some debug level messages to info level
Signed-off-by: Derek Su <derek.su@suse.com>
derekbit committed Jul 10, 2023
1 parent 2c5b623 commit 4bc4d37
Showing 23 changed files with 77 additions and 77 deletions.
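For context, logrus only emits entries at or above the logger's configured level, and the default level is Info, so the Debug/Debugf calls removed below (the "-" lines) are normally invisible in a default deployment. The change is mostly mechanical: messages that mark meaningful operations are promoted to Info/Infof, and failure paths are promoted to Warn/Warnf. A minimal standalone sketch of that behavior (illustrative only, not Longhorn code; the field values are made up):

```go
package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New() // defaults to logrus.InfoLevel

	// Suppressed at the default level; only shown after
	// logger.SetLevel(logrus.DebugLevel).
	logger.Debug("Deploying CSI driver")

	// Emitted at the default level, which is why operationally
	// significant messages are promoted to Info in this commit.
	logger.Info("Deploying CSI driver")
	logger.WithField("volume", "vol-1").Infof("Deleted snapshot %v", "snap-1")

	// Failure paths that were previously Debug become Warn so they
	// surface without enabling debug logging.
	logger.WithError(errors.New("example failure")).Warn("Failed to collect Longhorn namespace")
}
```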
2 changes: 1 addition & 1 deletion app/driver.go
@@ -178,7 +178,7 @@ func deployDriver(c *cli.Context) error {
return errors.Wrap(err, "CSI cannot be deployed because MountPropagation is not set")
}

- logrus.Debug("Deploying CSI driver")
+ logrus.Info("Deploying CSI driver")
return deployCSIDriver(kubeClient, lhClient, c, managerImage, managerURL)
}

18 changes: 9 additions & 9 deletions app/recurring_job.go
@@ -439,7 +439,7 @@ func (job *Job) doSnapshotCleanup(backupDone bool) (err error) {
}); err != nil {
return err
}
- job.logger.Debugf("Cleaned up snapshot CR %v for %v", snapshotName, volumeName)
+ job.logger.Infof("Cleaned up snapshot CR %v for %v", snapshotName, volumeName)
}

if job.task == longhorn.RecurringJobTypeSnapshotCleanup {
@@ -471,7 +471,7 @@ func (job *Job) deleteSnapshots(names []string, volume *longhornclient.Volume, v
if err != nil {
return err
}
- job.logger.WithField("volume", volume.Name).Debugf("Deleted snapshot %v", name)
+ job.logger.WithField("volume", volume.Name).Infof("Deleted snapshot %v", name)
}
return nil
}
@@ -514,7 +514,7 @@ func (job *Job) purgeSnapshots(volume *longhornclient.Volume, volumeAPI longhorn
}
job.logger.Warn("Encountered one or more errors while purging snapshots")
}
- job.logger.WithField("volume", volume.Name).Debug("Purged snapshots")
+ job.logger.WithField("volume", volume.Name).Info("Purged snapshots")
return nil
}

@@ -626,9 +626,9 @@ func (job *Job) doRecurringBackup() (err error) {
switch info.State {
case string(longhorn.BackupStateCompleted):
complete = true
- job.logger.Debugf("Complete creating backup %v", info.Id)
+ job.logger.Infof("Completed creating backup %v", info.Id)
case string(longhorn.BackupStateNew), string(longhorn.BackupStateInProgress):
- job.logger.Debugf("Creating backup %v, current progress %v", info.Id, info.Progress)
+ job.logger.Infof("Creating backup %v, current progress %v", info.Id, info.Progress)
case string(longhorn.BackupStateError), string(longhorn.BackupStateUnknown):
return fmt.Errorf("failed to create backup %v: %v", info.Id, info.Error)
default:
@@ -663,7 +663,7 @@ func (job *Job) doRecurringBackup() (err error) {
}); err != nil {
return fmt.Errorf("cleaned up backup %v failed for %v: %v", backup, job.volumeName, err)
}
- job.logger.Debugf("Cleaned up backup %v for %v", backup, job.volumeName)
+ job.logger.Infof("Cleaned up backup %v for %v", backup, job.volumeName)
}

if err := job.doSnapshotCleanup(true); err != nil {
@@ -865,7 +865,7 @@ func filterVolumesForJob(allowDetached bool, volumes []longhorn.Volume, filterNa
}

if volume.Status.RestoreRequired {
- logger.Debugf("Bypassed to create job for %v volume during restoring from the backup", volume.Name)
+ logger.Infof("Bypassed to create job for %v volume during restoring from the backup", volume.Name)
continue
}

@@ -874,7 +874,7 @@
*filterNames = append(*filterNames, volume.Name)
continue
}
- logger.Debugf("Cannot create job for %v volume in state %v", volume.Name, volume.Status.State)
+ logger.Warnf("Cannot create job for %v volume in state %v", volume.Name, volume.Status.State)
}
}

@@ -883,7 +883,7 @@ func getVolumesBySelector(recurringJobType, recurringJobName, namespace string,

label := fmt.Sprintf("%s=%s",
types.GetRecurringJobLabelKey(recurringJobType, recurringJobName), types.LonghornLabelValueEnabled)
- logger.Debugf("Get volumes from label %v", label)
+ logger.Infof("Got volumes from label %v", label)

volumes, err := client.LonghornV1beta2().Volumes(namespace).List(context.TODO(), metav1.ListOptions{
LabelSelector: label,
4 changes: 2 additions & 2 deletions controller/engine_controller.go
@@ -1401,7 +1401,7 @@ func checkSizeBeforeRestoration(log logrus.FieldLogger, engine *longhorn.Engine,
if !datastore.ErrorIsConflict(err) {
return false, err
}
- log.WithField("volume", v.Name).Debug("Retrying size update for DR volume before restore")
+ log.WithField("volume", v.Name).Warn("Retrying size update for DR volume before restore")
continue
}
return false, nil
@@ -1664,7 +1664,7 @@ func (ec *EngineController) rebuildNewReplica(e *longhorn.Engine) error {
}
// We cannot rebuild more than one replica at one time
if rebuildingInProgress {
- ec.logger.WithField("volume", e.Spec.VolumeName).Debug("Skipped rebuilding of replica because there is another rebuild in progress")
+ ec.logger.WithField("volume", e.Spec.VolumeName).Info("Skipped rebuilding of replica because there is another rebuild in progress")
return nil
}
for replica, addr := range e.Status.CurrentReplicaAddressMap {
6 changes: 3 additions & 3 deletions controller/kubernetes_pod_controller.go
@@ -439,7 +439,7 @@ func (kc *KubernetesPodController) getAssociatedVolumes(pod *v1.Pod) ([]*longhor

pvc, err := kc.ds.GetPersistentVolumeClaimRO(pod.Namespace, v.VolumeSource.PersistentVolumeClaim.ClaimName)
if datastore.ErrorIsNotFound(err) {
- log.WithError(err).Debugf("Cannot auto-delete Pod when the associated PersistentVolumeClaim is not found")
+ log.WithError(err).Warn("Cannot auto-delete Pod when the associated PersistentVolumeClaim is not found")
continue
}
if err != nil {
@@ -448,7 +448,7 @@ func (kc *KubernetesPodController) getAssociatedVolumes(pod *v1.Pod) ([]*longhor

pv, err := kc.getAssociatedPersistentVolume(pvc)
if datastore.ErrorIsNotFound(err) {
- log.WithError(err).Debugf("Cannot auto-delete Pod when the associated PersistentVolume is not found")
+ log.WithError(err).Warn("Cannot auto-delete Pod when the associated PersistentVolume is not found")
continue
}
if err != nil {
@@ -458,7 +458,7 @@ func (kc *KubernetesPodController) getAssociatedVolumes(pod *v1.Pod) ([]*longhor
if pv.Spec.CSI != nil && pv.Spec.CSI.Driver == types.LonghornDriverName {
vol, err := kc.ds.GetVolume(pv.GetName())
if datastore.ErrorIsNotFound(err) {
- log.WithError(err).Debugf("Cannot auto-delete Pod when the associated Volume is not found")
+ log.WithError(err).Warn("Cannot auto-delete Pod when the associated Volume is not found")
continue
}
if err != nil {
2 changes: 1 addition & 1 deletion controller/monitor/snapshot_monitor.go
@@ -299,7 +299,7 @@ func (m *SnapshotMonitor) shouldAddToInProgressSnapshotCheckTasks(snapshotName s

_, ok := m.inProgressSnapshotCheckTasks[snapshotName]
if ok {
- m.logger.WithField("monitor", monitorName).Debugf("snapshot %s is being checked", snapshotName)
+ m.logger.WithField("monitor", monitorName).Infof("Checking snapshot %s", snapshotName)
return false
}
m.inProgressSnapshotCheckTasks[snapshotName] = struct{}{}
4 changes: 2 additions & 2 deletions controller/replica_controller.go
@@ -244,14 +244,14 @@ func (rc *ReplicaController) UpdateReplicaEvictionStatus(replica *longhorn.Repli
if rc.isEvictionRequested(replica) &&
!replica.Status.EvictionRequested {
replica.Status.EvictionRequested = true
- log.Debug("Replica has requested eviction")
+ log.Info("Replica has requested eviction")
}

// Check if eviction has been cancelled on this replica
if !rc.isEvictionRequested(replica) &&
replica.Status.EvictionRequested {
replica.Status.EvictionRequested = false
- log.Debug("Replica has cancelled eviction")
+ log.Info("Replica has cancelled eviction")
}

}
32 changes: 16 additions & 16 deletions controller/setting_controller.go
@@ -859,7 +859,7 @@ func (bst *BackupStoreTimer) Start() {
return false, err
}

- log.Debug("Triggering sync backup target")
+ log.Info("Triggering sync backup target")
backupTarget.Spec.SyncRequestedAt = metav1.Time{Time: time.Now().UTC()}
if _, err = bst.ds.UpdateBackupTarget(backupTarget); err != nil && !apierrors.IsConflict(errors.Cause(err)) {
log.WithError(err).Warn("Failed to updating backup target")
@@ -1223,23 +1223,23 @@ type ClusterInfoStructFields struct {

func (info *ClusterInfo) collectClusterScope() {
if err := info.collectNamespace(); err != nil {
- info.logger.WithError(err).Debug("Failed to collect Longhorn namespace")
+ info.logger.WithError(err).Warn("Failed to collect Longhorn namespace")
}

if err := info.collectNodeCount(); err != nil {
- info.logger.WithError(err).Debug("Failed to collect number of Longhorn nodes")
+ info.logger.WithError(err).Warn("Failed to collect number of Longhorn nodes")
}

if err := info.collectResourceUsage(); err != nil {
- info.logger.WithError(err).Debug("Failed to collect Longhorn resource usages")
+ info.logger.WithError(err).Warn("Failed to collect Longhorn resource usages")
}

if err := info.collectVolumesInfo(); err != nil {
- info.logger.WithError(err).Debug("Failed to collect Longhorn Volumes info")
+ info.logger.WithError(err).Warn("Failed to collect Longhorn Volumes info")
}

if err := info.collectSettings(); err != nil {
- info.logger.WithError(err).Debug("Failed to collect Longhorn settings")
+ info.logger.WithError(err).Warn("Failed to collect Longhorn settings")
}
}

@@ -1271,13 +1271,13 @@ func (info *ClusterInfo) collectResourceUsage() error {
MatchLabels: label,
})
if err != nil {
- logrus.WithError(err).Debugf("Failed to get %v label for %v", label, component)
+ logrus.WithError(err).Warnf("Failed to get %v label for %v", label, component)
continue
}

pods, err := info.ds.ListPodsBySelector(selector)
if err != nil {
- logrus.WithError(err).Debugf("Failed to list %v Pod by %v label", component, label)
+ logrus.WithError(err).Warnf("Failed to list %v Pod by %v label", component, label)
continue
}
podCount := len(pods)
@@ -1291,7 +1291,7 @@
for _, pod := range pods {
podMetrics, err := metricsClient.PodMetricses(info.namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
- logrus.WithError(err).Debugf("Failed to get %v Pod", pod.Name)
+ logrus.WithError(err).Warnf("Failed to get %v Pod", pod.Name)
continue
}
for _, container := range podMetrics.Containers {
@@ -1393,7 +1393,7 @@ func (info *ClusterInfo) collectSettings() error {
case include[settingName]:
convertedValue, err := info.convertSettingValueType(setting)
if err != nil {
- logrus.WithError(err).Debugf("failed to convert Setting %v value", setting.Name)
+ logrus.WithError(err).Warnf("Failed to convert Setting %v value", setting.Name)
continue
}
settingMap[setting.Name] = convertedValue
@@ -1549,7 +1549,7 @@ func (info *ClusterInfo) collectSettingInVolume(volumeSpecValue, ignoredValue st
if volumeSpecValue == ignoredValue {
globalSetting, err := info.ds.GetSetting(settingName)
if err != nil {
- info.logger.WithError(err).Debugf("Failed to get Longhorn Setting %v", settingName)
+ info.logger.WithError(err).Warnf("Failed to get Longhorn Setting %v", settingName)
}
return globalSetting.Value
}
@@ -1558,19 +1558,19 @@

func (info *ClusterInfo) collectNodeScope() {
if err := info.collectHostKernelRelease(); err != nil {
- info.logger.WithError(err).Debug("Failed to collect host kernel release")
+ info.logger.WithError(err).Warn("Failed to collect host kernel release")
}

if err := info.collectHostOSDistro(); err != nil {
- info.logger.WithError(err).Debug("Failed to collect host OS distro")
+ info.logger.WithError(err).Warn("Failed to collect host OS distro")
}

if err := info.collectNodeDiskCount(); err != nil {
- info.logger.WithError(err).Debug("Failed to collect number of node disks")
+ info.logger.WithError(err).Warn("Failed to collect number of node disks")
}

if err := info.collectKubernetesNodeProvider(); err != nil {
- info.logger.WithError(err).Debug("Failed to collect node provider")
+ info.logger.WithError(err).Warn("Failed to collect node provider")
}
}

@@ -1612,7 +1612,7 @@ func (info *ClusterInfo) collectNodeDiskCount() error {
for _, disk := range node.Spec.Disks {
deviceType, err := types.GetDeviceTypeOf(disk.Path)
if err != nil {
- info.logger.WithError(err).Debugf("Failed to get device type of %v", disk.Path)
+ info.logger.WithError(err).Warnf("Failed to get device type of %v", disk.Path)
deviceType = types.ValueUnknown
}
structMap[util.StructName(fmt.Sprintf(ClusterInfoNodeDiskCountFmt, strings.ToUpper(deviceType)))]++
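The setting_controller.go hunks above also show that cluster-info collection stays best effort: a failed collector is logged and skipped rather than aborting the whole pass, which is presumably why those paths move to Warn rather than Info. A small self-contained sketch of that log-and-continue pattern (hypothetical collector names, not Longhorn's actual types):

```go
package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Hypothetical best-effort collectors standing in for
	// collectNamespace, collectNodeCount, and so on.
	collectors := []struct {
		name    string
		collect func() error
	}{
		{"Longhorn namespace", func() error { return nil }},
		{"number of Longhorn nodes", func() error { return errors.New("metrics API unavailable") }},
		{"Longhorn resource usages", func() error { return nil }},
	}

	for _, c := range collectors {
		// A failure is surfaced at Warn level and the loop continues,
		// so one broken collector does not hide the others' data.
		if err := c.collect(); err != nil {
			logger.WithError(err).Warnf("Failed to collect %v", c.name)
			continue
		}
	}
}
```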
6 changes: 3 additions & 3 deletions controller/volume_controller.go
@@ -1647,7 +1647,7 @@ func isVolumeOfflineUpgrade(v *longhorn.Volume) bool {

func (c *VolumeController) openVolumeDependentResources(v *longhorn.Volume, e *longhorn.Engine, rs map[string]*longhorn.Replica, log *logrus.Entry) error {
if isVolumeOfflineUpgrade(v) {
- log.Debug("Wait for offline volume upgrade to finish")
+ log.Info("Waiting for offline volume upgrade to finish")
return nil
}

@@ -2060,7 +2060,7 @@ func (c *VolumeController) replenishReplicas(v *longhorn.Volume, e *longhorn.Eng
rs[reusableFailedReplica.Name] = reusableFailedReplica
continue
}
- log.Debugf("Failed to reuse failed replica %v immediately, backoff period is %v now",
+ log.Warnf("Failed to reuse failed replica %v immediately, backoff period is %v now",
reusableFailedReplica.Name, c.backoff.Get(reusableFailedReplica.Name).Seconds())
// Couldn't reuse the replica. Add the volume back to the workqueue to check it later
c.enqueueVolumeAfter(v, c.backoff.Get(reusableFailedReplica.Name))
@@ -2494,7 +2494,7 @@ func (c *VolumeController) getIsSchedulableToDiskNodes(v *longhorn.Volume, nodeN
// TODO: record the message to condition
log.Warn("Found 0 node has at least one schedulable disk")
} else {
- log.Debugf("Found node %v has at least one schedulable disk", schedulableNodeNames)
+ log.Infof("Found node %v has at least one schedulable disk", schedulableNodeNames)
}
}()

4 changes: 2 additions & 2 deletions csi/crypto/crypto.go
@@ -75,7 +75,7 @@ func EncryptVolume(devicePath, passphrase string, cryptoParams *EncryptParams) e
// OpenVolume opens volume so that it can be used by the client.
func OpenVolume(volume, devicePath, passphrase string) error {
if isOpen, _ := IsDeviceOpen(VolumeMapper(volume)); isOpen {
- logrus.Debugf("Device %s is already opened at %s", devicePath, VolumeMapper(volume))
+ logrus.Infof("Device %s is already opened at %s", devicePath, VolumeMapper(volume))
return nil
}

@@ -121,7 +121,7 @@ func DeviceEncryptionStatus(devicePath string) (mappedDevice, mapper string, err
volume := strings.TrimPrefix(devicePath, mapperFilePathPrefix+"/")
stdout, err := luksStatus(volume)
if err != nil {
- logrus.WithError(err).Debugf("Device %s is not an active LUKS device", devicePath)
+ logrus.WithError(err).Warnf("Device %s is not an active LUKS device", devicePath)
return devicePath, "", nil
}
lines := strings.Split(string(stdout), "\n")
2 changes: 1 addition & 1 deletion csi/deployment_util.go
@@ -216,7 +216,7 @@ func deploy(kubeClient *clientset.Clientset, obj runtime.Object, resource string
existingMeta.GetDeletionTimestamp() == nil &&
!needToUpdateImage(existing, obj) {
// deployment of correct version already deployed
- logrus.Debugf("Detected %v %v CSI Git commit %v version %v Kubernetes version %v has already been deployed",
+ logrus.Infof("Detected %v %v CSI Git commit %v version %v Kubernetes version %v has already been deployed",
resource, name, annos[AnnotationCSIGitCommit], annos[AnnotationCSIVersion], annos[AnnotationKubernetesVersion])
return nil
}
8 changes: 4 additions & 4 deletions csi/util.go
@@ -284,15 +284,15 @@ func syncMountPointDirectory(targetPath string) error {
// in case where the mount point exists but is corrupt, the mount point will be cleaned up and a error is returned
// the underlying implementation utilizes mounter.IsLikelyNotMountPoint so it cannot detect bind mounts
func ensureMountPoint(targetPath string, mounter mount.Interface) (bool, error) {
- logrus.Debugf("Trying to ensure mount point %v", targetPath)
+ logrus.Infof("Trying to ensure mount point %v", targetPath)
notMnt, err := mount.IsNotMountPoint(mounter, targetPath)
if os.IsNotExist(err) {
return false, os.MkdirAll(targetPath, 0750)
}

IsCorruptedMnt := mount.IsCorruptedMnt(err)
if !IsCorruptedMnt {
- logrus.Debugf("Mount point %v try opening and syncing dir to make sure it's healthy", targetPath)
+ logrus.Infof("Mount point %v try opening and syncing dir to make sure it's healthy", targetPath)
if err := syncMountPointDirectory(targetPath); err != nil {
logrus.WithError(err).Warnf("Mount point %v was identified as corrupt by opening and syncing", targetPath)
IsCorruptedMnt = true
@@ -317,10 +317,10 @@ func unmount(targetPath string, mounter mount.Interface) error {

forceUnmounter, ok := mounter.(mount.MounterForceUnmounter)
if ok {
- logrus.Debugf("Trying to force unmount potential mount point %v", targetPath)
+ logrus.Infof("Trying to force unmount potential mount point %v", targetPath)
err = forceUnmounter.UnmountWithForce(targetPath, defaultForceUmountTimeout)
} else {
- logrus.Debugf("Trying to unmount potential mount point %v", targetPath)
+ logrus.Infof("Trying to unmount potential mount point %v", targetPath)
err = mounter.Unmount(targetPath)
}
if err == nil {