diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go
index 22dbf9784..df1bc117e 100644
--- a/pkg/controller/plan/kubevirt.go
+++ b/pkg/controller/plan/kubevirt.go
@@ -438,6 +438,10 @@ func (r *KubeVirt) DeleteVM(vm *plan.VMStatus) (err error) {
 	)
 	if err != nil {
 		err = liberr.Wrap(err)
+		r.Log.Error(err,
+			"Failed to query VMs",
+			"vm",
+			vm.String())
 		return
 	}
 	for _, object := range list.Items {
@@ -448,6 +452,10 @@ func (r *KubeVirt) DeleteVM(vm *plan.VMStatus) (err error) {
 			if k8serr.IsNotFound(err) {
 				err = nil
 			} else {
+				r.Log.Error(err,
+					"Failed to delete VM",
+					"vm",
+					vm.String())
 				return liberr.Wrap(err)
 			}
 		} else {
@@ -932,16 +940,16 @@ func (r *KubeVirt) SetPopulatorPodOwnership(vm *plan.VMStatus) (err error) {
 }
 
 // Deletes PVCs that were populated using a volume populator, including prime PVCs
-func (r *KubeVirt) DeletePopulatedPVCs(vm *plan.VMStatus) error {
+func (r *KubeVirt) DeletePopulatedPVCs(vm *plan.VMStatus, failOnErr bool) error {
 	pvcs, err := r.getPVCs(vm.Ref)
 	if err != nil {
 		return err
 	}
 	for _, pvc := range pvcs {
-		if err = r.deleteCorrespondingPrimePVC(&pvc, vm); err != nil {
+		if err = r.deleteCorrespondingPrimePVC(&pvc, vm); err != nil && failOnErr {
 			return err
 		}
-		if err = r.deletePopulatedPVC(&pvc, vm); err != nil {
+		if err = r.deletePopulatedPVC(&pvc, vm); err != nil && failOnErr {
 			return err
 		}
 	}
@@ -953,10 +961,26 @@ func (r *KubeVirt) deleteCorrespondingPrimePVC(pvc *core.PersistentVolumeClaim,
 	err := r.Destination.Client.Get(context.TODO(), client.ObjectKey{Namespace: r.Plan.Spec.TargetNamespace, Name: fmt.Sprintf("prime-%s", string(pvc.UID))}, &primePVC)
 	switch {
 	case err != nil && !k8serr.IsNotFound(err):
+		r.Log.Error(err,
+			"Failed to query prime PVC",
+			"vm",
+			vm.String(),
+			"pvc",
+			pvc,
+		)
 		return err
 	case err == nil:
 		err = r.DeleteObject(&primePVC, vm, "Deleted prime PVC.", "pvc")
 		if err != nil && !k8serr.IsNotFound(err) {
+			r.Log.Error(err,
+				"Failed to delete prime PVC",
+				"vm",
+				vm.String(),
+				"pvc",
+				pvc,
+				"prime pvc",
+				primePVC,
+			)
 			return err
 		}
 	}
@@ -967,12 +991,26 @@ func (r *KubeVirt) deletePopulatedPVC(pvc *core.PersistentVolumeClaim, vm *plan.
 	err := r.DeleteObject(pvc, vm, "Deleted PVC.", "pvc")
 	switch {
 	case err != nil && !k8serr.IsNotFound(err):
+		r.Log.Error(err,
+			"Failed to delete PVC",
+			"vm",
+			vm.String(),
+			"pvc",
+			pvc,
+		)
 		return err
 	case err == nil:
 		pvcCopy := pvc.DeepCopy()
 		pvc.Finalizers = nil
 		patch := client.MergeFrom(pvcCopy)
 		if err = r.Destination.Client.Patch(context.TODO(), pvc, patch); err != nil {
+			r.Log.Error(err,
+				"Failed to patch PVC (remove finalizers)",
+				"vm",
+				vm.String(),
+				"pvc",
+				pvc,
+			)
 			return err
 		}
 	}
diff --git a/pkg/controller/plan/migration.go b/pkg/controller/plan/migration.go
index d449879fd..3d59d6790 100644
--- a/pkg/controller/plan/migration.go
+++ b/pkg/controller/plan/migration.go
@@ -344,12 +344,8 @@ func (r *Migration) Archive() {
 	}
 
 	for _, vm := range r.Plan.Status.Migration.VMs {
-		if err := r.cleanup(vm); err != nil {
-			r.Log.Error(err,
-				"Couldn't clean up VM while archiving plan.",
-				"vm",
-				vm.String())
-		}
+		r.Log.Info("Archiving VM", "vm", vm)
+		r.cleanup(vm, false)
 	}
 	return
 }
@@ -388,12 +384,7 @@ func (r *Migration) Cancel() error {
 
 	for _, vm := range r.Plan.Status.Migration.VMs {
 		if vm.HasCondition(Canceled) {
-			if err := r.cleanup(vm); err != nil {
-				r.Log.Error(err,
-					"Couldn't clean up after canceled VM migration.",
-					"vm",
-					vm.String())
-			}
+			r.cleanup(vm, false)
 			if vm.RestorePowerState == On {
 				if err := r.provider.PowerOn(vm.Ref); err != nil {
 					r.Log.Error(err,
@@ -414,45 +405,46 @@ func (r *Migration) Cancel() error {
 	return nil
 }
 
-func (r *Migration) deletePopulatorPVCs(vm *plan.VMStatus) (err error) {
+func (r *Migration) deletePopulatorPVCs(vm *plan.VMStatus, failOnErr bool) (err error) {
 	if r.builder.SupportsVolumePopulators() {
-		err = r.kubevirt.DeletePopulatedPVCs(vm)
+		err = r.kubevirt.DeletePopulatedPVCs(vm, failOnErr)
 	}
 	return
 }
 
 // Delete left over migration resources associated with a VM.
-func (r *Migration) cleanup(vm *plan.VMStatus) (err error) {
+// failOnErr - if you want to delete as much as possible, set to 'false'
+func (r *Migration) cleanup(vm *plan.VMStatus, failOnErr bool) (err error) {
 	if !vm.HasCondition(Succeeded) {
-		if err = r.kubevirt.DeleteVM(vm); err != nil {
+		if err = r.kubevirt.DeleteVM(vm); err != nil && failOnErr {
 			return
 		}
-		if err = r.deletePopulatorPVCs(vm); err != nil {
+		if err = r.deletePopulatorPVCs(vm, failOnErr); err != nil && failOnErr {
 			return
 		}
 	}
-	if err = r.deleteImporterPods(vm); err != nil {
+	if err = r.deleteImporterPods(vm); err != nil && failOnErr {
 		return
 	}
-	if err = r.kubevirt.DeletePVCConsumerPod(vm); err != nil {
+	if err = r.kubevirt.DeletePVCConsumerPod(vm); err != nil && failOnErr {
 		return
 	}
-	if err = r.kubevirt.DeleteGuestConversionPod(vm); err != nil {
+	if err = r.kubevirt.DeleteGuestConversionPod(vm); err != nil && failOnErr {
 		return
 	}
-	if err = r.kubevirt.DeleteSecret(vm); err != nil {
+	if err = r.kubevirt.DeleteSecret(vm); err != nil && failOnErr {
 		return
 	}
-	if err = r.kubevirt.DeleteConfigMap(vm); err != nil {
+	if err = r.kubevirt.DeleteConfigMap(vm); err != nil && failOnErr {
 		return
 	}
-	if err = r.kubevirt.DeleteHookJobs(vm); err != nil {
+	if err = r.kubevirt.DeleteHookJobs(vm); err != nil && failOnErr {
 		return
 	}
-	if err = r.destinationClient.DeletePopulatorDataSource(vm); err != nil {
+	if err = r.destinationClient.DeletePopulatorDataSource(vm); err != nil && failOnErr {
 		return
 	}
-	if err = r.kubevirt.DeletePopulatorPods(vm); err != nil {
+	if err = r.kubevirt.DeletePopulatorPods(vm); err != nil && failOnErr {
 		return
 	}
 	r.removeWarmSnapshots(vm)
@@ -650,7 +642,7 @@ func (r *Migration) execute(vm *plan.VMStatus) (err error) {
 		vm.MarkStarted()
 		step.MarkStarted()
 		step.Phase = Running
-		err = r.cleanup(vm)
+		err = r.cleanup(vm, true)
 		if err != nil {
 			step.AddError(err.Error())
 			err = nil