Improving Harvester Maintenance Mode
Introduce the label `harvesterhci.io/maintain-mode-strategy`, which can be added to a `VirtualMachine` resource to specify the behavior of the VM during a node maintenance event. The following modes are supported:

- `Migrate` (default): The VM is live-migrated to another node in the cluster. This is the current behavior of the maintenance feature and remains the default with this enhancement.
- `ShutdownAndRestartAfterEnable`: The VM is shut down when maintenance mode is enabled and restarted once the node has entered maintenance mode. The VM will be scheduled on a different node.
- `ShutdownAndRestartAfterDisable`: The VM is shut down when maintenance mode is enabled and restarted after maintenance mode is disabled. The VM stays on the same node.
- `Shutdown`: The VM is shut down when maintenance mode is enabled. It is NOT restarted and remains switched off.

If the `Force` checkbox is selected in the maintenance dialog, the configured maintenance mode strategy of a VM is overridden: the VM is shut down and remains off regardless of the configured strategy.
All other VMs, i.e. those without the label `harvesterhci.io/maintain-mode-strategy`, are handled with the existing forced maintenance mode behavior.
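
For illustration, a minimal sketch (not part of this commit) of how such a label could be set on a `VirtualMachine` object in Go. The VM name, namespace, and the chosen strategy value are assumptions for this example; the label key is exposed as `util.LabelMaintainModeStrategy` in the Harvester code base:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubevirtv1 "kubevirt.io/api/core/v1"
)

func main() {
	// Placeholder VM; in practice the label would be set on an existing
	// VirtualMachine resource (e.g. via kubectl or the Harvester UI).
	vm := &kubevirtv1.VirtualMachine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "demo-vm",
			Namespace: "default",
			Labels: map[string]string{
				// Shut the VM down when its node enters maintenance mode and
				// restart it on the same node once maintenance mode is disabled.
				"harvesterhci.io/maintain-mode-strategy": "ShutdownAndRestartAfterDisable",
			},
		},
	}
	fmt.Printf("maintenance strategy of %s/%s: %s\n",
		vm.Namespace, vm.Name, vm.Labels["harvesterhci.io/maintain-mode-strategy"])
}
```

The node action handler and the maintain-node controller in the diffs below select VMs by exactly this label and record the affected node in the `harvesterhci.io/maintain-mode-strategy-node-name` annotation.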

Related to: harvester#5069
HEP: harvester#5984
Docs: harvester/docs#523

Signed-off-by: Volker Theile <vtheile@suse.com>
(cherry picked from commit d4166eb)
votdev authored and bk201 committed Jul 5, 2024
1 parent 4e7387e commit fc5eff4
Showing 13 changed files with 476 additions and 54 deletions.
55 changes: 52 additions & 3 deletions pkg/api/node/formatter.go
@@ -21,14 +21,16 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"

ctlnode "github.com/harvester/harvester/pkg/controller/master/node"
"github.com/harvester/harvester/pkg/controller/master/nodedrain"
harvesterctlv1beta1 "github.com/harvester/harvester/pkg/generated/controllers/harvesterhci.io/v1beta1"
kubevirtv1 "github.com/harvester/harvester/pkg/generated/controllers/kubevirt.io/v1"
ctlkubevirtv1 "github.com/harvester/harvester/pkg/generated/controllers/kubevirt.io/v1"
ctllhv1 "github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2"
"github.com/harvester/harvester/pkg/util"
"github.com/harvester/harvester/pkg/util/drainhelper"
@@ -83,9 +85,12 @@ type ActionHandler struct {
nodeClient ctlcorev1.NodeClient
longhornVolumeCache ctllhv1.VolumeCache
longhornReplicaCache ctllhv1.ReplicaCache
virtualMachineInstanceCache kubevirtv1.VirtualMachineInstanceCache
virtualMachineClient ctlkubevirtv1.VirtualMachineClient
virtualMachineCache ctlkubevirtv1.VirtualMachineCache
virtualMachineInstanceCache ctlkubevirtv1.VirtualMachineInstanceCache
addonCache harvesterctlv1beta1.AddonCache
dynamicClient dynamic.Interface
virtSubresourceRestClient rest.Interface
ctx context.Context
}

@@ -189,7 +194,51 @@ func (h ActionHandler) disableMaintenanceMode(nodeName string) error {
delete(node.Annotations, ctlnode.MaintainStatusAnnotationKey)
}

return h.retryMaintenanceModeUpdate(nodeName, disableMaintaenanceModeFunc, "disable")
err := h.retryMaintenanceModeUpdate(nodeName, disableMaintaenanceModeFunc, "disable")
if err != nil {
return err
}

// Restart those VMs that have been labeled to be shut down before
// maintenance mode and that should be restarted when the maintenance
// mode has been disabled again.
node, err := h.nodeCache.Get(nodeName)
if err != nil {
return err
}
selector := labels.Set{util.LabelMaintainModeStrategy: util.MaintainModeStrategyShutdownAndRestartAfterDisable}.AsSelector()
vmList, err := h.virtualMachineCache.List(node.Namespace, selector)
if err != nil {
return fmt.Errorf("failed to list VMs with labels %s: %w", selector.String(), err)
}
for _, vm := range vmList {
// Make sure that this VM was shut down as part of the maintenance
// mode of the given node.
if vm.Annotations[util.AnnotationMaintainModeStrategyNodeName] != nodeName {
continue
}

logrus.WithFields(logrus.Fields{
"namespace": vm.Namespace,
"virtualmachine_name": vm.Name,
}).Info("restarting the VM that was shut down in maintenance mode")

err := h.virtSubresourceRestClient.Put().Namespace(vm.Namespace).Resource("virtualmachines").SubResource("start").Name(vm.Name).Do(h.ctx).Error()
if err != nil {
return fmt.Errorf("failed to start VM %s/%s: %w", vm.Namespace, vm.Name, err)
}

// Remove the annotation that was previously set when the node
// went into maintenance mode.
vmCopy := vm.DeepCopy()
delete(vmCopy.Annotations, util.AnnotationMaintainModeStrategyNodeName)
_, err = h.virtualMachineClient.Update(vmCopy)
if err != nil {
return err
}
}

return nil
}

func (h ActionHandler) retryMaintenanceModeUpdate(nodeName string, updateFunc maintenanceModeUpdateFunc, actionName string) error {
13 changes: 6 additions & 7 deletions pkg/api/node/formatter_test.go
@@ -18,6 +18,7 @@ import (

harvesterv1beta1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
"github.com/harvester/harvester/pkg/generated/clientset/versioned/fake"
"github.com/harvester/harvester/pkg/generated/clientset/versioned/scheme"
"github.com/harvester/harvester/pkg/util/fakeclients"
)

@@ -297,8 +298,6 @@ var (
},
}

scheme = runtime.NewScheme()

vmWithCDROM = &kubevirtv1.VirtualMachineInstance{
ObjectMeta: metav1.ObjectMeta{
Name: "cdrom-vm",
@@ -398,13 +397,13 @@ func Test_listUnhealthyVM(t *testing.T) {
func Test_powerActionNotPossible(t *testing.T) {
assert := require.New(t)

err := harvesterv1beta1.AddToScheme(scheme)
err := harvesterv1beta1.AddToScheme(scheme.Scheme)
assert.NoError(err, "expected no error building scheme")

typedObjects := []runtime.Object{}
client := fake.NewSimpleClientset(typedObjects...)
k8sclientset := k8sfake.NewSimpleClientset(testNode)
fakeDynamicClient := fakedynamic.NewSimpleDynamicClient(scheme)
fakeDynamicClient := fakedynamic.NewSimpleDynamicClient(scheme.Scheme)

h := ActionHandler{
nodeCache: fakeclients.NodeCache(k8sclientset.CoreV1().Nodes),
@@ -422,13 +421,13 @@ func Test_powerActionNotPossible(t *testing.T) {
func Test_powerActionPossible(t *testing.T) {
assert := require.New(t)

err := harvesterv1beta1.AddToScheme(scheme)
err := harvesterv1beta1.AddToScheme(scheme.Scheme)
assert.NoError(err, "expected no error building scheme")

typedObjects := []runtime.Object{seederAddon}
client := fake.NewSimpleClientset(typedObjects...)
k8sclientset := k8sfake.NewSimpleClientset(testNode)
fakeDynamicClient := fakedynamic.NewSimpleDynamicClient(scheme, dynamicInventoryObj)
fakeDynamicClient := fakedynamic.NewSimpleDynamicClient(scheme.Scheme, dynamicInventoryObj)

h := ActionHandler{
nodeCache: fakeclients.NodeCache(k8sclientset.CoreV1().Nodes),
@@ -447,7 +446,7 @@ func Test_powerAction(t *testing.T) {

powerOperation := "shutdown"
k8sclientset := k8sfake.NewSimpleClientset(testNode)
fakeDynamicClient := fakedynamic.NewSimpleDynamicClient(scheme, dynamicInventoryObj)
fakeDynamicClient := fakedynamic.NewSimpleDynamicClient(scheme.Scheme, dynamicInventoryObj)
h := ActionHandler{
nodeCache: fakeclients.NodeCache(k8sclientset.CoreV1().Nodes),
nodeClient: fakeclients.NodeClient(k8sclientset.CoreV1().Nodes),
16 changes: 16 additions & 0 deletions pkg/api/node/schema.go
@@ -8,9 +8,12 @@ import (
"github.com/rancher/steve/pkg/schema"
"github.com/rancher/steve/pkg/server"
"github.com/rancher/wrangler/pkg/schemas"
k8sschema "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"

"github.com/harvester/harvester/pkg/config"
"github.com/harvester/harvester/pkg/generated/clientset/versioned/scheme"
)

type MaintenanceModeInput struct {
@@ -32,14 +35,27 @@ func RegisterSchema(scaled *config.Scaled, server *server.Server, _ config.Options) error {
if err != nil {
return fmt.Errorf("error creating dynamic client: %v", err)
}

copyConfig := rest.CopyConfig(server.RESTConfig)
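// The copied REST config is pointed at the KubeVirt subresources API
// (subresources.kubevirt.io/v1), which serves VM operations such as
// "start" as subresources.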
copyConfig.GroupVersion = &k8sschema.GroupVersion{Group: "subresources.kubevirt.io", Version: "v1"}
copyConfig.APIPath = "/apis"
copyConfig.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
virtSubresourceClient, err := rest.RESTClientFor(copyConfig)
if err != nil {
return err
}

nodeHandler := ActionHandler{
nodeClient: scaled.Management.CoreFactory.Core().V1().Node(),
nodeCache: scaled.Management.CoreFactory.Core().V1().Node().Cache(),
longhornReplicaCache: scaled.Management.LonghornFactory.Longhorn().V1beta2().Replica().Cache(),
longhornVolumeCache: scaled.Management.LonghornFactory.Longhorn().V1beta2().Volume().Cache(),
virtualMachineClient: scaled.Management.VirtFactory.Kubevirt().V1().VirtualMachine(),
virtualMachineCache: scaled.Management.VirtFactory.Kubevirt().V1().VirtualMachine().Cache(),
virtualMachineInstanceCache: scaled.Management.VirtFactory.Kubevirt().V1().VirtualMachineInstance().Cache(),
addonCache: scaled.Management.HarvesterFactory.Harvesterhci().V1beta1().Addon().Cache(),
dynamicClient: dynamicClient,
virtSubresourceRestClient: virtSubresourceClient,
ctx: scaled.Ctx,
}

7 changes: 2 additions & 5 deletions pkg/controller/master/backup/restore.go
@@ -58,9 +58,6 @@ const (
pvcNameSpaceAnnotation = "pvc.harvesterhci.io/namespace"
pvcNameAnnotation = "pvc.harvesterhci.io/name"

vmCreatorLabel = "harvesterhci.io/creator"
vmNameLabel = "harvesterhci.io/vmName"

restoreErrorEvent = "VirtualMachineRestoreError"
restoreCompleteEvent = "VirtualMachineRestoreComplete"

@@ -681,8 +678,8 @@ func (h *RestoreHandler) createNewVM(restore *harvesterv1.VirtualMachineRestore,
ObjectMeta: metav1.ObjectMeta{
Annotations: newVMSpecAnnotations,
Labels: map[string]string{
vmCreatorLabel: "harvester",
vmNameLabel: vmName,
util.LabelVMCreator: "harvester",
util.LabelVMName: vmName,
},
},
Spec: sanitizeVirtualMachineForRestore(restore, vmCpy.Spec.Template.Spec),
95 changes: 87 additions & 8 deletions pkg/controller/master/node/maintain_controller.go
@@ -2,19 +2,22 @@ package node

import (
"context"
"fmt"

ctlcorev1 "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
kubevirtv1 "kubevirt.io/api/core/v1"

"github.com/harvester/harvester/pkg/config"
v1 "github.com/harvester/harvester/pkg/generated/controllers/kubevirt.io/v1"
"github.com/harvester/harvester/pkg/util"
"github.com/harvester/harvester/pkg/util/virtualmachineinstance"
)

const (
maintainNodeControllerName = "maintain-node-controller"
labelNodeNameKey = "kubevirt.io/nodeName"

maintainNodeControllerName = "maintain-node-controller"
MaintainStatusAnnotationKey = "harvesterhci.io/maintain-status"
MaintainStatusComplete = "completed"
MaintainStatusRunning = "running"
@@ -25,20 +28,26 @@ const (
type maintainNodeHandler struct {
nodes ctlcorev1.NodeClient
nodeCache ctlcorev1.NodeCache
virtualMachineClient v1.VirtualMachineClient
virtualMachineCache v1.VirtualMachineCache
virtualMachineInstanceCache v1.VirtualMachineInstanceCache
}

// MaintainRegister registers the node controller
func MaintainRegister(ctx context.Context, management *config.Management, _ config.Options) error {
nodes := management.CoreFactory.Core().V1().Node()
vms := management.VirtFactory.Kubevirt().V1().VirtualMachine()
vmis := management.VirtFactory.Kubevirt().V1().VirtualMachineInstance()
maintainNodeHandler := &maintainNodeHandler{
nodes: nodes,
nodeCache: nodes.Cache(),
virtualMachineClient: vms,
virtualMachineCache: vms.Cache(),
virtualMachineInstanceCache: vmis.Cache(),
}

nodes.OnChange(ctx, maintainNodeControllerName, maintainNodeHandler.OnNodeChanged)
nodes.OnRemove(ctx, maintainNodeControllerName, maintainNodeHandler.OnNodeRemoved)

return nil
}
@@ -51,17 +60,87 @@ func (h *maintainNodeHandler) OnNodeChanged(_ string, node *corev1.Node) (*corev1.Node, error) {
if maintenanceStatus, ok := node.Annotations[MaintainStatusAnnotationKey]; !ok || maintenanceStatus != MaintainStatusRunning {
return node, nil
}
sets := labels.Set{
labelNodeNameKey: node.Name,
}
vmis, err := h.virtualMachineInstanceCache.List(corev1.NamespaceAll, sets.AsSelector())

// Wait until no VMs are running on that node.
vmiList, err := virtualmachineinstance.ListByNode(node, labels.NewSelector(), h.virtualMachineInstanceCache)
if err != nil {
return node, err
}
if len(vmis) != 0 {
if len(vmiList) != 0 {
return node, nil
}

// Restart those VMs that have been labeled to be shut down before
// maintenance mode and that should be restarted when the node has
// successfully switched into maintenance mode.
selector := labels.Set{util.LabelMaintainModeStrategy: util.MaintainModeStrategyShutdownAndRestartAfterEnable}.AsSelector()
vmList, err := h.virtualMachineCache.List(node.Namespace, selector)
if err != nil {
return node, fmt.Errorf("failed to list VMs with labels %s: %w", selector.String(), err)
}
for _, vm := range vmList {
// Make sure that this VM was shut down as part of the maintenance
// mode of the given node.
if vm.Annotations[util.AnnotationMaintainModeStrategyNodeName] != node.Name {
continue
}

logrus.WithFields(logrus.Fields{
"namespace": vm.Namespace,
"virtualmachine_name": vm.Name,
}).Info("restarting the VM that was temporarily shut down for maintenance mode")

// Update the run strategy of the VM to start it and remove the
// annotation that was previously set when the node went into
// maintenance mode.
// Get the run strategy that was stored in the VM's annotation when it
// was shut down. Note that this annotation is normally patched
// automatically by the VM mutator.
runStrategy := kubevirtv1.VirtualMachineRunStrategy(vm.Annotations[util.AnnotationRunStrategy])
if runStrategy == "" {
runStrategy = kubevirtv1.RunStrategyRerunOnFailure
}
vmCopy := vm.DeepCopy()
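// Spec.RunStrategy is a pointer field; the one-element slice literal is
// simply a way to take the address of the computed value.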
vmCopy.Spec.RunStrategy = &[]kubevirtv1.VirtualMachineRunStrategy{runStrategy}[0]
delete(vmCopy.Annotations, util.AnnotationMaintainModeStrategyNodeName)
_, err = h.virtualMachineClient.Update(vmCopy)
if err != nil {
return node, fmt.Errorf("failed to start VM %s/%s: %w", vm.Namespace, vm.Name, err)
}
}

toUpdate := node.DeepCopy()
toUpdate.Annotations[MaintainStatusAnnotationKey] = MaintainStatusComplete
return h.nodes.Update(toUpdate)
}

// OnNodeRemoved ensures that all "harvesterhci.io/maintain-mode-strategy-node-name"
// annotations on VMs that reference this node are removed.
func (h *maintainNodeHandler) OnNodeRemoved(_ string, node *corev1.Node) (*corev1.Node, error) {
if node == nil || node.DeletionTimestamp == nil || node.Annotations == nil {
return node, nil
}

if _, ok := node.Annotations[MaintainStatusAnnotationKey]; !ok {
return node, nil
}

vms, err := h.virtualMachineCache.List(corev1.NamespaceAll, labels.Everything())
if err != nil {
return node, fmt.Errorf("failed to list VMs: %w", err)
}

for _, vm := range vms {
if vm.Annotations == nil || vm.Annotations[util.AnnotationMaintainModeStrategyNodeName] != node.Name {
continue
}
vmCopy := vm.DeepCopy()
delete(vmCopy.Annotations, util.AnnotationMaintainModeStrategyNodeName)
_, err = h.virtualMachineClient.Update(vmCopy)
if err != nil {
return node, err
}
}

return node, nil
}
(Diffs for the remaining 8 changed files are not shown.)
