feat: get node name from guest agent
Signed-off-by: PoAn Yang <poan.yang@suse.com>
FrankYang0529 committed Feb 29, 2024
1 parent 00ec411 commit d64b2a0
Showing 3 changed files with 94 additions and 27 deletions.
33 changes: 28 additions & 5 deletions pkg/cloud-controller-manager/ccm.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"io"
 	"os"
+	"sync"

 	ctllb "github.com/harvester/harvester-load-balancer/pkg/generated/controllers/loadbalancer.harvesterhci.io"
 	ctlkubevirt "github.com/harvester/harvester/pkg/generated/controllers/kubevirt.io"
@@ -14,6 +15,7 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/klog/v2"
+	"kubevirt.io/client-go/kubecli"

 	vmi "github.com/harvester/harvester-cloud-provider/pkg/controller/virtualmachineinstance"
 )
@@ -32,8 +34,13 @@ type CloudProvider struct {
 	loadBalancers cloudprovider.LoadBalancer
 	instances     cloudprovider.InstancesV2

-	Context   context.Context
-	Namespace string
+	kubevirtClient kubecli.KubevirtClient
+
+	nodeToVMName *sync.Map
+
+	Context context.Context
+
+	namespace string
 }

 func init() {
@@ -69,12 +76,24 @@ func newCloudProvider(reader io.Reader) (cloudprovider.Interface, error) {
 		Namespace: namespace,
 	})

+	kubevirtClient, err := kubecli.GetKubevirtClientFromClientConfig(config)
+	if err != nil {
+		return nil, err
+	}
+
+	nodeToVMName := &sync.Map{}
 	cp := &CloudProvider{
 		localCoreFactory: ctlcore.NewFactoryFromConfigOrDie(localCfg),
 		lbFactory:        ctllb.NewFactoryFromConfigOrDie(clientConfig),
 		kubevirtFactory:  kubevirtFactory,

+		kubevirtClient: kubevirtClient,
+
+		nodeToVMName: nodeToVMName,
+
 		Context: signals.SetupSignalContext(),
+
+		namespace: namespace,
 	}
 	cp.loadBalancers = &LoadBalancerManager{
 		lbClient: cp.lbFactory.Loadbalancer().V1beta1().LoadBalancer(),
@@ -83,9 +102,10 @@ func newCloudProvider(reader io.Reader) (cloudprovider.Interface, error) {
 		namespace: namespace,
 	}
 	cp.instances = &instanceManager{
-		vmClient:  cp.kubevirtFactory.Kubevirt().V1().VirtualMachine(),
-		vmiClient: cp.kubevirtFactory.Kubevirt().V1().VirtualMachineInstance(),
-		namespace: namespace,
+		vmClient:     cp.kubevirtFactory.Kubevirt().V1().VirtualMachine(),
+		vmiClient:    cp.kubevirtFactory.Kubevirt().V1().VirtualMachineInstance(),
+		nodeToVMName: nodeToVMName,
+		namespace:    namespace,
 	}

 	return cp, nil
@@ -99,6 +119,9 @@ func (c *CloudProvider) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
 		client,
 		c.localCoreFactory.Core().V1().Node(),
 		c.kubevirtFactory.Kubevirt().V1().VirtualMachineInstance(),
+		c.kubevirtClient,
+		c.nodeToVMName,
+		c.namespace,
 	)

 	go func() {
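Taken together, this wiring hands a single *sync.Map by reference to both the instance manager and the VMI controller, so a mapping stored by the controller is immediately visible to instance lookups without extra locking. A minimal, self-contained sketch of that sharing pattern (the recorder/resolver names and values are illustrative, not from this repository):

package main

import (
	"fmt"
	"sync"
)

// recorder mimics the VMI controller side: it stores hostname -> VM name.
type recorder struct{ m *sync.Map }

func (r recorder) record(hostname, vmName string) { r.m.Store(hostname, vmName) }

// resolver mimics the instanceManager side: it reads the same map.
type resolver struct{ m *sync.Map }

func (r resolver) resolve(nodeName string) string {
	if v, ok := r.m.Load(nodeName); ok {
		return v.(string) // only strings are ever stored in this sketch
	}
	return nodeName // no mapping: fall back to the node name itself
}

func main() {
	shared := &sync.Map{} // one map, handed to both components by reference
	rec, res := recorder{shared}, resolver{shared}

	rec.record("guest-hostname", "harvester-vm-name")
	fmt.Println(res.resolve("guest-hostname")) // harvester-vm-name
	fmt.Println(res.resolve("other-node"))     // other-node
}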
30 changes: 20 additions & 10 deletions pkg/cloud-controller-manager/instance.go
@@ -3,6 +3,7 @@ package ccm
 import (
 	"context"
	"net"
+	"sync"

 	ctlkubevirtv1 "github.com/harvester/harvester/pkg/generated/controllers/kubevirt.io/v1"
 	v1 "k8s.io/api/core/v1"
@@ -14,31 +15,32 @@ import (
 )

 type instanceManager struct {
-	vmClient  ctlkubevirtv1.VirtualMachineClient
-	vmiClient ctlkubevirtv1.VirtualMachineInstanceClient
-	namespace string
+	vmClient     ctlkubevirtv1.VirtualMachineClient
+	vmiClient    ctlkubevirtv1.VirtualMachineInstanceClient
+	nodeToVMName *sync.Map
+	namespace    string
 }

 func (i *instanceManager) InstanceExists(ctx context.Context, node *v1.Node) (bool, error) {
-	if _, err := i.vmClient.Get(i.namespace, node.Name, metav1.GetOptions{}); err != nil && !errors.IsNotFound(err) {
-		return false, err
-	} else if errors.IsNotFound(err) {
+	if _, err := i.getVM(node); err != nil {
+		if !errors.IsNotFound(err) {
+			return false, err
+		}
 		return false, nil
-	} else {
-		return true, nil
 	}
+	return true, nil
 }

 func (i *instanceManager) InstanceShutdown(ctx context.Context, node *v1.Node) (bool, error) {
-	vm, err := i.vmClient.Get(i.namespace, node.Name, metav1.GetOptions{})
+	vm, err := i.getVM(node)
 	if err != nil {
 		return false, err
 	}
 	return !vm.Status.Ready, nil
 }

 func (i *instanceManager) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloudprovider.InstanceMetadata, error) {
-	vm, err := i.vmClient.Get(i.namespace, node.Name, metav1.GetOptions{})
+	vm, err := i.getVM(node)
 	if err != nil {
 		return nil, err
 	}
@@ -69,6 +71,14 @@ func (i *instanceManager) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloudprovider.InstanceMetadata, error) {
 	return meta, nil
 }

+func (i *instanceManager) getVM(node *v1.Node) (*kubevirtv1.VirtualMachine, error) {
+	nodeName := node.Name
+	if vmName, ok := i.nodeToVMName.Load(nodeName); ok {
+		nodeName = vmName.(string)
+	}
+	return i.vmClient.Get(i.namespace, nodeName, metav1.GetOptions{})
+}
+
 // getNodeAddresses returns node addresses only when the value of the annotation `alpha.kubernetes.io/provided-node-ip` is not empty
 func getNodeAddresses(node *v1.Node, vmi *kubevirtv1.VirtualMachineInstance) []v1.NodeAddress {
 	providedNodeIP, ok := node.Annotations[api.AnnotationAlphaProvidedIPAddr]
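With this change, getVM is the single lookup path for all three Instances hooks: it prefers a hostname-to-VM-name mapping from the shared map and otherwise assumes the node name equals the VM name, so a missing map entry silently degrades to the old behavior. A runnable sketch of the same Load-and-assert pattern (standalone function and example names are assumptions, not the repository's types):

package main

import (
	"fmt"
	"sync"
)

// lookupVMName mirrors getVM's mapping step: prefer the stored VM name,
// otherwise assume the node name equals the VM name.
func lookupVMName(nodeToVMName *sync.Map, nodeName string) string {
	if v, ok := nodeToVMName.Load(nodeName); ok {
		return v.(string) // safe here: only strings are ever stored
	}
	return nodeName
}

func main() {
	m := &sync.Map{}
	m.Store("my-hostname", "rke2-pool1-abcde") // written by the VMI controller

	fmt.Println(lookupVMName(m, "my-hostname")) // rke2-pool1-abcde
	fmt.Println(lookupVMName(m, "plain-node"))  // plain-node (no mapping)
}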
58 changes: 46 additions & 12 deletions pkg/controller/virtualmachineinstance/controller.go
@@ -2,16 +2,19 @@ package virtualmachineinstance

 import (
 	"context"
+	"sync"

 	"github.com/harvester/harvester/pkg/builder"
 	"github.com/harvester/harvester/pkg/controller/master/virtualmachine"
 	ctlv1 "github.com/harvester/harvester/pkg/generated/controllers/kubevirt.io/v1"
 	ctlcorev1 "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
+	"github.com/sirupsen/logrus"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes"
 	cloudproviderapi "k8s.io/cloud-provider/api"
 	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 	kubevirtv1 "kubevirt.io/api/core/v1"
+	"kubevirt.io/client-go/kubecli"
 )

 const (
@@ -21,38 +24,69 @@ const (
 // Register registers the controller, which helps re-sync Harvester node topology labels to guest cluster nodes.
 // When a migration completes, the controller re-syncs the labels to the guest cluster nodes.
 // This makes sure the node topology labels are always up to date.
-func Register(ctx context.Context,
-	restClient kubernetes.Interface, nodes ctlcorev1.NodeController, vmis ctlv1.VirtualMachineInstanceController) {
+func Register(
+	ctx context.Context,
+	restClient kubernetes.Interface,
+	nodes ctlcorev1.NodeController,
+	vmis ctlv1.VirtualMachineInstanceController,
+	kubevirtClient kubecli.KubevirtClient,
+	nodeToVMName *sync.Map,
+	namespace string,
+) {
 	handler := &Handler{
-		vmis:       vmis,
-		vmiCache:   vmis.Cache(),
-		nodeCache:  nodes.Cache(),
-		restClient: restClient,
+		vmis:           vmis,
+		vmiCache:       vmis.Cache(),
+		nodeCache:      nodes.Cache(),
+		restClient:     restClient,
+		kubevirtClient: kubevirtClient,
+		nodeToVMName:   nodeToVMName,
 	}
 	vmis.OnChange(ctx, vmiControllerName, handler.OnVmiChanged)
 }

 type Handler struct {
-	vmis       ctlv1.VirtualMachineInstanceController
-	vmiCache   ctlv1.VirtualMachineInstanceCache
-	nodeCache  ctlcorev1.NodeCache
-	restClient kubernetes.Interface
+	vmis           ctlv1.VirtualMachineInstanceController
+	vmiCache       ctlv1.VirtualMachineInstanceCache
+	nodeCache      ctlcorev1.NodeCache
+	restClient     kubernetes.Interface
+	kubevirtClient kubecli.KubevirtClient
+
+	nodeToVMName *sync.Map
+
+	namespace string
 }

 func (h *Handler) OnVmiChanged(_ string, vmi *kubevirtv1.VirtualMachineInstance) (*kubevirtv1.VirtualMachineInstance, error) {
 	// TODO: Add some unit tests for this controller

 	// only handle the VMI whose migration has completed
 	if vmi == nil || vmi.DeletionTimestamp != nil ||
-		vmi.Annotations == nil || !isMigrationCompleted(vmi) {
+		vmi.Annotations == nil || vmi.Namespace != h.namespace || !isMigrationCompleted(vmi) {
 		return vmi, nil
 	}

 	if creator := vmi.Labels[builder.LabelKeyVirtualMachineCreator]; creator != virtualmachine.VirtualMachineCreatorNodeDriver {
 		return vmi, nil
 	}

-	node, err := h.nodeCache.Get(vmi.Name)
+	nodeName := vmi.Name
+	guestAgentInfo, err := h.kubevirtClient.VirtualMachineInstance(vmi.Namespace).GuestOsInfo(context.TODO(), vmi.Name)
+	if err != nil {
+		logrus.WithFields(logrus.Fields{
+			"name":      vmi.Name,
+			"namespace": vmi.Namespace,
+		}).WithError(err).Error("failed to get guest agent info, falling back to the VMI name as the node name")
+	} else {
+		logrus.WithFields(logrus.Fields{
+			"name":      vmi.Name,
+			"namespace": vmi.Namespace,
+			"hostname":  guestAgentInfo.Hostname,
+		}).Info("got guest agent info, using the hostname as the node name")
+		nodeName = guestAgentInfo.Hostname
+		h.nodeToVMName.Store(nodeName, vmi.Name)
+	}
+
+	node, err := h.nodeCache.Get(nodeName)
 	if err != nil {
 		return vmi, err
 	}
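The handler's resolution order is: query the guest agent for the VM's hostname, record the hostname-to-VMI-name mapping in the shared map on success, and fall back to the VMI name when the agent is unreachable. A self-contained sketch of that decision flow; guestHostname below is a hypothetical stand-in for the real kubecli GuestOsInfo call, and the VMI names are made up:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// guestHostname stands in for kubevirtClient.VirtualMachineInstance(ns).GuestOsInfo(ctx, name);
// like the real call, it can fail when the guest agent is not running.
func guestHostname(vmiName string) (string, error) {
	if vmiName == "vmi-no-agent" {
		return "", errors.New("guest agent not connected")
	}
	return "custom-hostname", nil
}

// resolveNodeName follows the controller's fallback order: prefer the guest
// agent hostname and remember the mapping, else use the VMI name directly.
func resolveNodeName(nodeToVMName *sync.Map, vmiName string) string {
	hostname, err := guestHostname(vmiName)
	if err != nil {
		return vmiName // fallback: node name assumed to equal the VMI name
	}
	nodeToVMName.Store(hostname, vmiName) // consumed later by instanceManager.getVM
	return hostname
}

func main() {
	m := &sync.Map{}
	fmt.Println(resolveNodeName(m, "vmi-with-agent")) // custom-hostname
	fmt.Println(resolveNodeName(m, "vmi-no-agent"))   // vmi-no-agent
}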
