From 208a3d2e53a60ea0e43c69a7c269c48d4f5cf0d6 Mon Sep 17 00:00:00 2001 From: Zespre Chang Date: Mon, 26 Feb 2024 19:59:47 +0800 Subject: [PATCH] fix(webhook): check if pool overlaps service cidr If the input CIDR overlaps the cluster-wide service CIDR, the spawned agent cannot contact the kube-apiserver due to a routing issue. Such requests should be rejected by the webhook. Signed-off-by: Zespre Chang --- chart/templates/rbac.yaml | 2 +- cmd/webhook/run.go | 13 +- pkg/codegen/main.go | 1 + pkg/data/data.go | 4 +- .../controllers/core/v1/interface.go | 4 + pkg/generated/controllers/core/v1/node.go | 376 ++++++++++++++++++ pkg/util/common.go | 4 +- pkg/util/fakeclient/ippool.go | 3 +- .../fakeclient/networkattachmentdefinition.go | 3 +- pkg/util/fakeclient/node.go | 65 +++ pkg/util/fakeclient/pod.go | 3 +- .../fakeclient/virtualmachinenetworkconfig.go | 3 +- pkg/util/network.go | 37 ++ pkg/webhook/ippool/validator.go | 58 ++- pkg/webhook/ippool/validator_test.go | 102 ++++- 15 files changed, 641 insertions(+), 37 deletions(-) create mode 100644 pkg/generated/controllers/core/v1/node.go create mode 100644 pkg/util/fakeclient/node.go diff --git a/chart/templates/rbac.yaml b/chart/templates/rbac.yaml index 4291970..9d75474 100644 --- a/chart/templates/rbac.yaml +++ b/chart/templates/rbac.yaml @@ -52,7 +52,7 @@ rules: resources: [ "ippools", "virtualmachinenetworkconfigs" ] verbs: [ "*" ] - apiGroups: [ "" ] - resources: [ "secrets" ] + resources: [ "nodes", "secrets" ] verbs: [ "watch", "list" ] - apiGroups: [ "k8s.cni.cncf.io" ] resources: [ "network-attachment-definitions" ] diff --git a/cmd/webhook/run.go b/cmd/webhook/run.go index b9aa09e..af1ddcc 100644 --- a/cmd/webhook/run.go +++ b/cmd/webhook/run.go @@ -9,6 +9,8 @@ import ( "github.com/sirupsen/logrus" "k8s.io/client-go/rest" + ctlcore "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/core" + ctlcorev1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/core/v1" ctlcni "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/k8s.cni.cncf.io" ctlcniv1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/k8s.cni.cncf.io/v1" ctlkubevirt "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/kubevirt.io" @@ -24,13 +26,17 @@ type caches struct { ippoolCache ctlnetworkv1.IPPoolCache vmnetcfgCache ctlnetworkv1.VirtualMachineNetworkConfigCache - nadCache ctlcniv1.NetworkAttachmentDefinitionCache - vmCache ctlkubevirtv1.VirtualMachineCache + nadCache ctlcniv1.NetworkAttachmentDefinitionCache + nodeCache ctlcorev1.NodeCache + vmCache ctlkubevirtv1.VirtualMachineCache } func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*caches, error) { var starters []start.Starter + coreFactory := ctlcore.NewFactoryFromConfigOrDie(cfg) + starters = append(starters, coreFactory) + kubevirtFactory := ctlkubevirt.NewFactoryFromConfigOrDie(cfg) starters = append(starters, kubevirtFactory) @@ -45,6 +51,7 @@ func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*caches, ippoolCache: networkFactory.Network().V1alpha1().IPPool().Cache(), vmnetcfgCache: networkFactory.Network().V1alpha1().VirtualMachineNetworkConfig().Cache(), nadCache: cniFactory.K8s().V1().NetworkAttachmentDefinition().Cache(), + nodeCache: coreFactory.Core().V1().Node().Cache(), vmCache: kubevirtFactory.Kubevirt().V1().VirtualMachine().Cache(), } @@ -69,7 +76,7 @@ func run(ctx context.Context, cfg *rest.Config, options *config.Options) error { webhookServer := server.NewWebhookServer(ctx, cfg, 
name, options) if err := webhookServer.RegisterValidators( - ippool.NewValidator(c.nadCache, c.vmnetcfgCache), + ippool.NewValidator(c.nadCache, c.nodeCache, c.vmnetcfgCache), vmnetcfg.NewValidator(c.ippoolCache), ); err != nil { return err diff --git a/pkg/codegen/main.go b/pkg/codegen/main.go index c0e0bf0..73bb8f1 100644 --- a/pkg/codegen/main.go +++ b/pkg/codegen/main.go @@ -45,6 +45,7 @@ func main() { }, corev1.GroupName: { Types: []interface{}{ + corev1.Node{}, corev1.Pod{}, }, InformersPackage: "k8s.io/client-go/informers", diff --git a/pkg/data/data.go b/pkg/data/data.go index 20771be..18693d9 100644 --- a/pkg/data/data.go +++ b/pkg/data/data.go @@ -94,7 +94,7 @@ func chartCrdsNetworkHarvesterhciIo_ippoolsYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "chart/crds/network.harvesterhci.io_ippools.yaml", size: 7603, mode: os.FileMode(420), modTime: time.Unix(1708057982, 0)} + info := bindataFileInfo{name: "chart/crds/network.harvesterhci.io_ippools.yaml", size: 7603, mode: os.FileMode(420), modTime: time.Unix(1708939633, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -114,7 +114,7 @@ func chartCrdsNetworkHarvesterhciIo_virtualmachinenetworkconfigsYaml() (*asset, return nil, err } - info := bindataFileInfo{name: "chart/crds/network.harvesterhci.io_virtualmachinenetworkconfigs.yaml", size: 4544, mode: os.FileMode(420), modTime: time.Unix(1708057982, 0)} + info := bindataFileInfo{name: "chart/crds/network.harvesterhci.io_virtualmachinenetworkconfigs.yaml", size: 4544, mode: os.FileMode(420), modTime: time.Unix(1708939633, 0)} a := &asset{bytes: bytes, info: info} return a, nil } diff --git a/pkg/generated/controllers/core/v1/interface.go b/pkg/generated/controllers/core/v1/interface.go index c08875a..957461d 100644 --- a/pkg/generated/controllers/core/v1/interface.go +++ b/pkg/generated/controllers/core/v1/interface.go @@ -30,6 +30,7 @@ func init() { } type Interface interface { + Node() NodeController Pod() PodController } @@ -43,6 +44,9 @@ type version struct { controllerFactory controller.SharedControllerFactory } +func (c *version) Node() NodeController { + return NewNodeController(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}, "nodes", false, c.controllerFactory) +} func (c *version) Pod() PodController { return NewPodController(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, "pods", true, c.controllerFactory) } diff --git a/pkg/generated/controllers/core/v1/node.go b/pkg/generated/controllers/core/v1/node.go new file mode 100644 index 0000000..47bd226 --- /dev/null +++ b/pkg/generated/controllers/core/v1/node.go @@ -0,0 +1,376 @@ +/* +Copyright 2024 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + "github.com/rancher/lasso/pkg/client" + "github.com/rancher/lasso/pkg/controller" + "github.com/rancher/wrangler/pkg/apply" + "github.com/rancher/wrangler/pkg/condition" + "github.com/rancher/wrangler/pkg/generic" + "github.com/rancher/wrangler/pkg/kv" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" +) + +type NodeHandler func(string, *v1.Node) (*v1.Node, error) + +type NodeController interface { + generic.ControllerMeta + NodeClient + + OnChange(ctx context.Context, name string, sync NodeHandler) + OnRemove(ctx context.Context, name string, sync NodeHandler) + Enqueue(name string) + EnqueueAfter(name string, duration time.Duration) + + Cache() NodeCache +} + +type NodeClient interface { + Create(*v1.Node) (*v1.Node, error) + Update(*v1.Node) (*v1.Node, error) + UpdateStatus(*v1.Node) (*v1.Node, error) + Delete(name string, options *metav1.DeleteOptions) error + Get(name string, options metav1.GetOptions) (*v1.Node, error) + List(opts metav1.ListOptions) (*v1.NodeList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) +} + +type NodeCache interface { + Get(name string) (*v1.Node, error) + List(selector labels.Selector) ([]*v1.Node, error) + + AddIndexer(indexName string, indexer NodeIndexer) + GetByIndex(indexName, key string) ([]*v1.Node, error) +} + +type NodeIndexer func(obj *v1.Node) ([]string, error) + +type nodeController struct { + controller controller.SharedController + client *client.Client + gvk schema.GroupVersionKind + groupResource schema.GroupResource +} + +func NewNodeController(gvk schema.GroupVersionKind, resource string, namespaced bool, controller controller.SharedControllerFactory) NodeController { + c := controller.ForResourceKind(gvk.GroupVersion().WithResource(resource), gvk.Kind, namespaced) + return &nodeController{ + controller: c, + client: c.Client(), + gvk: gvk, + groupResource: schema.GroupResource{ + Group: gvk.Group, + Resource: resource, + }, + } +} + +func FromNodeHandlerToHandler(sync NodeHandler) generic.Handler { + return func(key string, obj runtime.Object) (ret runtime.Object, err error) { + var v *v1.Node + if obj == nil { + v, err = sync(key, nil) + } else { + v, err = sync(key, obj.(*v1.Node)) + } + if v == nil { + return nil, err + } + return v, err + } +} + +func (c *nodeController) Updater() generic.Updater { + return func(obj runtime.Object) (runtime.Object, error) { + newObj, err := c.Update(obj.(*v1.Node)) + if newObj == nil { + return nil, err + } + return newObj, err + } +} + +func UpdateNodeDeepCopyOnChange(client NodeClient, obj *v1.Node, handler func(obj *v1.Node) (*v1.Node, error)) (*v1.Node, error) { + if obj == nil { + return obj, nil + } + + copyObj := obj.DeepCopy() + newObj, err := handler(copyObj) + if newObj != nil { + copyObj = newObj + } + if obj.ResourceVersion == copyObj.ResourceVersion && !equality.Semantic.DeepEqual(obj, copyObj) { + return client.Update(copyObj) + } + + return copyObj, err +} + +func (c *nodeController) AddGenericHandler(ctx context.Context, name string, handler 
generic.Handler) { + c.controller.RegisterHandler(ctx, name, controller.SharedControllerHandlerFunc(handler)) +} + +func (c *nodeController) AddGenericRemoveHandler(ctx context.Context, name string, handler generic.Handler) { + c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), handler)) +} + +func (c *nodeController) OnChange(ctx context.Context, name string, sync NodeHandler) { + c.AddGenericHandler(ctx, name, FromNodeHandlerToHandler(sync)) +} + +func (c *nodeController) OnRemove(ctx context.Context, name string, sync NodeHandler) { + c.AddGenericHandler(ctx, name, generic.NewRemoveHandler(name, c.Updater(), FromNodeHandlerToHandler(sync))) +} + +func (c *nodeController) Enqueue(name string) { + c.controller.Enqueue("", name) +} + +func (c *nodeController) EnqueueAfter(name string, duration time.Duration) { + c.controller.EnqueueAfter("", name, duration) +} + +func (c *nodeController) Informer() cache.SharedIndexInformer { + return c.controller.Informer() +} + +func (c *nodeController) GroupVersionKind() schema.GroupVersionKind { + return c.gvk +} + +func (c *nodeController) Cache() NodeCache { + return &nodeCache{ + indexer: c.Informer().GetIndexer(), + resource: c.groupResource, + } +} + +func (c *nodeController) Create(obj *v1.Node) (*v1.Node, error) { + result := &v1.Node{} + return result, c.client.Create(context.TODO(), "", obj, result, metav1.CreateOptions{}) +} + +func (c *nodeController) Update(obj *v1.Node) (*v1.Node, error) { + result := &v1.Node{} + return result, c.client.Update(context.TODO(), "", obj, result, metav1.UpdateOptions{}) +} + +func (c *nodeController) UpdateStatus(obj *v1.Node) (*v1.Node, error) { + result := &v1.Node{} + return result, c.client.UpdateStatus(context.TODO(), "", obj, result, metav1.UpdateOptions{}) +} + +func (c *nodeController) Delete(name string, options *metav1.DeleteOptions) error { + if options == nil { + options = &metav1.DeleteOptions{} + } + return c.client.Delete(context.TODO(), "", name, *options) +} + +func (c *nodeController) Get(name string, options metav1.GetOptions) (*v1.Node, error) { + result := &v1.Node{} + return result, c.client.Get(context.TODO(), "", name, result, options) +} + +func (c *nodeController) List(opts metav1.ListOptions) (*v1.NodeList, error) { + result := &v1.NodeList{} + return result, c.client.List(context.TODO(), "", result, opts) +} + +func (c *nodeController) Watch(opts metav1.ListOptions) (watch.Interface, error) { + return c.client.Watch(context.TODO(), "", opts) +} + +func (c *nodeController) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (*v1.Node, error) { + result := &v1.Node{} + return result, c.client.Patch(context.TODO(), "", name, pt, data, result, metav1.PatchOptions{}, subresources...) 
+} + +type nodeCache struct { + indexer cache.Indexer + resource schema.GroupResource +} + +func (c *nodeCache) Get(name string) (*v1.Node, error) { + obj, exists, err := c.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(c.resource, name) + } + return obj.(*v1.Node), nil +} + +func (c *nodeCache) List(selector labels.Selector) (ret []*v1.Node, err error) { + + err = cache.ListAll(c.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Node)) + }) + + return ret, err +} + +func (c *nodeCache) AddIndexer(indexName string, indexer NodeIndexer) { + utilruntime.Must(c.indexer.AddIndexers(map[string]cache.IndexFunc{ + indexName: func(obj interface{}) (strings []string, e error) { + return indexer(obj.(*v1.Node)) + }, + })) +} + +func (c *nodeCache) GetByIndex(indexName, key string) (result []*v1.Node, err error) { + objs, err := c.indexer.ByIndex(indexName, key) + if err != nil { + return nil, err + } + result = make([]*v1.Node, 0, len(objs)) + for _, obj := range objs { + result = append(result, obj.(*v1.Node)) + } + return result, nil +} + +type NodeStatusHandler func(obj *v1.Node, status v1.NodeStatus) (v1.NodeStatus, error) + +type NodeGeneratingHandler func(obj *v1.Node, status v1.NodeStatus) ([]runtime.Object, v1.NodeStatus, error) + +func RegisterNodeStatusHandler(ctx context.Context, controller NodeController, condition condition.Cond, name string, handler NodeStatusHandler) { + statusHandler := &nodeStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, FromNodeHandlerToHandler(statusHandler.sync)) +} + +func RegisterNodeGeneratingHandler(ctx context.Context, controller NodeController, apply apply.Apply, + condition condition.Cond, name string, handler NodeGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &nodeGeneratingHandler{ + NodeGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterNodeStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type nodeStatusHandler struct { + client NodeClient + condition condition.Cond + handler NodeStatusHandler +} + +func (a *nodeStatusHandler) sync(key string, obj *v1.Node) (*v1.Node, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type nodeGeneratingHandler struct { + NodeGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string +} + +func (a *nodeGeneratingHandler) Remove(key string, obj *v1.Node) (*v1.Node, error) { + if obj != nil { + return obj, 
nil + } + + obj = &v1.Node{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +func (a *nodeGeneratingHandler) Handle(obj *v1.Node, status v1.NodeStatus) (v1.NodeStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.NodeGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + + return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) +} diff --git a/pkg/util/common.go b/pkg/util/common.go index 9c3db89..72158bc 100644 --- a/pkg/util/common.go +++ b/pkg/util/common.go @@ -10,7 +10,9 @@ const ( ExcludedMark = "EXCLUDED" ReservedMark = "RESERVED" - AgentSuffixName = "agent" + AgentSuffixName = "agent" + NodeArgsAnnotationKey = "rke2.io/node-args" + ServiceCIDRFlag = "--service-cidr" ) func agentConcatName(name ...string) string { diff --git a/pkg/util/fakeclient/ippool.go b/pkg/util/fakeclient/ippool.go index dddef2d..7c3a6d0 100644 --- a/pkg/util/fakeclient/ippool.go +++ b/pkg/util/fakeclient/ippool.go @@ -52,7 +52,8 @@ func (c IPPoolCache) List(namespace string, selector labels.Selector) ([]*networ } result := make([]*networkv1.IPPool, 0, len(list.Items)) for _, ipPool := range list.Items { - result = append(result, &ipPool) + i := ipPool + result = append(result, &i) } return result, err } diff --git a/pkg/util/fakeclient/networkattachmentdefinition.go b/pkg/util/fakeclient/networkattachmentdefinition.go index be7bab4..4f4f499 100644 --- a/pkg/util/fakeclient/networkattachmentdefinition.go +++ b/pkg/util/fakeclient/networkattachmentdefinition.go @@ -49,7 +49,8 @@ func (c NetworkAttachmentDefinitionCache) List(namespace string, selector labels } result := make([]*cniv1.NetworkAttachmentDefinition, 0, len(list.Items)) for _, nad := range list.Items { - result = append(result, &nad) + n := nad + result = append(result, &n) } return result, err } diff --git a/pkg/util/fakeclient/node.go b/pkg/util/fakeclient/node.go new file mode 100644 index 0000000..e202a1b --- /dev/null +++ b/pkg/util/fakeclient/node.go @@ -0,0 +1,65 @@ +package fakeclient + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + typecorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + + ctlcorev1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/core/v1" +) + +type NodeClient func() typecorev1.NodeInterface + +func (c NodeClient) Update(node *corev1.Node) (*corev1.Node, error) { + panic("implement me") +} +func (c NodeClient) Get(name string, options metav1.GetOptions) (*corev1.Node, error) { + panic("implement me") +} +func (c NodeClient) Create(node *corev1.Node) (*corev1.Node, error) { + panic("implement me") +} +func (c NodeClient) Delete(name string, options *metav1.DeleteOptions) error { + panic("implement me") +} +func (c NodeClient) List(opts metav1.ListOptions) (*corev1.NodeList, error) { + panic("implement me") +} +func (c NodeClient) UpdateStatus(node *corev1.Node) (*corev1.Node, error) { + panic("implement me") +} +func (c NodeClient) Watch(opts metav1.ListOptions) (watch.Interface, error) { + panic("implement me") +} +func (c NodeClient) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result 
*corev1.Node, err error) { + panic("implement me") +} + +type NodeCache func() typecorev1.NodeInterface + +func (c NodeCache) Get(name string) (*corev1.Node, error) { + panic("implement me") +} +func (c NodeCache) List(selector labels.Selector) ([]*corev1.Node, error) { + list, err := c().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, err + } + result := make([]*corev1.Node, 0, len(list.Items)) + for _, node := range list.Items { + n := node + result = append(result, &n) + } + return result, err +} +func (c NodeCache) AddIndexer(indexName string, indexer ctlcorev1.NodeIndexer) { + panic("implement me") +} +func (c NodeCache) GetByIndex(indexName, key string) ([]*corev1.Node, error) { + panic("implement me") +} diff --git a/pkg/util/fakeclient/pod.go b/pkg/util/fakeclient/pod.go index ed4e445..c9d126e 100644 --- a/pkg/util/fakeclient/pod.go +++ b/pkg/util/fakeclient/pod.go @@ -52,7 +52,8 @@ func (c PodCache) List(namespace string, selector labels.Selector) ([]*corev1.Po } result := make([]*corev1.Pod, 0, len(list.Items)) for _, pod := range list.Items { - result = append(result, &pod) + p := pod + result = append(result, &p) } return result, err } diff --git a/pkg/util/fakeclient/virtualmachinenetworkconfig.go b/pkg/util/fakeclient/virtualmachinenetworkconfig.go index 2d45dcc..2440d41 100644 --- a/pkg/util/fakeclient/virtualmachinenetworkconfig.go +++ b/pkg/util/fakeclient/virtualmachinenetworkconfig.go @@ -52,7 +52,8 @@ func (c VirtualMachineNetworkConfigCache) List(namespace string, selector labels } result := make([]*networkv1.VirtualMachineNetworkConfig, 0, len(list.Items)) for _, vmNetCfg := range list.Items { - result = append(result, &vmNetCfg) + v := vmNetCfg + result = append(result, &v) } return result, err } diff --git a/pkg/util/network.go b/pkg/util/network.go index 69614fe..e4db840 100644 --- a/pkg/util/network.go +++ b/pkg/util/network.go @@ -1,10 +1,13 @@ package util import ( + "encoding/json" "fmt" "net" "net/netip" + corev1 "k8s.io/api/core/v1" + networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1" ) @@ -18,6 +21,40 @@ type PoolInfo struct { RouterIPAddr netip.Addr } +func GetServiceCIDRFromNode(node *corev1.Node) (string, error) { + if node.Annotations == nil { + return "", fmt.Errorf("service CIDR not found for node %s", node.Name) + } + + nodeArgs, ok := node.Annotations[NodeArgsAnnotationKey] + if !ok { + return "", fmt.Errorf("annotation %s not found for node %s", NodeArgsAnnotationKey, node.Name) + } + + var argList []string + if err := json.Unmarshal([]byte(nodeArgs), &argList); err != nil { + return "", err + } + + var serviceCIDRIndex int + for i, val := range argList { + if val == ServiceCIDRFlag { + // The "rke2.io/node-args" annotation in node objects contains various node arguments. + // For example, '[...,"--cluster-cidr","10.52.0.0/16","--service-cidr","10.53.0.0/16", ...]' + // What we need here is the value of the "--service-cidr" flag. + // Its value sits at the index immediately following the flag, i.e. the flag's index plus one. 
+ serviceCIDRIndex = i + 1 + break + } + } + + if serviceCIDRIndex == 0 || serviceCIDRIndex >= len(argList) { + return "", fmt.Errorf("service CIDR not found for node %s", node.Name) + } + + return argList[serviceCIDRIndex], nil +} + func LoadCIDR(cidr string) (ipNet *net.IPNet, networkIPAddr netip.Addr, broadcastIPAddr netip.Addr, err error) { _, ipNet, err = net.ParseCIDR(cidr) if err != nil { diff --git a/pkg/webhook/ippool/validator.go b/pkg/webhook/ippool/validator.go index 131fc9d..3787920 100644 --- a/pkg/webhook/ippool/validator.go +++ b/pkg/webhook/ippool/validator.go @@ -9,9 +9,11 @@ import ( "github.com/rancher/wrangler/pkg/kv" "github.com/sirupsen/logrus" admissionregv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1" + ctlcorev1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/core/v1" ctlcniv1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/k8s.cni.cncf.io/v1" ctlnetworkv1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/network.harvesterhci.io/v1alpha1" "github.com/harvester/vm-dhcp-controller/pkg/util" @@ -22,12 +24,18 @@ type Validator struct { admission.DefaultValidator nadCache ctlcniv1.NetworkAttachmentDefinitionCache + nodeCache ctlcorev1.NodeCache vmnetcfgCache ctlnetworkv1.VirtualMachineNetworkConfigCache } -func NewValidator(nadCache ctlcniv1.NetworkAttachmentDefinitionCache, vmnetcfgCache ctlnetworkv1.VirtualMachineNetworkConfigCache) *Validator { +func NewValidator( + nadCache ctlcniv1.NetworkAttachmentDefinitionCache, + nodeCache ctlcorev1.NodeCache, + vmnetcfgCache ctlnetworkv1.VirtualMachineNetworkConfigCache, +) *Validator { return &Validator{ nadCache: nadCache, + nodeCache: nodeCache, vmnetcfgCache: vmnetcfgCache, } } @@ -46,6 +54,10 @@ func (v *Validator) Create(_ *admission.Request, newObj runtime.Object) error { return fmt.Errorf(webhook.CreateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) } + if err := v.checkCIDR(ipPool.Spec.IPv4Config.CIDR); err != nil { + return fmt.Errorf(webhook.CreateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) + } + if err := v.checkPoolRange(poolInfo); err != nil { return fmt.Errorf(webhook.CreateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) } @@ -73,7 +85,7 @@ func (v *Validator) Update(_ *admission.Request, _, newObj runtime.Object) error // sanity check poolInfo, err := util.LoadPool(ipPool) if err != nil { - return fmt.Errorf(webhook.CreateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) + return fmt.Errorf(webhook.UpdateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) } var allocatedIPAddrList []netip.Addr @@ -82,19 +94,23 @@ func (v *Validator) Update(_ *admission.Request, _, newObj runtime.Object) error } if err := v.checkNAD(ipPool.Spec.NetworkName); err != nil { - return fmt.Errorf(webhook.CreateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) + return fmt.Errorf(webhook.UpdateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) + } + + if err := v.checkCIDR(ipPool.Spec.IPv4Config.CIDR); err != nil { + return fmt.Errorf(webhook.UpdateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) } if err := v.checkPoolRange(poolInfo); err != nil { - return fmt.Errorf(webhook.CreateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) + return fmt.Errorf(webhook.UpdateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) } if err := v.checkServerIP(poolInfo, allocatedIPAddrList...); err != nil { - return 
fmt.Errorf(webhook.CreateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) + return fmt.Errorf(webhook.UpdateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) } if err := v.checkRouter(poolInfo); err != nil { - return fmt.Errorf(webhook.CreateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) + return fmt.Errorf(webhook.UpdateErr, "IPPool", ipPool.Namespace, ipPool.Name, err) } return nil @@ -136,6 +152,36 @@ func (v *Validator) checkNAD(namespacedName string) error { return err } +func (v *Validator) checkCIDR(cidr string) error { + ipNet, _, _, err := util.LoadCIDR(cidr) + if err != nil { + return err + } + + nodes, err := v.nodeCache.List(labels.Everything()) + if err != nil { + return err + } + + for _, node := range nodes { + serviceCIDR, err := util.GetServiceCIDRFromNode(node) + if err != nil { + return err + } + + svcIPNet, _, _, err := util.LoadCIDR(serviceCIDR) + if err != nil { + return err + } + + if ipNet.Contains(svcIPNet.IP) || svcIPNet.Contains(ipNet.IP) { + return fmt.Errorf("cidr %s overlaps service cidr %s", cidr, serviceCIDR) + } + } + + return nil +} + func (v *Validator) checkPoolRange(pi util.PoolInfo) error { if pi.StartIPAddr.IsValid() { if !pi.IPNet.Contains(pi.StartIPAddr.AsSlice()) { diff --git a/pkg/webhook/ippool/validator_test.go b/pkg/webhook/ippool/validator_test.go index 309b294..1da92ef 100644 --- a/pkg/webhook/ippool/validator_test.go +++ b/pkg/webhook/ippool/validator_test.go @@ -7,11 +7,15 @@ import ( "github.com/harvester/webhook/pkg/server/admission" cniv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + k8sfake "k8s.io/client-go/kubernetes/fake" networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1" "github.com/harvester/vm-dhcp-controller/pkg/controller/ippool" "github.com/harvester/vm-dhcp-controller/pkg/generated/clientset/versioned/fake" + "github.com/harvester/vm-dhcp-controller/pkg/util" "github.com/harvester/vm-dhcp-controller/pkg/util/fakeclient" ) @@ -21,6 +25,8 @@ const ( testIPPoolNamespace = testNADNamespace testIPPoolName = testNADName testCIDR = "192.168.0.0/24" + testCIDROverlap = "10.53.0.0/24" + testServiceCIDR = "10.53.0.0/16" testServerIPOutOfRange = "192.168.100.2" testRouter = "192.168.0.1" testNetworkName = testNADNamespace + "/" + testNADName @@ -38,6 +44,7 @@ func TestValidator_Create(t *testing.T) { type input struct { ipPool *networkv1.IPPool nad *cniv1.NetworkAttachmentDefinition + node *corev1.Node } type output struct { @@ -280,6 +287,26 @@ func TestValidator_Create(t *testing.T) { err: fmt.Errorf("could not create IPPool default/net-1 because network-attachment-definitions.k8s.cni.cncf.io \"%s\" not found", "nonexist"), }, }, + { + name: "cidr overlaps cluster's service cidr", + given: input{ + ipPool: newTestIPPoolBuilder(). + CIDR(testCIDROverlap). 
+ NetworkName(testNetworkName).Build(), + nad: newTestNetworkAttachmentDefinitionBuilder().Build(), + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + util.NodeArgsAnnotationKey: fmt.Sprintf("[\"%s\", \"%s\"]", util.ServiceCIDRFlag, testServiceCIDR), + }, + Name: "node-0", + }, + }, + }, + expected: output{ + err: fmt.Errorf("could not create IPPool %s/%s because cidr %s overlaps service cidr %s", testIPPoolNamespace, testIPPoolName, testCIDROverlap, testServiceCIDR), + }, + }, } nadGVR := schema.GroupVersionResource{ @@ -293,9 +320,16 @@ func TestValidator_Create(t *testing.T) { err := clientset.Tracker().Create(nadGVR, tc.given.nad, tc.given.nad.Namespace) assert.Nil(t, err, "mock resource should add into fake controller tracker") + k8sclientset := k8sfake.NewSimpleClientset() + if tc.given.node != nil { + err := k8sclientset.Tracker().Add(tc.given.node) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + } + nadCache := fakeclient.NetworkAttachmentDefinitionCache(clientset.K8sCniCncfIoV1().NetworkAttachmentDefinitions) + nodeCache := fakeclient.NodeCache(k8sclientset.CoreV1().Nodes) vmnetCache := fakeclient.VirtualMachineNetworkConfigCache(clientset.NetworkV1alpha1().VirtualMachineNetworkConfigs) - validator := NewValidator(nadCache, vmnetCache) + validator := NewValidator(nadCache, nodeCache, vmnetCache) err = validator.Create(&admission.Request{}, tc.given.ipPool) @@ -312,6 +346,7 @@ func TestValidator_Update(t *testing.T) { oldIPPool *networkv1.IPPool newIPPool *networkv1.IPPool nad *cniv1.NetworkAttachmentDefinition + node *corev1.Node } type output struct { @@ -351,7 +386,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because server ip %s is not within subnet", testIPPoolNamespace, testIPPoolName, testServerIPOutOfRange), + err: fmt.Errorf("could not update IPPool %s/%s because server ip %s is not within subnet", testIPPoolNamespace, testIPPoolName, testServerIPOutOfRange), }, }, { @@ -368,7 +403,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because server ip %s cannot be the same as network ip", testIPPoolNamespace, testIPPoolName, "192.168.0.0"), + err: fmt.Errorf("could not update IPPool %s/%s because server ip %s cannot be the same as network ip", testIPPoolNamespace, testIPPoolName, "192.168.0.0"), }, }, { @@ -385,7 +420,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because server ip %s cannot be the same as broadcast ip", testIPPoolNamespace, testIPPoolName, "192.168.0.255"), + err: fmt.Errorf("could not update IPPool %s/%s because server ip %s cannot be the same as broadcast ip", testIPPoolNamespace, testIPPoolName, "192.168.0.255"), }, }, { @@ -404,7 +439,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because server ip %s cannot be the same as router ip", testIPPoolNamespace, testIPPoolName, "192.168.0.254"), + err: fmt.Errorf("could not update IPPool %s/%s because server ip %s cannot be the same as router ip", testIPPoolNamespace, testIPPoolName, "192.168.0.254"), }, }, { @@ -424,7 +459,7 @@ func 
TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because server ip %s is already allocated", testIPPoolNamespace, testIPPoolName, "192.168.0.100"), + err: fmt.Errorf("could not update IPPool %s/%s because server ip %s is already allocated", testIPPoolNamespace, testIPPoolName, "192.168.0.100"), }, }, { @@ -437,7 +472,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because ParseAddr(\"%s\"): IPv4 field has value >255", testIPPoolNamespace, testIPPoolName, "192.168.0.1000"), + err: fmt.Errorf("could not update IPPool %s/%s because ParseAddr(\"%s\"): IPv4 field has value >255", testIPPoolNamespace, testIPPoolName, "192.168.0.1000"), }, }, { @@ -450,7 +485,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because router ip %s is not within subnet", testIPPoolNamespace, testIPPoolName, "192.168.1.1"), + err: fmt.Errorf("could not update IPPool %s/%s because router ip %s is not within subnet", testIPPoolNamespace, testIPPoolName, "192.168.1.1"), }, }, { @@ -463,7 +498,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because router ip %s is the same as network ip", testIPPoolNamespace, testIPPoolName, "192.168.0.0"), + err: fmt.Errorf("could not update IPPool %s/%s because router ip %s is the same as network ip", testIPPoolNamespace, testIPPoolName, "192.168.0.0"), }, }, { @@ -476,7 +511,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because router ip %s is the same as broadcast ip", testIPPoolNamespace, testIPPoolName, "192.168.0.255"), + err: fmt.Errorf("could not update IPPool %s/%s because router ip %s is the same as broadcast ip", testIPPoolNamespace, testIPPoolName, "192.168.0.255"), }, }, { @@ -489,7 +524,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because ParseAddr(\"%s\"): IPv4 field has value >255", testIPPoolNamespace, testIPPoolName, "192.168.0.1000"), + err: fmt.Errorf("could not update IPPool %s/%s because ParseAddr(\"%s\"): IPv4 field has value >255", testIPPoolNamespace, testIPPoolName, "192.168.0.1000"), }, }, { @@ -502,7 +537,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because start ip %s is not within subnet", testIPPoolNamespace, testIPPoolName, "192.168.1.100"), + err: fmt.Errorf("could not update IPPool %s/%s because start ip %s is not within subnet", testIPPoolNamespace, testIPPoolName, "192.168.1.100"), }, }, { @@ -515,7 +550,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because start ip %s is the same as network ip", testIPPoolNamespace, testIPPoolName, "192.168.0.0"), + err: fmt.Errorf("could not update IPPool %s/%s because start ip %s is the same as network ip", 
testIPPoolNamespace, testIPPoolName, "192.168.0.0"), }, }, { @@ -528,7 +563,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because start ip %s is the same as broadcast ip", testIPPoolNamespace, testIPPoolName, "192.168.0.255"), + err: fmt.Errorf("could not update IPPool %s/%s because start ip %s is the same as broadcast ip", testIPPoolNamespace, testIPPoolName, "192.168.0.255"), }, }, { @@ -541,7 +576,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because ParseAddr(\"%s\"): IPv4 field has value >255", testIPPoolNamespace, testIPPoolName, "192.168.0.1000"), + err: fmt.Errorf("could not update IPPool %s/%s because ParseAddr(\"%s\"): IPv4 field has value >255", testIPPoolNamespace, testIPPoolName, "192.168.0.1000"), }, }, { @@ -554,7 +589,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because end ip %s is not within subnet", testIPPoolNamespace, testIPPoolName, "192.168.1.100"), + err: fmt.Errorf("could not update IPPool %s/%s because end ip %s is not within subnet", testIPPoolNamespace, testIPPoolName, "192.168.1.100"), }, }, { @@ -567,7 +602,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because end ip %s is the same as network ip", testIPPoolNamespace, testIPPoolName, "192.168.0.0"), + err: fmt.Errorf("could not update IPPool %s/%s because end ip %s is the same as network ip", testIPPoolNamespace, testIPPoolName, "192.168.0.0"), }, }, { @@ -580,7 +615,7 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool %s/%s because end ip %s is the same as broadcast ip", testIPPoolNamespace, testIPPoolName, "192.168.0.255"), + err: fmt.Errorf("could not update IPPool %s/%s because end ip %s is the same as broadcast ip", testIPPoolNamespace, testIPPoolName, "192.168.0.255"), }, }, { @@ -592,7 +627,27 @@ func TestValidator_Update(t *testing.T) { nad: newTestNetworkAttachmentDefinitionBuilder().Build(), }, expected: output{ - err: fmt.Errorf("could not create IPPool default/net-1 because network-attachment-definitions.k8s.cni.cncf.io \"%s\" not found", "nonexist"), + err: fmt.Errorf("could not update IPPool default/net-1 because network-attachment-definitions.k8s.cni.cncf.io \"%s\" not found", "nonexist"), + }, + }, + { + name: "cidr overlaps cluster's service cidr", + given: input{ + newIPPool: newTestIPPoolBuilder(). + CIDR(testCIDROverlap). 
+ NetworkName(testNetworkName).Build(), + nad: newTestNetworkAttachmentDefinitionBuilder().Build(), + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + util.NodeArgsAnnotationKey: fmt.Sprintf("[\"%s\", \"%s\"]", util.ServiceCIDRFlag, testServiceCIDR), + }, + Name: "node-0", + }, + }, + }, + expected: output{ + err: fmt.Errorf("could not update IPPool %s/%s because cidr %s overlaps service cidr %s", testIPPoolNamespace, testIPPoolName, testCIDROverlap, testServiceCIDR), }, }, } @@ -608,9 +663,16 @@ func TestValidator_Update(t *testing.T) { err := clientset.Tracker().Create(nadGVR, tc.given.nad, tc.given.nad.Namespace) assert.Nil(t, err, "mock resource should add into fake controller tracker") + k8sclientset := k8sfake.NewSimpleClientset() + if tc.given.node != nil { + err := k8sclientset.Tracker().Add(tc.given.node) + assert.Nil(t, err, "mock resource should add into fake controller tracker") + } + nadCache := fakeclient.NetworkAttachmentDefinitionCache(clientset.K8sCniCncfIoV1().NetworkAttachmentDefinitions) + nodeCache := fakeclient.NodeCache(k8sclientset.CoreV1().Nodes) vmnetCache := fakeclient.VirtualMachineNetworkConfigCache(clientset.NetworkV1alpha1().VirtualMachineNetworkConfigs) - validator := NewValidator(nadCache, vmnetCache) + validator := NewValidator(nadCache, nodeCache, vmnetCache) err = validator.Update(&admission.Request{}, tc.given.oldIPPool, tc.given.newIPPool)
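
Note (reviewer's aside, not part of the patch): the validation added above hinges on two facts — the cluster's service CIDR is recoverable from the rke2.io/node-args annotation, and two networks overlap exactly when either one contains the other's base address. The standalone sketch below mirrors that logic using only the standard library; the annotation payload and CIDR values are made-up examples for illustration, not values taken from a real cluster.

package main

import (
	"encoding/json"
	"fmt"
	"net"
)

// serviceCIDRFromArgs extracts the value that follows the "--service-cidr"
// flag in an rke2.io/node-args style JSON array, mirroring what
// util.GetServiceCIDRFromNode does with the node annotation.
func serviceCIDRFromArgs(nodeArgs string) (string, error) {
	var args []string
	if err := json.Unmarshal([]byte(nodeArgs), &args); err != nil {
		return "", err
	}
	for i, val := range args {
		if val == "--service-cidr" && i+1 < len(args) {
			return args[i+1], nil
		}
	}
	return "", fmt.Errorf("service CIDR not found in node args")
}

// cidrsOverlap reports whether two CIDRs share any addresses. Two networks
// overlap if and only if one contains the base address of the other, which
// is the same test checkCIDR performs via util.LoadCIDR.
func cidrsOverlap(a, b string) (bool, error) {
	_, aNet, err := net.ParseCIDR(a)
	if err != nil {
		return false, err
	}
	_, bNet, err := net.ParseCIDR(b)
	if err != nil {
		return false, err
	}
	return aNet.Contains(bNet.IP) || bNet.Contains(aNet.IP), nil
}

func main() {
	// Hypothetical node args carrying the default RKE2 service CIDR.
	nodeArgs := `["server","--cluster-cidr","10.52.0.0/16","--service-cidr","10.53.0.0/16"]`

	serviceCIDR, err := serviceCIDRFromArgs(nodeArgs)
	if err != nil {
		panic(err)
	}

	// 10.53.0.0/24 falls inside 10.53.0.0/16, so the webhook would reject
	// an IPPool with this CIDR.
	overlap, err := cidrsOverlap("10.53.0.0/24", serviceCIDR)
	if err != nil {
		panic(err)
	}
	fmt.Printf("pool overlaps service CIDR %s: %t\n", serviceCIDR, overlap) // true
}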