fix(webhook): revert runtime decision of service cidr
Retrieving the cluster's service CIDR at runtime is overkill, since it
rarely changes and is nearly identical in every Harvester deployment.
Revert the relevant code and let users input the service CIDR string
via the webhook's command-line argument to retain flexibility. The
default value is still `10.53.0.0/16`.

Signed-off-by: Zespre Chang <zespre.chang@suse.com>
starbops committed Mar 11, 2024
1 parent 569c408 commit 69c6fd2
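
For context, a minimal sketch of the flag-plus-default pattern the message describes, assuming cobra. The flag name and default mirror the diff below; validating the value with netip.ParsePrefix at startup is an illustrative assumption, not code from this commit.

package main

import (
	"fmt"
	"net/netip"

	"github.com/spf13/cobra"
)

const defaultServiceCIDR = "10.53.0.0/16"

var serviceCIDR string

var rootCmd = &cobra.Command{
	Use: "vm-dhcp-webhook",
	// Check the flag once at startup instead of discovering the CIDR at runtime.
	PreRunE: func(cmd *cobra.Command, args []string) error {
		if _, err := netip.ParsePrefix(serviceCIDR); err != nil {
			return fmt.Errorf("invalid --service-cidr %q: %w", serviceCIDR, err)
		}
		return nil
	},
	RunE: func(cmd *cobra.Command, args []string) error {
		fmt.Printf("using service CIDR %s\n", serviceCIDR)
		return nil
	},
}

func main() {
	rootCmd.Flags().StringVar(&serviceCIDR, "service-cidr", defaultServiceCIDR,
		"The service CIDR that the cluster is currently using")
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}

Running with --service-cidr=10.96.0.0/12 would override the default, e.g. for a cluster bootstrapped with a non-standard service network.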
Showing 6 changed files with 81 additions and 101 deletions.
cmd/webhook/root.go (1 addition & 1 deletion)

@@ -61,7 +61,7 @@ func init() {
 	rootCmd.PersistentFlags().BoolVar(&logTrace, "trace", trace, "set logging level to trace")

 	rootCmd.Flags().StringVar(&name, "name", os.Getenv("VM_DHCP_AGENT_NAME"), "The name of the vm-dhcp-webhook instance")
-	rootCmd.Flags().StringVar(&serviceCIDR, "service-cidr", defaultServiceCIDR, "")
+	rootCmd.Flags().StringVar(&serviceCIDR, "service-cidr", defaultServiceCIDR, "The service CIDR that the cluster is currently using")

 	rootCmd.Flags().StringVar(&options.ControllerUsername, "controller-user", "harvester-vm-dhcp-controller", "The harvester controller username")
 	rootCmd.Flags().StringVar(&options.GarbageCollectionUsername, "gc-user", "system:serviceaccount:kube-system:generic-garbage-collector", "The system username that performs garbage collection")
cmd/webhook/run.go (3 additions & 6 deletions)

@@ -10,7 +10,6 @@ import (
 	"k8s.io/client-go/rest"

 	ctlcore "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/core"
-	ctlcorev1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/core/v1"
 	ctlcni "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/k8s.cni.cncf.io"
 	ctlcniv1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/k8s.cni.cncf.io/v1"
 	ctlkubevirt "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/kubevirt.io"
@@ -26,9 +25,8 @@ type caches struct {
 	ippoolCache   ctlnetworkv1.IPPoolCache
 	vmnetcfgCache ctlnetworkv1.VirtualMachineNetworkConfigCache

-	nadCache  ctlcniv1.NetworkAttachmentDefinitionCache
-	nodeCache ctlcorev1.NodeCache
-	vmCache   ctlkubevirtv1.VirtualMachineCache
+	nadCache ctlcniv1.NetworkAttachmentDefinitionCache
+	vmCache  ctlkubevirtv1.VirtualMachineCache
 }

 func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*caches, error) {
@@ -51,7 +49,6 @@ func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*caches, error) {
 		ippoolCache:   networkFactory.Network().V1alpha1().IPPool().Cache(),
 		vmnetcfgCache: networkFactory.Network().V1alpha1().VirtualMachineNetworkConfig().Cache(),
 		nadCache:      cniFactory.K8s().V1().NetworkAttachmentDefinition().Cache(),
-		nodeCache:     coreFactory.Core().V1().Node().Cache(),
 		vmCache:       kubevirtFactory.Kubevirt().V1().VirtualMachine().Cache(),
 	}
@@ -76,7 +73,7 @@ func run(ctx context.Context, cfg *rest.Config, options *config.Options) error {
 	webhookServer := server.NewWebhookServer(ctx, cfg, name, options)

 	if err := webhookServer.RegisterValidators(
-		ippool.NewValidator(serviceCIDR, c.nadCache, c.nodeCache, c.vmnetcfgCache),
+		ippool.NewValidator(serviceCIDR, c.nadCache, c.vmnetcfgCache),
 		vmnetcfg.NewValidator(c.ippoolCache),
 	); err != nil {
 		return err
pkg/webhook/common.go (3 additions & 3 deletions)

@@ -1,7 +1,7 @@
 package webhook

 const (
-	CreateErr = "could not create %s %s/%s because %w"
-	UpdateErr = "could not update %s %s/%s because %w"
-	DeleteErr = "could not delete %s %s/%s because %w"
+	CreateErr = "cannot create %s %s/%s because %w"
+	UpdateErr = "cannot update %s %s/%s because %w"
+	DeleteErr = "cannot delete %s %s/%s because %w"
 )
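
Since these constants end in a %w verb, they are meant to be passed to fmt.Errorf so the underlying error stays inspectable up the chain; a hypothetical usage sketch (the namespace and name values are made up):

package main

import (
	"errors"
	"fmt"
)

const CreateErr = "cannot create %s %s/%s because %w"

func main() {
	underlying := errors.New("fail to assign ip for dhcp server")
	err := fmt.Errorf(CreateErr, "IPPool", "default", "net-1", underlying)

	fmt.Println(err)                        // cannot create IPPool default/net-1 because fail to assign ip for dhcp server
	fmt.Println(errors.Is(err, underlying)) // true: %w keeps the error chain intact
}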
pkg/webhook/ippool/mutator_test.go (3 additions & 3 deletions)

@@ -194,7 +194,7 @@ func TestMutator_Create(t *testing.T) {
 				Exclude("172.19.64.129", "172.19.64.131", "172.19.64.132", "172.19.64.133", "172.19.64.134").Build(),
 			},
 			expected: output{
-				err: fmt.Errorf("could not create IPPool %s/%s because fail to assign ip for dhcp server", testIPPoolNamespace, testIPPoolName),
+				err: fmt.Errorf("cannot create IPPool %s/%s because fail to assign ip for dhcp server", testIPPoolNamespace, testIPPoolName),
 			},
 		},
 		{
@@ -204,7 +204,7 @@ func TestMutator_Create(t *testing.T) {
 				CIDR("172.19.64.128/32").Build(),
 			},
 			expected: output{
-				err: fmt.Errorf("could not create IPPool %s/%s because fail to assign ip for dhcp server", testIPPoolNamespace, testIPPoolName),
+				err: fmt.Errorf("cannot create IPPool %s/%s because fail to assign ip for dhcp server", testIPPoolNamespace, testIPPoolName),
 			},
 		},
 		{
@@ -214,7 +214,7 @@ func TestMutator_Create(t *testing.T) {
 				CIDR("172.19.64.128/31").Build(),
 			},
 			expected: output{
-				err: fmt.Errorf("could not create IPPool %s/%s because fail to assign ip for dhcp server", testIPPoolNamespace, testIPPoolName),
+				err: fmt.Errorf("cannot create IPPool %s/%s because fail to assign ip for dhcp server", testIPPoolNamespace, testIPPoolName),
 			},
 		},
 		{
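
The /32 and /31 cases above fail because such pools have no host address left for the DHCP server once the network and broadcast addresses are reserved. A quick sketch of that arithmetic (not the mutator's actual allocation logic):

package main

import (
	"fmt"
	"net/netip"
)

// usableHosts counts the addresses in an IPv4 prefix that remain
// after reserving the network and broadcast addresses.
func usableHosts(p netip.Prefix) int {
	total := 1 << (32 - p.Bits())
	if total <= 2 {
		return 0 // /31 and /32 leave nothing to assign
	}
	return total - 2
}

func main() {
	for _, c := range []string{"172.19.64.128/32", "172.19.64.128/31", "172.19.64.128/28"} {
		fmt.Printf("%s -> %d usable hosts\n", c, usableHosts(netip.MustParsePrefix(c)))
	}
	// Output:
	// 172.19.64.128/32 -> 0 usable hosts
	// 172.19.64.128/31 -> 0 usable hosts
	// 172.19.64.128/28 -> 14 usable hosts
}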
pkg/webhook/ippool/validator.go (4 additions & 44 deletions)

@@ -2,20 +2,16 @@ package ippool

 import (
 	"fmt"
-	"net"
 	"net/netip"
 	"strings"

 	"github.com/harvester/webhook/pkg/server/admission"
 	"github.com/rancher/wrangler/pkg/kv"
 	"github.com/sirupsen/logrus"
 	admissionregv1 "k8s.io/api/admissionregistration/v1"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"

 	networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1"
-	ctlcorev1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/core/v1"
 	ctlcniv1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/k8s.cni.cncf.io/v1"
 	ctlnetworkv1 "github.com/harvester/vm-dhcp-controller/pkg/generated/controllers/network.harvesterhci.io/v1alpha1"
 	"github.com/harvester/vm-dhcp-controller/pkg/util"
@@ -28,20 +24,17 @@ type Validator struct {
 	serviceCIDR string

 	nadCache      ctlcniv1.NetworkAttachmentDefinitionCache
-	nodeCache     ctlcorev1.NodeCache
 	vmnetcfgCache ctlnetworkv1.VirtualMachineNetworkConfigCache
 }

 func NewValidator(
 	serviceCIDR string,
 	nadCache ctlcniv1.NetworkAttachmentDefinitionCache,
-	nodeCache ctlcorev1.NodeCache,
 	vmnetcfgCache ctlnetworkv1.VirtualMachineNetworkConfigCache,
 ) *Validator {
 	return &Validator{
 		serviceCIDR:   serviceCIDR,
 		nadCache:      nadCache,
-		nodeCache:     nodeCache,
 		vmnetcfgCache: vmnetcfgCache,
 	}
 }
@@ -167,15 +160,7 @@ func (v *Validator) checkCIDR(cidr string) error {
 		return nil
 	}

-	sets := labels.Set{
-		util.ManagementNodeLabelKey: "true",
-	}
-	mgmtNodes, err := v.nodeCache.List(sets.AsSelector())
-	if err != nil {
-		return err
-	}
-
-	svcIPNet, err := consolidateServiceCIDRs(mgmtNodes, v.serviceCIDR)
+	svcIPNet, _, _, err := util.LoadCIDR(v.serviceCIDR)
 	if err != nil {
 		return err
 	}
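
The hunk above only shows the parse; the subsequent overlap test against the pool CIDR is out of frame. A sketch of how such a check can be expressed with net/netip (an assumption about the intent, since the actual code works with util.LoadCIDR and *net.IPNet):

package main

import (
	"fmt"
	"net/netip"
)

// cidrsOverlap reports whether two prefixes share any address: that is
// the case exactly when one contains the other's base address.
func cidrsOverlap(a, b netip.Prefix) bool {
	return a.Contains(b.Addr()) || b.Contains(a.Addr())
}

func main() {
	svc := netip.MustParsePrefix("10.53.0.0/16")
	for _, pool := range []string{"10.53.1.0/24", "172.19.64.0/24"} {
		p := netip.MustParsePrefix(pool)
		if cidrsOverlap(p, svc) {
			fmt.Printf("reject: pool %s overlaps service CIDR %s\n", p, svc)
		} else {
			fmt.Printf("accept: pool %s\n", p)
		}
	}
}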
@@ -239,15 +224,15 @@ func (v *Validator) checkServerIP(pi util.PoolInfo, unallocatables ...netip.Addr) error {
 	}

 	if pi.ServerIPAddr.As4() == pi.NetworkIPAddr.As4() {
-		return fmt.Errorf("server ip %s cannot be the same as network ip", pi.ServerIPAddr)
+		return fmt.Errorf("server ip %s is the same as network ip", pi.ServerIPAddr)
 	}

 	if pi.ServerIPAddr.As4() == pi.BroadcastIPAddr.As4() {
-		return fmt.Errorf("server ip %s cannot be the same as broadcast ip", pi.ServerIPAddr)
+		return fmt.Errorf("server ip %s is the same as broadcast ip", pi.ServerIPAddr)
 	}

 	if pi.RouterIPAddr.IsValid() && pi.ServerIPAddr.As4() == pi.RouterIPAddr.As4() {
-		return fmt.Errorf("server ip %s cannot be the same as router ip", pi.ServerIPAddr)
+		return fmt.Errorf("server ip %s is the same as router ip", pi.ServerIPAddr)
 	}

 	for _, ip := range unallocatables {
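
PoolInfo's NetworkIPAddr and BroadcastIPAddr are computed elsewhere (presumably in util); for reference, a sketch of deriving both from an IPv4 prefix with net/netip — the helper below is hypothetical, not the repository's util code:

package main

import (
	"fmt"
	"net/netip"
)

// networkAndBroadcast derives the network (all host bits zero) and
// broadcast (all host bits one) addresses of an IPv4 prefix.
func networkAndBroadcast(p netip.Prefix) (netip.Addr, netip.Addr) {
	network := p.Masked().Addr()
	n4 := network.As4()
	hostBits := 32 - p.Bits()
	// Set the host bits to one, byte by byte, to get the broadcast address.
	for i := 3; i >= 0 && hostBits > 0; i-- {
		take := hostBits
		if take > 8 {
			take = 8
		}
		n4[i] |= byte(0xff >> (8 - take))
		hostBits -= take
	}
	return network, netip.AddrFrom4(n4)
}

func main() {
	network, broadcast := networkAndBroadcast(netip.MustParsePrefix("172.19.64.128/29"))
	fmt.Println(network, broadcast) // 172.19.64.128 172.19.64.135
}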
@@ -299,28 +284,3 @@ func (v *Validator) checkVmNetCfgs(ipPool *networkv1.IPPool) error {
 	}
 	return nil
 }
-
-func consolidateServiceCIDRs(nodes []*corev1.Node, cidr string) (svcIPNet *net.IPNet, err error) {
-	for _, node := range nodes {
-		var serviceCIDR string
-		serviceCIDR, err = util.GetServiceCIDRFromNode(node)
-		if err != nil {
-			logrus.Warningf("could not find service cidr from node annoatation")
-			continue
-		}
-
-		svcIPNet, _, _, err = util.LoadCIDR(serviceCIDR)
-		if err != nil {
-			return
-		}
-	}
-
-	if svcIPNet == nil {
-		svcIPNet, _, _, err = util.LoadCIDR(cidr)
-		if err != nil {
-			return
-		}
-	}
-
-	return
-}