diff --git a/pkg/agent/ippool/ippool.go b/pkg/agent/ippool/ippool.go
index d9b262d..ef7a227 100644
--- a/pkg/agent/ippool/ippool.go
+++ b/pkg/agent/ippool/ippool.go
@@ -1,8 +1,6 @@
 package ippool
 
 import (
-	"fmt"
-
 	"github.com/sirupsen/logrus"
 
 	networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1"
@@ -10,8 +8,13 @@ import (
 )
 
 func (c *Controller) Update(ipPool *networkv1.IPPool) error {
+	if !networkv1.CacheReady.IsTrue(ipPool) {
+		logrus.Warningf("ippool %s/%s is not ready", ipPool.Namespace, ipPool.Name)
+		return nil
+	}
 	if ipPool.Status.IPv4 == nil {
-		return fmt.Errorf("ippool status has no records")
+		logrus.Warningf("ippool %s/%s status has no records", ipPool.Namespace, ipPool.Name)
+		return nil
 	}
 	allocated := ipPool.Status.IPv4.Allocated
 	filterExcluded(allocated)
diff --git a/pkg/controller/ippool/controller_test.go b/pkg/controller/ippool/controller_test.go
index fc4c11d..44c5ea8 100644
--- a/pkg/controller/ippool/controller_test.go
+++ b/pkg/controller/ippool/controller_test.go
@@ -7,11 +7,9 @@ import (
 	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	k8sfake "k8s.io/client-go/kubernetes/fake"
 
-	networkv1 "github.com/harvester/vm-dhcp-controller/pkg/apis/network.harvesterhci.io/v1alpha1"
 	"github.com/harvester/vm-dhcp-controller/pkg/cache"
 	"github.com/harvester/vm-dhcp-controller/pkg/config"
 	"github.com/harvester/vm-dhcp-controller/pkg/generated/clientset/versioned/fake"
@@ -87,145 +85,184 @@ func newTestNetworkAttachmentDefinitionBuilder() *NetworkAttachmentDefinitionBui
 }
 
 func TestHandler_OnChange(t *testing.T) {
-	type input struct {
-		key         string
-		ipAllocator *ipam.IPAllocator
-		ipPool      *networkv1.IPPool
-		pods        []*corev1.Pod
-	}
-
-	type output struct {
-		ipPool *networkv1.IPPool
-		pods   []*corev1.Pod
-		err    error
-	}
-
-	testCases := []struct {
-		name     string
-		given    input
-		expected output
-	}{
-		{
-			name: "new ippool",
-			given: input{
-				key: testIPPoolNamespace + "/" + testIPPoolName,
-				ipAllocator: newTestIPAllocatorBuilder().
-					Build(),
-				ipPool: newTestIPPoolBuilder().
-					Build(),
-			},
-			expected: output{
-				ipPool: newTestIPPoolBuilder().
-					StoppedCondition(corev1.ConditionFalse, "", "").
-					CacheReadyCondition(corev1.ConditionFalse, "NotInitialized", "").
-					Build(),
-			},
-		},
-		{
-			name: "ippool with ipam initialized",
-			given: input{
-				key: testIPPoolNamespace + "/" + testIPPoolName,
-				ipAllocator: newTestIPAllocatorBuilder().
-					IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP).
-					Build(),
-				ipPool: newTestIPPoolBuilder().
-					ServerIP(testServerIP).
-					CIDR(testCIDR).
-					PoolRange(testStartIP, testEndIP).
-					NetworkName(testNetworkName).
-					Build(),
-			},
-			expected: output{
-				ipPool: newTestIPPoolBuilder().
-					ServerIP(testServerIP).
-					CIDR(testCIDR).
-					PoolRange(testStartIP, testEndIP).
-					NetworkName(testNetworkName).
-					Available(100).
-					Used(0).
-					StoppedCondition(corev1.ConditionFalse, "", "").
-					Build(),
-			},
-		},
-		{
-			name: "pause ippool",
-			given: input{
-				key: testIPPoolNamespace + "/" + testIPPoolName,
-				ipAllocator: newTestIPAllocatorBuilder().
-					Build(),
-				ipPool: newTestIPPoolBuilder().
-					Paused().
-					AgentPodRef("default", "default-ippool-1-agent", testImage, "").
-					Build(),
-				pods: []*corev1.Pod{
-					newTestPodBuilder().
-						Build(),
-				},
-			},
-			expected: output{
-				ipPool: newTestIPPoolBuilder().
-					Paused().
-					StoppedCondition(corev1.ConditionTrue, "", "").
-					Build(),
-			},
-		},
-		{
-			name: "resume ippool",
-			given: input{
-				key: testIPPoolNamespace + "/" + testIPPoolName,
-				ipAllocator: newTestIPAllocatorBuilder().
-					Build(),
-				ipPool: newTestIPPoolBuilder().
-					UnPaused().
-					Build(),
-			},
-			expected: output{
-				ipPool: newTestIPPoolBuilder().
-					UnPaused().
-					StoppedCondition(corev1.ConditionFalse, "", "").
-					CacheReadyCondition(corev1.ConditionFalse, "NotInitialized", "").
-					Build(),
+	t.Run("new ippool", func(t *testing.T) {
+		key := testIPPoolNamespace + "/" + testIPPoolName
+		givenIPAllocator := newTestIPAllocatorBuilder().Build()
+		givenIPPool := newTestIPPoolBuilder().Build()
+
+		expectedIPPool := newTestIPPoolBuilder().
+			StoppedCondition(corev1.ConditionFalse, "", "").
+			CacheReadyCondition(corev1.ConditionFalse, "NotInitialized", "").Build()
+
+		clientset := fake.NewSimpleClientset()
+		err := clientset.Tracker().Add(givenIPPool)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		handler := Handler{
+			agentNamespace: "default",
+			agentImage: &config.Image{
+				Repository: "rancher/harvester-vm-dhcp-controller",
+				Tag:        "main",
 			},
-		},
-	}
+			ipAllocator:  givenIPAllocator,
+			ippoolClient: fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools),
+		}
+
+		ipPool, err := handler.OnChange(key, givenIPPool)
+		assert.Nil(t, err)
+
+		SanitizeStatus(&expectedIPPool.Status)
+		SanitizeStatus(&ipPool.Status)
+
+		assert.Equal(t, expectedIPPool, ipPool)
+	})
+
+	t.Run("ippool with ipam initialized", func(t *testing.T) {
+		key := testIPPoolNamespace + "/" + testIPPoolName
+		givenIPAllocator := newTestIPAllocatorBuilder().
+			IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP).
+			Build()
+		givenIPPool := newTestIPPoolBuilder().
+			ServerIP(testServerIP).
+			CIDR(testCIDR).
+			PoolRange(testStartIP, testEndIP).
+			NetworkName(testNetworkName).
+			CacheReadyCondition(corev1.ConditionTrue, "", "").Build()
+
+		expectedIPPool := newTestIPPoolBuilder().
+			ServerIP(testServerIP).
+			CIDR(testCIDR).
+			PoolRange(testStartIP, testEndIP).
+			NetworkName(testNetworkName).
+			Available(100).
+			Used(0).
+			CacheReadyCondition(corev1.ConditionTrue, "", "").
+			StoppedCondition(corev1.ConditionFalse, "", "").Build()
 
-	for _, tc := range testCases {
 		clientset := fake.NewSimpleClientset()
-		err := clientset.Tracker().Add(tc.given.ipPool)
+		err := clientset.Tracker().Add(givenIPPool)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		var pods []runtime.Object
-		for _, pod := range tc.given.pods {
-			pods = append(pods, pod)
+		handler := Handler{
+			agentNamespace: "default",
+			agentImage: &config.Image{
+				Repository: "rancher/harvester-vm-dhcp-controller",
+				Tag:        "main",
+			},
+			ipAllocator:      givenIPAllocator,
+			metricsAllocator: metrics.New(),
+			ippoolClient:     fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools),
 		}
-		k8sclientset := k8sfake.NewSimpleClientset(pods...)
+
+		ipPool, err := handler.OnChange(key, givenIPPool)
+		assert.Nil(t, err)
+
+		SanitizeStatus(&expectedIPPool.Status)
+		SanitizeStatus(&ipPool.Status)
+
+		assert.Equal(t, expectedIPPool, ipPool)
+	})
+
+	t.Run("pause ippool", func(t *testing.T) {
+		key := testIPPoolNamespace + "/" + testIPPoolName
+		givenIPAllocator := newTestIPAllocatorBuilder().
+			IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP).Build()
+		givenIPPool := newTestIPPoolBuilder().
+			NetworkName(testNetworkName).
+			Paused().
+			AgentPodRef(testPodNamespace, testPodName, testImage, "").Build()
+		givenPod := newTestPodBuilder().Build()
+
+		expectedIPAllocator := newTestIPAllocatorBuilder().Build()
+		expectedIPPool := newTestIPPoolBuilder().
+			NetworkName(testNetworkName).
+			Paused().
+			StoppedCondition(corev1.ConditionTrue, "", "").Build()
+
+		clientset := fake.NewSimpleClientset()
+		err := clientset.Tracker().Add(givenIPPool)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		k8sclientset := k8sfake.NewSimpleClientset()
+		err = k8sclientset.Tracker().Add(givenPod)
+		assert.Nil(t, err, "mock resource should add into fake controller tracker")
+
 		handler := Handler{
 			agentNamespace: "default",
 			agentImage: &config.Image{
 				Repository: "rancher/harvester-vm-dhcp-controller",
 				Tag:        "main",
 			},
+			ipAllocator:      givenIPAllocator,
 			cacheAllocator:   cache.New(),
-			ipAllocator:      tc.given.ipAllocator,
 			metricsAllocator: metrics.New(),
 			ippoolClient:     fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools),
 			podClient:        fakeclient.PodClient(k8sclientset.CoreV1().Pods),
 		}
 
-		var actual output
+		ipPool, err := handler.OnChange(key, givenIPPool)
+		assert.Nil(t, err)
 
-		actual.ipPool, actual.err = handler.OnChange(tc.given.key, tc.given.ipPool)
-		assert.Nil(t, actual.err)
+		SanitizeStatus(&expectedIPPool.Status)
+		SanitizeStatus(&ipPool.Status)
 
-		SanitizeStatus(&tc.expected.ipPool.Status)
-		SanitizeStatus(&actual.ipPool.Status)
+		assert.Equal(t, expectedIPPool, ipPool)
 
-		assert.Equal(t, tc.expected.ipPool, actual.ipPool, tc.name)
+		assert.Equal(t, expectedIPAllocator, handler.ipAllocator)
+
+		_, err = handler.podClient.Get(testPodNamespace, testPodName, metav1.GetOptions{})
+		assert.Equal(t, fmt.Sprintf("pods \"%s\" not found", testPodName), err.Error())
+	})
 
-		assert.Equal(t, tc.expected.pods, actual.pods)
-	}
+	t.Run("resume ippool", func(t *testing.T) {
+		key := testIPPoolNamespace + "/" + testIPPoolName
+		givenIPAllocator := newTestIPAllocatorBuilder().
+			IPSubnet(testNetworkName, testCIDR, testStartIP, testEndIP).
+			Build()
+		givenIPPool := newTestIPPoolBuilder().
+			NetworkName(testNetworkName).
+			UnPaused().
+			CacheReadyCondition(corev1.ConditionTrue, "", "").Build()
+
+		expectedIPPool := newTestIPPoolBuilder().
+			NetworkName(testNetworkName).
+			UnPaused().
+			Available(100).
+			Used(0).
+			CacheReadyCondition(corev1.ConditionTrue, "", "").
+			StoppedCondition(corev1.ConditionFalse, "", "").Build()
+
+		clientset := fake.NewSimpleClientset()
+		err := clientset.Tracker().Add(givenIPPool)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		handler := Handler{
+			agentNamespace: "default",
+			agentImage: &config.Image{
+				Repository: "rancher/harvester-vm-dhcp-controller",
+				Tag:        "main",
+			},
+			ipAllocator:      givenIPAllocator,
+			metricsAllocator: metrics.New(),
+			ippoolClient:     fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools),
+		}
+
+		ipPool, err := handler.OnChange(key, givenIPPool)
+		assert.Nil(t, err)
+
+		SanitizeStatus(&expectedIPPool.Status)
+		SanitizeStatus(&ipPool.Status)
+
+		assert.Equal(t, expectedIPPool, ipPool)
+	})
 }
 
 func TestHandler_DeployAgent(t *testing.T) {
diff --git a/pkg/controller/vmnetcfg/controller.go b/pkg/controller/vmnetcfg/controller.go
index 299edad..1c8bc60 100644
--- a/pkg/controller/vmnetcfg/controller.go
+++ b/pkg/controller/vmnetcfg/controller.go
@@ -105,94 +105,49 @@ func (h *Handler) Allocate(vmNetCfg *networkv1.VirtualMachineNetworkConfig, stat
 
 	var ncStatuses []networkv1.NetworkConfigStatus
 	for _, nc := range vmNetCfg.Spec.NetworkConfigs {
+		ipPoolNamespace, ipPoolName := kv.RSplit(nc.NetworkName, "/")
+		ipPool, err := h.ippoolCache.Get(ipPoolNamespace, ipPoolName)
+		if err != nil {
+			return status, err
+		}
+
+		if !networkv1.CacheReady.IsTrue(ipPool) {
+			return status, fmt.Errorf("ippool %s/%s is not ready", ipPoolNamespace, ipPoolName)
+		}
+
 		exists, err := h.cacheAllocator.HasMAC(nc.NetworkName, nc.MACAddress)
 		if err != nil {
 			return status, err
 		}
+
+		var ip string
+
 		if exists {
 			// Recover IP from cache
-
-			ip, err := h.cacheAllocator.GetIPByMAC(nc.NetworkName, nc.MACAddress)
+			ip, err = h.cacheAllocator.GetIPByMAC(nc.NetworkName, nc.MACAddress)
 			if err != nil {
 				return status, err
 			}
+		} else {
+			dIP := net.IPv4zero.String()
+			if nc.IPAddress != nil {
+				dIP = *nc.IPAddress
+			}
 
-			// Prepare VirtualMachineNetworkConfig status
-			ncStatus := networkv1.NetworkConfigStatus{
-				AllocatedIPAddress: ip,
-				MACAddress:         nc.MACAddress,
-				NetworkName:        nc.NetworkName,
-				State:              networkv1.AllocatedState,
+			// Recover IP from status (resume from paused state)
+			if oIP, err := findIPAddressFromNetworkConfigStatusByMACAddress(vmNetCfg.Status.NetworkConfigs, nc.MACAddress); err == nil {
+				dIP = oIP
 			}
-			ncStatuses = append(ncStatuses, ncStatus)
-
-			// Update VirtualMachineNetworkConfig metrics
-			h.metricsAllocator.UpdateVmNetCfgStatus(
-				fmt.Sprintf("%s/%s", vmNetCfg.Namespace, vmNetCfg.Name),
-				ncStatus.NetworkName,
-				ncStatus.MACAddress,
-				ncStatus.AllocatedIPAddress,
-				string(ncStatus.State),
-			)
-
-			// Update IPPool status
-			ipPoolNamespace, ipPoolName := kv.RSplit(nc.NetworkName, "/")
-			if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
-				ipPool, err := h.ippoolCache.Get(ipPoolNamespace, ipPoolName)
-				if err != nil {
-					return err
-				}
-
-				ipPoolCpy := ipPool.DeepCopy()
-
-				ipv4Status := ipPoolCpy.Status.IPv4
-				if ipv4Status == nil {
-					ipv4Status = new(networkv1.IPv4Status)
-				}
-
-				allocated := ipv4Status.Allocated
-				if allocated == nil {
-					allocated = make(map[string]string)
-				}
-
-				allocated[ip] = nc.MACAddress
-
-				ipv4Status.Allocated = allocated
-				ipPoolCpy.Status.IPv4 = ipv4Status
-
-				if !reflect.DeepEqual(ipPoolCpy, ipPool) {
-					logrus.Infof("(vmnetcfg.Allocate) update ippool %s/%s", ipPool.Namespace, ipPool.Name)
-					ipPoolCpy.Status.LastUpdate = metav1.Now()
-					_, err = h.ippoolClient.UpdateStatus(ipPoolCpy)
-					return err
-				}
-
-				return nil
-			}); err != nil {
+
+			// Allocate new IP
+			ip, err = h.ipAllocator.AllocateIP(nc.NetworkName, dIP)
+			if err != nil {
 				return status, err
 			}
-			continue
-		}
-
-		// Allocate new IP
-
-		dIP := net.IPv4zero.String()
-		if nc.IPAddress != nil {
-			dIP = *nc.IPAddress
-		}
 
-		// Recover IP from status (resume from paused state)
-		if oIP, err := findIPAddressFromNetworkConfigStatusByMACAddress(vmNetCfg.Status.NetworkConfigs, nc.MACAddress); err == nil {
-			dIP = oIP
-		}
-
-		ip, err := h.ipAllocator.AllocateIP(nc.NetworkName, dIP)
-		if err != nil {
-			return status, err
-		}
-
-		if err := h.cacheAllocator.AddMAC(nc.NetworkName, nc.MACAddress, ip); err != nil {
-			return status, err
+			if err := h.cacheAllocator.AddMAC(nc.NetworkName, nc.MACAddress, ip); err != nil {
+				return status, err
+			}
 		}
 
 		// Prepare VirtualMachineNetworkConfig status
@@ -202,6 +157,7 @@ func (h *Handler) Allocate(vmNetCfg *networkv1.VirtualMachineNetworkConfig, stat
 			NetworkName:        nc.NetworkName,
 			State:              networkv1.AllocatedState,
 		}
+
 		ncStatuses = append(ncStatuses, ncStatus)
 
 		// Update VirtualMachineNetworkConfig metrics
@@ -214,40 +170,29 @@ func (h *Handler) Allocate(vmNetCfg *networkv1.VirtualMachineNetworkConfig, stat
 		)
 
 		// Update IPPool status
-		ipPoolNamespace, ipPoolName := kv.RSplit(nc.NetworkName, "/")
-		if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
-			ipPool, err := h.ippoolCache.Get(ipPoolNamespace, ipPoolName)
-			if err != nil {
-				return err
-			}
+		ipPoolCpy := ipPool.DeepCopy()
 
-			ipPoolCpy := ipPool.DeepCopy()
-
-			ipv4Status := ipPoolCpy.Status.IPv4
-			if ipv4Status == nil {
-				ipv4Status = new(networkv1.IPv4Status)
-			}
+		ipv4Status := ipPoolCpy.Status.IPv4
+		if ipv4Status == nil {
+			ipv4Status = new(networkv1.IPv4Status)
+		}
 
-			allocated := ipv4Status.Allocated
-			if allocated == nil {
-				allocated = make(map[string]string)
-			}
+		allocated := ipv4Status.Allocated
+		if allocated == nil {
+			allocated = make(map[string]string)
+		}
 
-			allocated[ip] = nc.MACAddress
+		allocated[ip] = nc.MACAddress
 
-			ipv4Status.Allocated = allocated
-			ipPoolCpy.Status.IPv4 = ipv4Status
+		ipv4Status.Allocated = allocated
+		ipPoolCpy.Status.IPv4 = ipv4Status
 
-			if !reflect.DeepEqual(ipPoolCpy, ipPool) {
-				logrus.Infof("(vmnetcfg.Allocate) update ippool %s/%s", ipPool.Namespace, ipPool.Name)
-				ipPoolCpy.Status.LastUpdate = metav1.Now()
-				_, err = h.ippoolClient.UpdateStatus(ipPoolCpy)
-				return err
+		if !reflect.DeepEqual(ipPoolCpy, ipPool) {
+			logrus.Infof("(vmnetcfg.Allocate) update ippool %s/%s", ipPool.Namespace, ipPool.Name)
+			ipPoolCpy.Status.LastUpdate = metav1.Now()
+			if _, err = h.ippoolClient.UpdateStatus(ipPoolCpy); err != nil {
+				return status, err
 			}
-
-			return nil
-		}); err != nil {
-			return status, err
 		}
 	}
 
diff --git a/pkg/controller/vmnetcfg/controller_test.go b/pkg/controller/vmnetcfg/controller_test.go
index 26ceb8f..d358252 100644
--- a/pkg/controller/vmnetcfg/controller_test.go
+++ b/pkg/controller/vmnetcfg/controller_test.go
@@ -175,7 +175,8 @@ func TestHandler_Allocate(t *testing.T) {
 			ServerIP(testServerIP).
 			CIDR(testCIDR).
 			PoolRange(testStartIP, testEndIP).
-			NetworkName(testNetworkName).Build()
+			NetworkName(testNetworkName).
+			CacheReadyCondition(corev1.ConditionTrue, "", "").Build()
 		givenCacheAllocator := newTestCacheAllocatorBuilder().
 			MACSet(testNetworkName).Build()
 		givenIPAllocator := newTestIPAllocatorBuilder().
@@ -190,7 +191,8 @@ func TestHandler_Allocate(t *testing.T) {
 			PoolRange(testStartIP, testEndIP).
 			NetworkName(testNetworkName).
 			Allocated(testIPAddress1, testMACAddress1).
-			Allocated(testIPAddress2, testMACAddress2).Build()
+			Allocated(testIPAddress2, testMACAddress2).
+			CacheReadyCondition(corev1.ConditionTrue, "", "").Build()
 		expectedCacheAllocator := newTestCacheAllocatorBuilder().
 			MACSet(testNetworkName).
 			Add(testNetworkName, testMACAddress1, testIPAddress1).
@@ -239,7 +241,8 @@ func TestHandler_Allocate(t *testing.T) {
 			PoolRange(testStartIP, testEndIP).
 			NetworkName(testNetworkName).
 			Allocated(testIPAddress1, testMACAddress1).
-			Allocated(testIPAddress2, testMACAddress2).Build()
+			Allocated(testIPAddress2, testMACAddress2).
+			CacheReadyCondition(corev1.ConditionTrue, "", "").Build()
 		givenCacheAllocator := newTestCacheAllocatorBuilder().
 			MACSet(testNetworkName).Build()
 		givenIPAllocator := newTestIPAllocatorBuilder().
@@ -297,7 +300,8 @@ func TestHandler_Allocate(t *testing.T) {
 			ServerIP(testServerIP).
 			CIDR(testCIDR).
 			PoolRange(testStartIP, testEndIP).
-			NetworkName(testNetworkName).Build()
+			NetworkName(testNetworkName).
+			CacheReadyCondition(corev1.ConditionTrue, "", "").Build()
 		givenCacheAllocator := newTestCacheAllocatorBuilder().
 			MACSet(testNetworkName).
 			Add(testNetworkName, testMACAddress1, testIPAddress1).
@@ -312,7 +316,8 @@ func TestHandler_Allocate(t *testing.T) {
 			PoolRange(testStartIP, testEndIP).
 			NetworkName(testNetworkName).
 			Allocated(testIPAddress1, testMACAddress1).
-			Allocated(testIPAddress2, testMACAddress2).Build()
+			Allocated(testIPAddress2, testMACAddress2).
+			CacheReadyCondition(corev1.ConditionTrue, "", "").Build()
 
 		clientset := fake.NewSimpleClientset(givenIPPool)
 
@@ -338,4 +343,26 @@ func TestHandler_Allocate(t *testing.T) {
 
 		assert.Equal(t, expectedIPPool, ipPool)
 	})
+
+	t.Run("ippool cache not ready", func(t *testing.T) {
+		givenVmNetCfg := newTestVmNetCfgBuilder().
+			WithNetworkConfig(testIPAddress1, testMACAddress1, testNetworkName).
+			WithNetworkConfig(testIPAddress2, testMACAddress2, testNetworkName).Build()
+		givenIPPool := newTestIPPoolBuilder().
+			ServerIP(testServerIP).
+			CIDR(testCIDR).
+			PoolRange(testStartIP, testEndIP).
+			NetworkName(testNetworkName).
+			CacheReadyCondition(corev1.ConditionFalse, "", "").Build()
+
+		clientset := fake.NewSimpleClientset(givenIPPool)
+
+		handler := Handler{
+			ippoolClient: fakeclient.IPPoolClient(clientset.NetworkV1alpha1().IPPools),
+			ippoolCache:  fakeclient.IPPoolCache(clientset.NetworkV1alpha1().IPPools),
+		}
+
+		_, err := handler.Allocate(givenVmNetCfg, givenVmNetCfg.Status)
+		assert.EqualError(t, err, fmt.Sprintf("ippool %s/%s is not ready", testIPPoolNamespace, testIPPoolName))
+	})
 }