Skip to content

Commit

Permalink
Merge pull request #597 from nilo19/fix/not-a-vmss
Browse files Browse the repository at this point in the history
fix: call the counterpart function of availabilitySet when the instance is not a vmss vm
  • Loading branch information
k8s-ci-robot authored Apr 22, 2021
2 parents 048e465 + d99b1b3 commit 83ec139
Show file tree
Hide file tree
Showing 5 changed files with 122 additions and 24 deletions.
27 changes: 17 additions & 10 deletions pkg/provider/azure_standard.go
Original file line number Diff line number Diff line change
Expand Up @@ -650,12 +650,7 @@ func (as *availabilitySet) GetPrivateIPsByNodeName(name string) ([]string, error

// getAgentPoolAvailabilitySets lists the virtual machines for the resource group and then builds
// a list of availability sets that match the nodes available to k8s.
func (as *availabilitySet) getAgentPoolAvailabilitySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
vms, err := as.ListVirtualMachines(as.ResourceGroup)
if err != nil {
klog.Errorf("as.getNodeAvailabilitySet - ListVirtualMachines failed, err=%v", err)
return nil, err
}
func (as *availabilitySet) getAgentPoolAvailabilitySets(vms []compute.VirtualMachine, nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
vmNameToAvailabilitySetID := make(map[string]string, len(vms))
for vmx := range vms {
vm := vms[vmx]
Expand All @@ -672,8 +667,8 @@ func (as *availabilitySet) getAgentPoolAvailabilitySets(nodes []*v1.Node) (agent
}
asID, ok := vmNameToAvailabilitySetID[nodeName]
if !ok {
klog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
return nil, fmt.Errorf("node (%s) - has no availability sets", nodeName)
klog.Warningf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
continue
}
if availabilitySetIDs.Has(asID) {
// already added in the list
Expand Down Expand Up @@ -708,7 +703,13 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node)
availabilitySetNames = &[]string{as.Config.PrimaryAvailabilitySetName}
return availabilitySetNames, nil
}
availabilitySetNames, err = as.getAgentPoolAvailabilitySets(nodes)

vms, err := as.ListVirtualMachines(as.ResourceGroup)
if err != nil {
klog.Errorf("as.getNodeAvailabilitySet - ListVirtualMachines failed, err=%v", err)
return nil, err
}
availabilitySetNames, err = as.getAgentPoolAvailabilitySets(vms, nodes)
if err != nil {
klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabilitySets failed err=(%v)", err)
return nil, err
Expand Down Expand Up @@ -1211,5 +1212,11 @@ func (as *availabilitySet) EnsureBackendPoolDeletedFromVMSets(vmasNamesMap map[s

// GetAgentPoolVMSetNames returns all VMAS names according to the nodes
func (as *availabilitySet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) {
return as.getAgentPoolAvailabilitySets(nodes)
vms, err := as.ListVirtualMachines(as.ResourceGroup)
if err != nil {
klog.Errorf("as.getNodeAvailabilitySet - ListVirtualMachines failed, err=%v", err)
return nil, err
}

return as.getAgentPoolAvailabilitySets(vms, nodes)
}
2 changes: 1 addition & 1 deletion pkg/provider/azure_standard_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1160,7 +1160,7 @@ func TestGetStandardVMSetNames(t *testing.T) {
},
},
},
expectedErrMsg: fmt.Errorf("node (vm2) - has no availability sets"),
expectedErrMsg: errors.New("no availability sets found for nodes, node count(1)"),
},
{
name: "GetVMSetNames should report the error if there's no such availability set",
Expand Down
36 changes: 34 additions & 2 deletions pkg/provider/azure_vmss.go
Original file line number Diff line number Diff line change
Expand Up @@ -1672,7 +1672,39 @@ func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(vmssNamesMap map[string]b
return nil
}

// GetAgentPoolVMSetNames returns all VMSS names according to the nodes
// GetAgentPoolVMSetNames returns all VMSS/VMAS names according to the nodes.
// We need to include the VMAS here because some of the cluster provisioning tools
// like capz allows mixed instance type.
func (ss *ScaleSet) GetAgentPoolVMSetNames(nodes []*v1.Node) (*[]string, error) {
return ss.getAgentPoolScaleSets(nodes)
vmSetNames := make([]string, 0)
as := ss.availabilitySet.(*availabilitySet)

for _, node := range nodes {
var names *[]string
managedByAS, err := ss.isNodeManagedByAvailabilitySet(node.Name, azcache.CacheReadTypeDefault)
if err != nil {
return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to check if the node %s is managed by VMAS: %w", node.Name, err)
}
if managedByAS {
cached, err := ss.availabilitySetNodesCache.Get(consts.AvailabilitySetNodesKey, azcache.CacheReadTypeDefault)
if err != nil {
return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to get availabilitySetNodesCache")
}
vms := cached.(availabilitySetNodeEntry).vms
names, err = as.getAgentPoolAvailabilitySets(vms, []*v1.Node{node})
if err != nil {
return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to execute getAgentPoolAvailabilitySets: %w", err)
}
vmSetNames = append(vmSetNames, *names...)
continue
}

names, err = ss.getAgentPoolScaleSets([]*v1.Node{node})
if err != nil {
return nil, fmt.Errorf("GetAgentPoolVMSetNames: failed to execute getAgentPoolScaleSets: %w", err)
}
vmSetNames = append(vmSetNames, *names...)
}

return &vmSetNames, nil
}
13 changes: 8 additions & 5 deletions pkg/provider/azure_vmss_cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ type vmssEntry struct {
// availabilitySetNodeEntry is the payload cached by the availability-set
// nodes cache. It records which VMs/nodes are backed by availability sets
// (rather than a VMSS), plus the raw VM objects so callers (e.g.
// GetAgentPoolVMSetNames) can read availability-set IDs without re-listing
// virtual machines from ARM.
type availabilitySetNodeEntry struct {
	// vmNames holds the Azure VM names of availability-set instances.
	vmNames sets.String
	// nodeNames holds the corresponding Kubernetes node names.
	nodeNames sets.String
	// vms holds the listed VM objects backing vmNames.
	vms []compute.VirtualMachine
}

func (ss *ScaleSet) newVMSSCache() (*azcache.TimedCache, error) {
Expand Down Expand Up @@ -278,15 +279,16 @@ func (ss *ScaleSet) newAvailabilitySetNodesCache() (*azcache.TimedCache, error)
return nil, err
}

vmList := make([]compute.VirtualMachine, 0)
for _, resourceGroup := range resourceGroups.List() {
vmList, err := ss.Cloud.ListVirtualMachines(resourceGroup)
vms, err := ss.Cloud.ListVirtualMachines(resourceGroup)
if err != nil {
return nil, err
return nil, fmt.Errorf("newAvailabilitySetNodesCache: failed to list vms in the resource group %s: %w", resourceGroup, err)
}

for _, vm := range vmList {
for _, vm := range vms {
if vm.Name != nil {
vmNames.Insert(*vm.Name)
vmNames.Insert(to.String(vm.Name))
vmList = append(vmList, vm)
}
}
}
Expand All @@ -300,6 +302,7 @@ func (ss *ScaleSet) newAvailabilitySetNodesCache() (*azcache.TimedCache, error)
localCache := availabilitySetNodeEntry{
vmNames: vmNames,
nodeNames: nodeNames,
vms: vmList,
}

return localCache, nil
Expand Down
68 changes: 62 additions & 6 deletions pkg/provider/azure_vmss_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -874,7 +874,7 @@ func TestGetInstanceTypeByNodeName(t *testing.T) {
vmList: []string{"vmss-vm-000000"},
vmClientErr: &retry.Error{RawError: fmt.Errorf("error")},
expectedType: "",
expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: error"),
expectedErr: fmt.Errorf("newAvailabilitySetNodesCache: failed to list vms in the resource group rg: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: error"),
},
}

Expand All @@ -895,7 +895,7 @@ func TestGetInstanceTypeByNodeName(t *testing.T) {

sku, err := ss.GetInstanceTypeByNodeName("vmss-vm-000000")
if test.expectedErr != nil {
assert.EqualError(t, test.expectedErr, err.Error(), test.description+", but an error occurs")
assert.EqualError(t, err, test.expectedErr.Error(), test.description)
}
assert.Equal(t, test.expectedType, sku, test.description)
}
Expand Down Expand Up @@ -998,7 +998,7 @@ func TestGetPrimaryInterface(t *testing.T) {
vmList: []string{"vmss-vm-000000"},
hasPrimaryInterface: true,
vmClientErr: &retry.Error{RawError: fmt.Errorf("error")},
expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: error"),
expectedErr: fmt.Errorf("newAvailabilitySetNodesCache: failed to list vms in the resource group rg: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: error"),
},
{
description: "GetPrimaryInterface should report the error if vmss client returns retry error",
Expand Down Expand Up @@ -1079,7 +1079,7 @@ func TestGetPrimaryInterface(t *testing.T) {

nic, err := ss.GetPrimaryInterface(test.nodeName)
if test.expectedErr != nil {
assert.EqualError(t, test.expectedErr, err.Error(), test.description+", but an error occurs")
assert.EqualError(t, err, test.expectedErr.Error(), test.description)
}
assert.Equal(t, expectedInterface, nic, test.description)
}
Expand Down Expand Up @@ -1164,7 +1164,7 @@ func TestGetPrivateIPsByNodeName(t *testing.T) {
vmList: []string{"vmss-vm-000000"},
vmClientErr: &retry.Error{RawError: fmt.Errorf("error")},
expectedPrivateIPs: []string{},
expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: error"),
expectedErr: fmt.Errorf("newAvailabilitySetNodesCache: failed to list vms in the resource group rg: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: error"),
},
}

Expand Down Expand Up @@ -1192,7 +1192,7 @@ func TestGetPrivateIPsByNodeName(t *testing.T) {

privateIPs, err := ss.GetPrivateIPsByNodeName(test.nodeName)
if test.expectedErr != nil {
assert.EqualError(t, test.expectedErr, err.Error(), test.description+", but an error occurs")
assert.EqualError(t, err, test.expectedErr.Error(), test.description)
}
assert.Equal(t, test.expectedPrivateIPs, privateIPs, test.description)
}
Expand Down Expand Up @@ -2543,3 +2543,59 @@ func TestGetNodeCIDRMasksByProviderID(t *testing.T) {
})
}
}

// TestGetAgentPoolVMSetNamesMixedInstances verifies that
// ScaleSet.GetAgentPoolVMSetNames returns both VMSS and VMAS names when the
// node pool mixes scale-set instances with availability-set VMs (the mixed
// instance-type scenario this PR fixes, e.g. clusters provisioned by capz).
func TestGetAgentPoolVMSetNamesMixedInstances(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ss, err := NewTestScaleSet(ctrl)
	assert.NoError(t, err)

	// Two standalone VMs, each backed by an availability set (vmas-0, vmas-1).
	existingVMs := []compute.VirtualMachine{
		{
			Name: to.StringPtr("vm-0"),
			VirtualMachineProperties: &compute.VirtualMachineProperties{
				AvailabilitySet: &compute.SubResource{
					ID: to.StringPtr("vmas-0"),
				},
			},
		},
		{
			Name: to.StringPtr("vm-1"),
			VirtualMachineProperties: &compute.VirtualMachineProperties{
				AvailabilitySet: &compute.SubResource{
					ID: to.StringPtr("vmas-1"),
				},
			},
		},
	}
	// One scale set with three instances named vmssee6c2000000..2.
	expectedScaleSet := buildTestVMSS(testVMSSName, "vmssee6c2")
	vmList := []string{"vmssee6c2000000", "vmssee6c2000001", "vmssee6c2000002"}
	existingVMSSVMs, _, _ := buildTestVirtualMachineEnv(ss.cloud, testVMSSName, "", 0, vmList, "", false)

	// AnyTimes: the standalone-VM list feeds both the availability-set nodes
	// cache and the VMAS lookup path, so it may be fetched more than once.
	mockVMClient := ss.Cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
	mockVMClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(existingVMs, nil).AnyTimes()

	mockVMSSClient := ss.Cloud.VirtualMachineScaleSetsClient.(*mockvmssclient.MockInterface)
	mockVMSSClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]compute.VirtualMachineScaleSet{expectedScaleSet}, nil)

	mockVMSSVMClient := ss.Cloud.VirtualMachineScaleSetVMsClient.(*mockvmssvmclient.MockInterface)
	mockVMSSVMClient.EXPECT().List(gomock.Any(), gomock.Any(), testVMSSName, gomock.Any()).Return(existingVMSSVMs, nil)

	// One VMSS-managed node and one VMAS-managed node.
	nodes := []*v1.Node{
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: "vmssee6c2000000",
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: "vm-0",
			},
		},
	}
	// Expect one VM set name per node: the scale set for the VMSS node and
	// vmas-0 for the VMAS node. vmas-1 is absent because no node references
	// vm-1.
	expectedVMSetNames := &[]string{testVMSSName, "vmas-0"}
	vmSetNames, err := ss.GetAgentPoolVMSetNames(nodes)
	assert.NoError(t, err)
	assert.Equal(t, expectedVMSetNames, vmSetNames)
}

0 comments on commit 83ec139

Please sign in to comment.