From 4b0964f7f979a3c9d6dd5aa63f79bae7395999bb Mon Sep 17 00:00:00 2001 From: Cameron McAvoy Date: Tue, 17 Oct 2023 16:51:13 -0500 Subject: [PATCH] Add AWSMachines to back the ec2 instances in AWSMachinePools and AWSManagedMachinePools --- ...ture.cluster.x-k8s.io_awsmachinepools.yaml | 4 + ...uster.x-k8s.io_awsmanagedmachinepools.yaml | 4 + config/rbac/role.yaml | 9 + controllers/awsmachine_controller.go | 24 ++- exp/api/v1beta1/conversion.go | 12 +- exp/api/v1beta1/zz_generated.conversion.go | 32 ++- exp/api/v1beta2/awsmachinepool_types.go | 4 + .../v1beta2/awsmanagedmachinepool_types.go | 4 + exp/api/v1beta2/conditions_consts.go | 5 + exp/api/v1beta2/types.go | 5 + exp/controllers/awsmachinepool_controller.go | 192 +++++++++++++++++- .../awsmachinepool_controller_test.go | 155 +++++++++++++- .../awsmanagedmachinepool_controller.go | 48 ++++- pkg/cloud/awserrors/errors.go | 3 + pkg/cloud/scope/machine.go | 11 + 15 files changed, 475 insertions(+), 37 deletions(-) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml index 7b6acd1ccc..9c1b86f662 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml @@ -1103,6 +1103,10 @@ spec: can be added as events to the Machine object and/or logged in the controller's output. type: string + infrastructureMachineKind: + description: InfrastructureMachineKind is the kind of the infrastructure + resources behind MachinePool Machines. + type: string instances: description: Instances contains the status for each instance in the pool diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml index 3b4e76e87c..10a16bb48f 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml @@ -1043,6 +1043,10 @@ spec: can be added as events to the MachinePool object and/or logged in the controller's output. type: string + infrastructureMachineKind: + description: InfrastructureMachineKind is the kind of the infrastructure + resources behind MachinePool Machines. 
+ type: string launchTemplateID: description: The ID of the launch template type: string diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 3ff4afe303..fdb81e86a3 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -126,6 +126,14 @@ rules: - cluster.x-k8s.io resources: - machines + verbs: + - delete + - get + - list + - watch +- apiGroups: + - cluster.x-k8s.io + resources: - machines/status verbs: - get @@ -310,6 +318,7 @@ rules: resources: - awsmachines verbs: + - create - delete - get - list diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go index e468e4c2d7..5e0e826b71 100644 --- a/controllers/awsmachine_controller.go +++ b/controllers/awsmachine_controller.go @@ -143,10 +143,11 @@ func (r *AWSMachineReconciler) getObjectStoreService(scope scope.S3Scope) servic return s3.NewService(scope) } -// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,verbs=get;list;watch;update;patch;delete -// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines/status,verbs=get;update;patch // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,verbs=create;get;list;watch;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines,verbs=get;list;watch;delete +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines/status,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch @@ -459,6 +460,7 @@ func (r *AWSMachineReconciler) findInstance(machineScope *scope.MachineScope, ec return instance, nil } +//nolint:gocyclo func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *scope.MachineScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope, elbScope scope.ELBScope, objectStoreScope scope.S3Scope) (ctrl.Result, error) { machineScope.Trace("Reconciling AWSMachine") @@ -482,7 +484,7 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope * } // Make sure bootstrap data is available and populated. 
- if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { + if !machineScope.IsMachinePoolMachine() && machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { machineScope.Info("Bootstrap data secret reference is not yet available") conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil @@ -497,6 +499,12 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope * conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, err.Error()) return ctrl.Result{}, err } + if instance == nil && machineScope.IsMachinePoolMachine() { + err = errors.New("no instance found for machine pool") + machineScope.Error(err, "unable to find instance") + conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, err.Error()) + return ctrl.Result{}, err + } // If the AWSMachine doesn't have our finalizer, add it. if controllerutil.AddFinalizer(machineScope.AWSMachine, infrav1.MachineFinalizer) { @@ -596,9 +604,11 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope * } // reconcile the deletion of the bootstrap data secret now that we have updated instance state - if deleteSecretErr := r.deleteBootstrapData(machineScope, clusterScope, objectStoreScope); deleteSecretErr != nil { - r.Log.Error(deleteSecretErr, "unable to delete secrets") - return ctrl.Result{}, deleteSecretErr + if !machineScope.IsMachinePoolMachine() { + if deleteSecretErr := r.deleteBootstrapData(machineScope, clusterScope, objectStoreScope); deleteSecretErr != nil { + r.Log.Error(deleteSecretErr, "unable to delete secrets") + return ctrl.Result{}, deleteSecretErr + } } if instance.State == infrav1.InstanceStateTerminated { diff --git a/exp/api/v1beta1/conversion.go b/exp/api/v1beta1/conversion.go index 16cf651fdf..47f4e0d2e8 100644 --- a/exp/api/v1beta1/conversion.go +++ b/exp/api/v1beta1/conversion.go @@ -50,6 +50,7 @@ func (src *AWSMachinePool) ConvertTo(dstRaw conversion.Hub) error { if restored.Spec.AvailabilityZoneSubnetType != nil { dst.Spec.AvailabilityZoneSubnetType = restored.Spec.AvailabilityZoneSubnetType } + dst.Status.InfrastructureMachineKind = restored.Status.InfrastructureMachineKind if restored.Spec.AWSLaunchTemplate.PrivateDNSName != nil { dst.Spec.AWSLaunchTemplate.PrivateDNSName = restored.Spec.AWSLaunchTemplate.PrivateDNSName @@ -80,7 +81,6 @@ func (src *AWSMachinePoolList) ConvertTo(dstRaw conversion.Hub) error { // ConvertFrom converts the v1beta2 AWSMachinePoolList receiver to v1beta1 AWSMachinePoolList. 
func (r *AWSMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error { src := srcRaw.(*infrav1exp.AWSMachinePoolList) - return Convert_v1beta2_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(src, r, nil) } @@ -110,6 +110,8 @@ func (src *AWSManagedMachinePool) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.AvailabilityZoneSubnetType = restored.Spec.AvailabilityZoneSubnetType } + dst.Status.InfrastructureMachineKind = restored.Status.InfrastructureMachineKind + return nil } @@ -129,6 +131,14 @@ func Convert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolS return autoConvert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(in, out, s) } +func Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *infrav1exp.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s apiconversion.Scope) error { + return autoConvert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in, out, s) +} + +func Convert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in *infrav1exp.AWSManagedMachinePoolStatus, out *AWSManagedMachinePoolStatus, s apiconversion.Scope) error { + return autoConvert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in, out, s) +} + // ConvertTo converts the v1beta1 AWSManagedMachinePoolList receiver to a v1beta2 AWSManagedMachinePoolList. func (src *AWSManagedMachinePoolList) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*infrav1exp.AWSManagedMachinePoolList) diff --git a/exp/api/v1beta1/zz_generated.conversion.go b/exp/api/v1beta1/zz_generated.conversion.go index 869a3c13d4..77ea73b217 100644 --- a/exp/api/v1beta1/zz_generated.conversion.go +++ b/exp/api/v1beta1/zz_generated.conversion.go @@ -100,11 +100,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachinePoolStatus)(nil), (*AWSMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(a.(*v1beta2.AWSMachinePoolStatus), b.(*AWSMachinePoolStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePool)(nil), (*v1beta2.AWSManagedMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(a.(*AWSManagedMachinePool), b.(*v1beta2.AWSManagedMachinePool), scope) }); err != nil { @@ -135,11 +130,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSManagedMachinePoolStatus)(nil), (*AWSManagedMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(a.(*v1beta2.AWSManagedMachinePoolStatus), b.(*AWSManagedMachinePoolStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*BlockDeviceMapping)(nil), (*v1beta2.BlockDeviceMapping)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_BlockDeviceMapping_To_v1beta2_BlockDeviceMapping(a.(*BlockDeviceMapping), b.(*v1beta2.BlockDeviceMapping), scope) }); err != nil { @@ -300,11 +290,21 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.AWSMachinePoolStatus)(nil), (*AWSMachinePoolStatus)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(a.(*v1beta2.AWSMachinePoolStatus), b.(*AWSMachinePoolStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.AWSManagedMachinePoolSpec)(nil), (*AWSManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(a.(*v1beta2.AWSManagedMachinePoolSpec), b.(*AWSManagedMachinePoolSpec), scope) }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.AWSManagedMachinePoolStatus)(nil), (*AWSManagedMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(a.(*v1beta2.AWSManagedMachinePoolStatus), b.(*AWSManagedMachinePoolStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta2.AutoScalingGroup)(nil), (*AutoScalingGroup)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_AutoScalingGroup_To_v1beta1_AutoScalingGroup(a.(*v1beta2.AutoScalingGroup), b.(*AutoScalingGroup), scope) }); err != nil { @@ -587,17 +587,13 @@ func autoConvert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in out.Instances = *(*[]AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances)) out.LaunchTemplateID = in.LaunchTemplateID out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) + // WARNING: in.InfrastructureMachineKind requires manual conversion: does not exist in peer-type out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) out.ASGStatus = (*ASGStatus)(unsafe.Pointer(in.ASGStatus)) return nil } -// Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus is an autogenerated conversion function. -func Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *v1beta2.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s conversion.Scope) error { - return autoConvert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in, out, s) -} - func autoConvert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(in *AWSManagedMachinePool, out *v1beta2.AWSManagedMachinePool, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1beta2_AWSManagedMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil { @@ -759,17 +755,13 @@ func autoConvert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachin out.Replicas = in.Replicas out.LaunchTemplateID = (*string)(unsafe.Pointer(in.LaunchTemplateID)) out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) + // WARNING: in.InfrastructureMachineKind requires manual conversion: does not exist in peer-type out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus is an autogenerated conversion function. 
-func Convert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in *v1beta2.AWSManagedMachinePoolStatus, out *AWSManagedMachinePoolStatus, s conversion.Scope) error { - return autoConvert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachinePoolStatus(in, out, s) -} - func autoConvert_v1beta1_AutoScalingGroup_To_v1beta2_AutoScalingGroup(in *AutoScalingGroup, out *v1beta2.AutoScalingGroup, s conversion.Scope) error { out.ID = in.ID out.Tags = *(*apiv1beta2.Tags)(unsafe.Pointer(&in.Tags)) diff --git a/exp/api/v1beta2/awsmachinepool_types.go b/exp/api/v1beta2/awsmachinepool_types.go index a9c26a3e60..22c503d71b 100644 --- a/exp/api/v1beta2/awsmachinepool_types.go +++ b/exp/api/v1beta2/awsmachinepool_types.go @@ -199,6 +199,10 @@ type AWSMachinePoolStatus struct { // +optional LaunchTemplateVersion *string `json:"launchTemplateVersion,omitempty"` + // InfrastructureMachineKind is the kind of the infrastructure resources behind MachinePool Machines. + // +optional + InfrastructureMachineKind string `json:"infrastructureMachineKind,omitempty"` + // FailureReason will be set in the event that there is a terminal problem // reconciling the Machine and will contain a succinct value suitable // for machine interpretation. diff --git a/exp/api/v1beta2/awsmanagedmachinepool_types.go b/exp/api/v1beta2/awsmanagedmachinepool_types.go index c7e70fcf55..e1e2dfc102 100644 --- a/exp/api/v1beta2/awsmanagedmachinepool_types.go +++ b/exp/api/v1beta2/awsmanagedmachinepool_types.go @@ -199,6 +199,10 @@ type AWSManagedMachinePoolStatus struct { // +optional LaunchTemplateVersion *string `json:"launchTemplateVersion,omitempty"` + // InfrastructureMachineKind is the kind of the infrastructure resources behind MachinePool Machines. + // +optional + InfrastructureMachineKind string `json:"infrastructureMachineKind,omitempty"` + // FailureReason will be set in the event that there is a terminal problem // reconciling the MachinePool and will contain a succinct value suitable // for machine interpretation. diff --git a/exp/api/v1beta2/conditions_consts.go b/exp/api/v1beta2/conditions_consts.go index 2d052fae53..d2824bb470 100644 --- a/exp/api/v1beta2/conditions_consts.go +++ b/exp/api/v1beta2/conditions_consts.go @@ -54,6 +54,11 @@ const ( InstanceRefreshNotReadyReason = "InstanceRefreshNotReady" // InstanceRefreshFailedReason used to report when there instance refresh is not initiated. InstanceRefreshFailedReason = "InstanceRefreshFailed" + + // AWSMachineCreationFailed reports if creating AWSMachines to represent ASG (machine pool) machines failed. + AWSMachineCreationFailed = "AWSMachineCreationFailed" + // AWSMachineDeletionFailed reports if deleting AWSMachines failed. + AWSMachineDeletionFailed = "AWSMachineDeletionFailed" ) const ( diff --git a/exp/api/v1beta2/types.go b/exp/api/v1beta2/types.go index ef589c2951..624028d4cd 100644 --- a/exp/api/v1beta2/types.go +++ b/exp/api/v1beta2/types.go @@ -22,6 +22,11 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ) +const ( + // KindMachinePool is a MachinePool resource Kind + KindMachinePool string = "MachinePool" +) + // EBS can be used to automatically set up EBS volumes when an instance is launched. type EBS struct { // Encrypted is whether the volume should be encrypted or not. 
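The controller changes in the next file correlate ASG instances with AWSMachine objects by provider ID, built as aws:///<availability-zone>/<instance-id>: instances without a matching AWSMachine get one created, and AWSMachines whose provider ID no longer appears in the ASG are deleted as orphans. A minimal sketch of that matching logic follows; the stand-in types and function names here are illustrative only and are not part of the patch, which operates on infrav1.Instance and infrav1.AWSMachine through a controller-runtime client.

package main

import "fmt"

// instance is a stand-in for the relevant fields of infrav1.Instance.
type instance struct {
	ID               string
	AvailabilityZone string
}

// providerID mirrors the format the controller uses to correlate
// ASG instances with AWSMachine objects: aws:///<az>/<instance-id>.
func providerID(i instance) string {
	return fmt.Sprintf("aws:///%s/%s", i.AvailabilityZone, i.ID)
}

// missingMachines returns the ASG instances that have no AWSMachine yet,
// keyed by provider ID (the "create if not exists" half of the reconcile);
// the inverse lookup is what drives orphan deletion.
func missingMachines(asgInstances []instance, machineProviderIDs map[string]bool) []instance {
	var missing []instance
	for _, inst := range asgInstances {
		if !machineProviderIDs[providerID(inst)] {
			missing = append(missing, inst)
		}
	}
	return missing
}

func main() {
	asg := []instance{
		{ID: "i-0abc", AvailabilityZone: "us-east-1a"},
		{ID: "i-0def", AvailabilityZone: "us-east-1b"},
	}
	existing := map[string]bool{"aws:///us-east-1a/i-0abc": true}
	fmt.Println(missingMachines(asg, existing)) // [{i-0def us-east-1b}]
}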
diff --git a/exp/controllers/awsmachinepool_controller.go b/exp/controllers/awsmachinepool_controller.go index 741cdcdb10..5ce2f1ce49 100644 --- a/exp/controllers/awsmachinepool_controller.go +++ b/exp/controllers/awsmachinepool_controller.go @@ -21,6 +21,8 @@ import ( "context" "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/go-logr/logr" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/pkg/errors" @@ -172,16 +174,22 @@ func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Reque } }() + // Patch now so that the status and selectors are available. + awsMachinePool.Status.InfrastructureMachineKind = "AWSMachine" + if err := machinePoolScope.PatchObject(); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to patch AWSMachinePool status") + } + switch infraScope := infraCluster.(type) { case *scope.ManagedControlPlaneScope: if !awsMachinePool.ObjectMeta.DeletionTimestamp.IsZero() { - return ctrl.Result{}, r.reconcileDelete(machinePoolScope, infraScope, infraScope) + return ctrl.Result{}, r.reconcileDelete(ctx, machinePoolScope, infraScope, infraScope) } return ctrl.Result{}, r.reconcileNormal(ctx, machinePoolScope, infraScope, infraScope) case *scope.ClusterScope: if !awsMachinePool.ObjectMeta.DeletionTimestamp.IsZero() { - return ctrl.Result{}, r.reconcileDelete(machinePoolScope, infraScope, infraScope) + return ctrl.Result{}, r.reconcileDelete(ctx, machinePoolScope, infraScope, infraScope) } return ctrl.Result{}, r.reconcileNormal(ctx, machinePoolScope, infraScope, infraScope) @@ -298,6 +306,23 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP return nil } + awsMachineList, err := getAWSMachines(ctx, machinePoolScope.MachinePool, r.Client) + if err != nil { + return err + } + + if err := createAWSMachinesIfNotExists(ctx, awsMachineList, machinePoolScope.MachinePool, asg, machinePoolScope.GetLogger(), r.Client, ec2Svc); err != nil { + machinePoolScope.SetNotReady() + conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineCreationFailed, clusterv1.ConditionSeverityWarning, err.Error()) + return fmt.Errorf("failed to create awsmachines: %w", err) + } + + if err := deleteOrphanedAWSMachines(ctx, awsMachineList, asg, machinePoolScope.GetLogger(), r.Client); err != nil { + machinePoolScope.SetNotReady() + conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineDeletionFailed, clusterv1.ConditionSeverityWarning, err.Error()) + return fmt.Errorf("failed to clean up awsmachines: %w", err) + } + if annotations.ReplicasManagedByExternalAutoscaler(machinePoolScope.MachinePool) { // Set MachinePool replicas to the ASG DesiredCapacity if *machinePoolScope.MachinePool.Spec.Replicas != *asg.DesiredCapacity { @@ -356,8 +381,11 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP return nil } -func (r *AWSMachinePoolReconciler) reconcileDelete(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope) error { +func (r *AWSMachinePoolReconciler) reconcileDelete(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, ec2Scope scope.EC2Scope) error { clusterScope.Info("Handling deleted AWSMachinePool") + if err := reconcileDeleteAWSMachines(ctx, machinePoolScope.MachinePool, r.Client, machinePoolScope.GetLogger()); err != nil { + return err + } ec2Svc := r.getEC2Service(ec2Scope) asgSvc := 
r.getASGService(clusterScope) @@ -415,6 +443,164 @@ func (r *AWSMachinePoolReconciler) reconcileDelete(machinePoolScope *scope.Machi return nil } +func reconcileDeleteAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, client client.Client, l logr.Logger) error { + awsMachineList, err := getAWSMachines(ctx, mp, client) + if err != nil { + return err + } + for i := range awsMachineList.Items { + awsMachine := awsMachineList.Items[i] + if awsMachine.DeletionTimestamp.IsZero() { + continue + } + logger := l.WithValues("awsmachine", klog.KObj(&awsMachine)) + // delete the owner Machine resource for the AWSMachine so that CAPI can clean up gracefully + machine, err := util.GetOwnerMachine(ctx, client, awsMachine.ObjectMeta) + if err != nil { + logger.V(2).Info("failed to get owner machine") + continue + } + + if err := client.Delete(ctx, machine); err != nil { + logger.V(2).Info("failed to delete owner machine") + } + } + return nil +} + +func getAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, kubeClient client.Client) (*infrav1.AWSMachineList, error) { + awsMachineList := &infrav1.AWSMachineList{} + labels := map[string]string{ + clusterv1.MachinePoolNameLabel: mp.Name, + clusterv1.ClusterNameLabel: mp.Spec.ClusterName, + } + if err := kubeClient.List(ctx, awsMachineList, client.InNamespace(mp.Namespace), client.MatchingLabels(labels)); err != nil { + return nil, err + } + return awsMachineList, nil +} + +func createAWSMachinesIfNotExists(ctx context.Context, awsMachineList *infrav1.AWSMachineList, mp *expclusterv1.MachinePool, existingASG *expinfrav1.AutoScalingGroup, l logr.Logger, client client.Client, ec2Svc services.EC2Interface) error { + l.V(5).Info("creating missing awsmachines") + + providerIDToAWSMachine := make(map[string]infrav1.AWSMachine, len(awsMachineList.Items)) + for i := range awsMachineList.Items { + awsMachine := awsMachineList.Items[i] + if awsMachine.Spec.ProviderID == nil || *awsMachine.Spec.ProviderID == "" { + continue + } + providerID := *awsMachine.Spec.ProviderID + providerIDToAWSMachine[providerID] = awsMachine + } + + for i := range existingASG.Instances { + instanceID := existingASG.Instances[i].ID + providerID := fmt.Sprintf("aws:///%s/%s", existingASG.Instances[i].AvailabilityZone, instanceID) + + instanceLogger := l.WithValues("providerID", providerID, "instanceID", instanceID, "asg", existingASG.Name) + instanceLogger.V(5).Info("checking if machinepool awsmachine is up to date") + if _, exists := providerIDToAWSMachine[providerID]; exists { + continue + } + + instance, err := ec2Svc.InstanceIfExists(&instanceID) + if errors.Is(err, ec2.ErrInstanceNotFoundByID) { + instanceLogger.V(5).Info("instance not found, it may have already been deleted.") + continue + } + if err != nil { + return fmt.Errorf("failed to look up ec2 instance %q: %w", instanceID, err) + } + + securityGroups := make([]infrav1.AWSResourceReference, 0, len(instance.SecurityGroupIDs)) + for j := range instance.SecurityGroupIDs { + securityGroups = append(securityGroups, infrav1.AWSResourceReference{ + ID: aws.String(instance.SecurityGroupIDs[j]), + }) + } + + awsMachine := &infrav1.AWSMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mp.Namespace, + GenerateName: fmt.Sprintf("%s-", existingASG.Name), + Labels: map[string]string{ + clusterv1.MachinePoolNameLabel: mp.Name, + clusterv1.ClusterNameLabel: mp.Spec.ClusterName, + }, + }, + Spec: infrav1.AWSMachineSpec{ + ProviderID: aws.String(providerID), + InstanceID: aws.String(instanceID), + + // Store some extra 
fields for informational purposes (not needed by CAPA) + AMI: infrav1.AMIReference{ + ID: aws.String(instance.ImageID), + }, + InstanceType: instance.Type, + PublicIP: aws.Bool(instance.PublicIP != nil), + SSHKeyName: instance.SSHKeyName, + InstanceMetadataOptions: instance.InstanceMetadataOptions, + IAMInstanceProfile: instance.IAMProfile, + AdditionalSecurityGroups: securityGroups, + Subnet: &infrav1.AWSResourceReference{ID: aws.String(instance.SubnetID)}, + RootVolume: instance.RootVolume, + NonRootVolumes: instance.NonRootVolumes, + NetworkInterfaces: instance.NetworkInterfaces, + CloudInit: infrav1.CloudInit{}, + SpotMarketOptions: instance.SpotMarketOptions, + Tenancy: instance.Tenancy, + }, + } + instanceLogger.V(5).Info("creating AWSMachine instance") + if err := client.Create(ctx, awsMachine); err != nil { + return fmt.Errorf("failed to create AWSMachine: %w", err) + } + } + return nil +} + +func deleteOrphanedAWSMachines(ctx context.Context, awsMachineList *infrav1.AWSMachineList, existingASG *expinfrav1.AutoScalingGroup, l logr.Logger, client client.Client) error { + l.V(5).Info("Deleting orphaned awsmachines") + providerIDToInstance := make(map[string]infrav1.Instance, len(existingASG.Instances)) + for i := range existingASG.Instances { + providerID := fmt.Sprintf("aws:///%s/%s", existingASG.Instances[i].AvailabilityZone, existingASG.Instances[i].ID) + providerIDToInstance[providerID] = existingASG.Instances[i] + } + + for i := range awsMachineList.Items { + awsMachine := awsMachineList.Items[i] + if awsMachine.Spec.ProviderID == nil || *awsMachine.Spec.ProviderID == "" { + continue + } + + providerID := *awsMachine.Spec.ProviderID + if _, exists := providerIDToInstance[providerID]; exists { + continue + } + + machine, err := util.GetOwnerMachine(ctx, client, awsMachine.ObjectMeta) + if err != nil { + return fmt.Errorf("failed to get owner machine for %s/%s: %w", awsMachine.Namespace, awsMachine.Name, err) + } + machineLogger := l.WithValues("machine", klog.KObj(machine), "awsmachine", klog.KObj(&awsMachine), "ProviderID", providerID) + machineLogger.V(5).Info("Deleting orphaned machine") + if machine == nil { + machineLogger.Info("No machine owner found for AWSMachine, deleting AWSMachine anyway.") + if err := client.Delete(ctx, &awsMachine); err != nil { + return fmt.Errorf("failed to delete orphan awsMachine %s/%s: %w", awsMachine.Namespace, awsMachine.Name, err) + } + machineLogger.V(5).Info("deleted AWSMachine") + continue + } + + if err := client.Delete(ctx, machine); err != nil { + return fmt.Errorf("failed to delete orphan machine %s/%s: %w", machine.Namespace, machine.Name, err) + } + machineLogger.V(5).Info("deleted AWSMachine") + } + return nil +} + func (r *AWSMachinePoolReconciler) updatePool(machinePoolScope *scope.MachinePoolScope, clusterScope cloud.ClusterScoper, existingASG *expinfrav1.AutoScalingGroup) error { asgSvc := r.getASGService(clusterScope) diff --git a/exp/controllers/awsmachinepool_controller_test.go b/exp/controllers/awsmachinepool_controller_test.go index 4902dbb7e7..f0dccbf1bd 100644 --- a/exp/controllers/awsmachinepool_controller_test.go +++ b/exp/controllers/awsmachinepool_controller_test.go @@ -35,6 +35,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -101,6 +102,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }, }, }, + Status: 
expinfrav1.AWSMachinePoolStatus{}, } secret = &corev1.Secret{ @@ -135,6 +137,11 @@ func TestAWSMachinePoolReconciler(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "mp", Namespace: "default", + UID: "1", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "cluster.x-k8s.io/v1beta1", + Kind: "MachinePool", }, Spec: expclusterv1.MachinePoolSpec{ ClusterName: "test", @@ -173,6 +180,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { return reconSvc }, Recorder: recorder, + Client: testEnv.Client, } } @@ -282,6 +290,141 @@ func TestAWSMachinePoolReconciler(t *testing.T) { g.Expect(errors.Cause(err)).To(MatchError(expectedErr)) }) }) + t.Run("there are nodes in the asg which need awsmachines", func(t *testing.T) { + t.Run("should create awsmachines for the nodes", func(t *testing.T) { + g := NewWithT(t) + setup(t, g) + defer teardown(t, g) + + asg := &expinfrav1.AutoScalingGroup{ + Name: "name", + Instances: []infrav1.Instance{ + { + ID: "1", + }, + { + ID: "2", + }, + }, + Subnets: []string{}, + } + + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), ec2Svc, gomock.Any(), gomock.Any()).Return(nil) + asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(asg, nil) + ec2Svc.EXPECT().InstanceIfExists(aws.String("1")).Return(&infrav1.Instance{ID: "1", Type: "m6.2xlarge"}, nil) + ec2Svc.EXPECT().InstanceIfExists(aws.String("2")).Return(&infrav1.Instance{ID: "2", Type: "m6.2xlarge"}, nil) + asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{}, nil) + asgSvc.EXPECT().UpdateASG(gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) + + err := reconciler.reconcileNormal(context.Background(), ms, cs, cs) + g.Expect(err).To(Succeed()) + + g.Eventually(func() int { + awsMachines := &infrav1.AWSMachineList{} + if err := testEnv.List(ctx, awsMachines, client.InNamespace(ms.AWSMachinePool.Namespace)); err != nil { + return -1 + } + return len(awsMachines.Items) + }).Should(BeEquivalentTo(len(asg.Instances))) + }) + t.Run("should delete awsmachines for nodes removed from the asg", func(t *testing.T) { + g := NewWithT(t) + setup(t, g) + defer teardown(t, g) + + asg := &expinfrav1.AutoScalingGroup{ + Name: "name", + Instances: []infrav1.Instance{ + { + ID: "1", + }, + }, + Subnets: []string{}, + } + g.Expect(testEnv.Create(context.Background(), &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ms.AWSMachinePool.Namespace, + Name: "name-1", + UID: "1", + }, + Spec: clusterv1.MachineSpec{ + ClusterName: "test", + }, + })).To(Succeed()) + g.Expect(testEnv.Create(context.Background(), &infrav1.AWSMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ms.AWSMachinePool.Namespace, + Name: "name-1", + Labels: map[string]string{ + clusterv1.MachinePoolNameLabel: ms.MachinePool.Name, + clusterv1.ClusterNameLabel: ms.MachinePool.Spec.ClusterName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "v1beta1", + Kind: "Machine", + Name: "name-1", + UID: "1", + }, + }, + }, + Spec: infrav1.AWSMachineSpec{ + ProviderID: aws.String("1"), + InstanceType: "m6.2xlarge", + }, + })).To(Succeed()) + g.Expect(testEnv.Create(context.Background(), &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ms.AWSMachinePool.Namespace, + Name: "name-2", + UID: "2", + }, + Spec: clusterv1.MachineSpec{ + ClusterName: "test", + }, + })).To(Succeed()) + g.Expect(testEnv.Create(context.Background(), &infrav1.AWSMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ms.AWSMachinePool.Namespace, + Name: "name-2", + Labels: map[string]string{ + 
clusterv1.MachinePoolNameLabel: ms.MachinePool.Name, + clusterv1.ClusterNameLabel: ms.MachinePool.Spec.ClusterName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "v1beta1", + Kind: "Machine", + Name: "name-2", + UID: "2", + }, + }, + }, + Spec: infrav1.AWSMachineSpec{ + ProviderID: aws.String("2"), + InstanceType: "m6.2xlarge", + }, + })).To(Succeed()) + + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), ec2Svc, gomock.Any(), gomock.Any()).Return(nil) + asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(asg, nil) + asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{}, nil) + asgSvc.EXPECT().UpdateASG(gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) + + err := reconciler.reconcileNormal(context.Background(), ms, cs, cs) + g.Expect(err).To(Succeed()) + + g.Eventually(func() int { + awsMachines := &infrav1.AWSMachineList{} + if err := testEnv.List(ctx, awsMachines, client.InNamespace(ms.AWSMachinePool.Namespace)); err != nil { + return -1 + } + return len(awsMachines.Items) + }).Should(BeEquivalentTo(len(asg.Instances))) + }) + }) t.Run("there's suspended processes provided during ASG creation", func(t *testing.T) { setSuspendedProcesses := func(t *testing.T, g *WithT) { t.Helper() @@ -398,9 +541,10 @@ func TestAWSMachinePoolReconciler(t *testing.T) { } ms.MachinePool.Spec.Replicas = ptr.To[int32](0) - g.Expect(testEnv.Create(ctx, ms.MachinePool)).To(Succeed()) + g.Expect(testEnv.Create(ctx, ms.MachinePool.DeepCopy())).To(Succeed()) - _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs) + err := reconciler.reconcileNormal(context.Background(), ms, cs, cs) + g.Expect(err).To(Succeed()) g.Expect(*ms.MachinePool.Spec.Replicas).To(Equal(int32(1))) }) t.Run("No need to update Asg because asgNeedsUpdates is false and no subnets change", func(t *testing.T) { @@ -756,7 +900,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { expectedErr := errors.New("no connection available ") asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(nil, expectedErr).AnyTimes() - err := reconciler.reconcileDelete(ms, cs, cs) + err := reconciler.reconcileDelete(context.Background(), ms, cs, cs) g.Expect(errors.Cause(err)).To(MatchError(expectedErr)) }) t.Run("should log and remove finalizer when no machinepool exists", func(t *testing.T) { @@ -771,7 +915,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { buf := new(bytes.Buffer) klog.SetOutput(buf) - err := reconciler.reconcileDelete(ms, cs, cs) + err := reconciler.reconcileDelete(context.Background(), ms, cs, cs) g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Unable to locate ASG")) g.Expect(ms.AWSMachinePool.Finalizers).To(ConsistOf(metav1.FinalizerDeleteDependents)) @@ -792,7 +936,8 @@ func TestAWSMachinePoolReconciler(t *testing.T) { buf := new(bytes.Buffer) klog.SetOutput(buf) - err := reconciler.reconcileDelete(ms, cs, cs) + err := reconciler.reconcileDelete(context.Background(), ms, cs, cs) + g.Expect(err).To(BeNil()) g.Expect(ms.AWSMachinePool.Status.Ready).To(BeFalse()) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("DeletionInProgress"))) diff --git a/exp/controllers/awsmanagedmachinepool_controller.go b/exp/controllers/awsmanagedmachinepool_controller.go index 8c0d75c2ec..95f6e5b195 100644 --- a/exp/controllers/awsmanagedmachinepool_controller.go +++ b/exp/controllers/awsmanagedmachinepool_controller.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "fmt" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ 
-36,8 +37,10 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" + asgsvc "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/autoscaling" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" @@ -46,6 +49,7 @@ import ( "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -137,6 +141,16 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr return reconcile.Result{}, nil } + ampHelper, err := patch.NewHelper(awsPool, r.Client) + if err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to init AWSMachinePool patch helper") + } + awsPool.Status.InfrastructureMachineKind = "AWSMachine" + // Patch now so that the status and selectors are available. + if err := ampHelper.Patch(ctx, awsPool); err != nil { + return ctrl.Result{}, err + } + managedControlPlaneScope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ Client: r.Client, Logger: log, @@ -206,9 +220,16 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal( } ekssvc := eks.NewNodegroupService(machinePoolScope) + asgsvc := r.getASGService(ec2Scope) ec2svc := r.getEC2Service(ec2Scope) reconSvc := r.getReconcileService(ec2Scope) + asgName := machinePoolScope.Name() + asg, err := asgsvc.ASGIfExists(&asgName) + if err != nil { + return fmt.Errorf("failed to query asg for %s: %w", asgName, err) + } + if machinePoolScope.ManagedMachinePool.Spec.AWSLaunchTemplate != nil { canUpdateLaunchTemplate := func() (bool, error) { return true, nil @@ -236,6 +257,23 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal( conditions.MarkTrue(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition) } + awsMachineList, err := getAWSMachines(ctx, machinePoolScope.MachinePool, r.Client) + if err != nil { + return err + } + + if err := createAWSMachinesIfNotExists(ctx, awsMachineList, machinePoolScope.MachinePool, asg, machinePoolScope.GetLogger(), r.Client, ec2svc); err != nil { + machinePoolScope.ManagedMachinePool.Status.Ready = false + conditions.MarkFalse(machinePoolScope.ManagedMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineCreationFailed, clusterv1.ConditionSeverityWarning, err.Error()) + return fmt.Errorf("failed to create missing awsmachines: %w", err) + } + + if err := deleteOrphanedAWSMachines(ctx, awsMachineList, asg, machinePoolScope.GetLogger(), r.Client); err != nil { + machinePoolScope.ManagedMachinePool.Status.Ready = false + conditions.MarkFalse(machinePoolScope.ManagedMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineDeletionFailed, clusterv1.ConditionSeverityWarning, err.Error()) + return fmt.Errorf("failed to clean up dangling awsmachines: %w", err) + } + if err := ekssvc.ReconcilePool(ctx); err != nil { return errors.Wrapf(err, "failed to reconcile machine pool for AWSManagedMachinePool %s/%s", machinePoolScope.ManagedMachinePool.Namespace, machinePoolScope.ManagedMachinePool.Name) } @@ -244,12 +282,16 @@ func (r *AWSManagedMachinePoolReconciler) 
reconcileNormal( } func (r *AWSManagedMachinePoolReconciler) reconcileDelete( - _ context.Context, + ctx context.Context, machinePoolScope *scope.ManagedMachinePoolScope, ec2Scope scope.EC2Scope, ) error { machinePoolScope.Info("Reconciling deletion of AWSManagedMachinePool") + if err := reconcileDeleteAWSMachines(ctx, machinePoolScope.MachinePool, r.Client, machinePoolScope.GetLogger()); err != nil { + return err + } + ekssvc := eks.NewNodegroupService(machinePoolScope) ec2Svc := ec2.NewService(ec2Scope) @@ -345,6 +387,10 @@ func managedControlPlaneToManagedMachinePoolMapFunc(c client.Client, gvk schema. } } +func (r *AWSManagedMachinePoolReconciler) getASGService(scope cloud.ClusterScoper) services.ASGInterface { + return asgsvc.NewService(scope) +} + func (r *AWSManagedMachinePoolReconciler) getEC2Service(scope scope.EC2Scope) services.EC2Interface { return ec2.NewService(scope) } diff --git a/pkg/cloud/awserrors/errors.go b/pkg/cloud/awserrors/errors.go index d51b41595c..765d3ce626 100644 --- a/pkg/cloud/awserrors/errors.go +++ b/pkg/cloud/awserrors/errors.go @@ -56,6 +56,7 @@ const ( VPCNotFound = "InvalidVpcID.NotFound" VPCMissingParameter = "MissingParameter" ErrCodeRepositoryAlreadyExistsException = "RepositoryAlreadyExistsException" + ASGNotFound = "AutoScalingGroup.NotFound" ) var _ error = &EC2Error{} @@ -172,6 +173,8 @@ func IsInvalidNotFoundError(err error) bool { return true case LaunchTemplateNameNotFound: return true + case ASGNotFound: + return true } } diff --git a/pkg/cloud/scope/machine.go b/pkg/cloud/scope/machine.go index 331c4c31e2..26e5b8bbbd 100644 --- a/pkg/cloud/scope/machine.go +++ b/pkg/cloud/scope/machine.go @@ -29,6 +29,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capierrors "sigs.k8s.io/cluster-api/errors" @@ -114,6 +115,16 @@ func (m *MachineScope) IsControlPlane() bool { return util.IsControlPlaneMachine(m.Machine) } +// IsMachinePoolMachine returns true if the machine is created for a machinepool. +func (m *MachineScope) IsMachinePoolMachine() bool { + for _, owner := range m.Machine.OwnerReferences { + if owner.Kind == v1beta2.KindMachinePool { + return true + } + } + return false +} + // Role returns the machine role from the labels. func (m *MachineScope) Role() string { if util.IsControlPlaneMachine(m.Machine) {
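The new MachineScope.IsMachinePoolMachine helper above returns true when any owner reference on the CAPI Machine has Kind "MachinePool" (the expinfrav1.KindMachinePool constant added in this patch). A self-contained sketch of that check is shown below; the isMachinePoolMachine function and sample owner references are illustrative, assuming only the k8s.io/apimachinery module, and are not part of the patch.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// kindMachinePool mirrors expinfrav1.KindMachinePool from the patch.
const kindMachinePool = "MachinePool"

// isMachinePoolMachine mirrors MachineScope.IsMachinePoolMachine: a Machine
// belongs to a machine pool when any of its owner references is a MachinePool.
func isMachinePoolMachine(owners []metav1.OwnerReference) bool {
	for _, owner := range owners {
		if owner.Kind == kindMachinePool {
			return true
		}
	}
	return false
}

func main() {
	owners := []metav1.OwnerReference{
		{APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachinePool", Name: "mp-0"},
	}
	fmt.Println(isMachinePoolMachine(owners)) // true
	fmt.Println(isMachinePoolMachine(nil))    // false
}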